hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
8cbe39854b42825e294b04cd98eb394c6be92c82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
/**
* This is the kernel that convolves the two given float arrays.
* The result is saved in the third array.
*/
void cudaConvolve(float *a, float *b, float *res, int aLen, int bLen)
{
// Calculate the length of the result
int abMax = max(aLen, bLen);
int convLen = aLen + bLen - 1;
// Find the starting point and the step of the loop
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int n = index; n < convLen; n += step)
{
float prod = 0;
// Find the minimum number of iterations needed
int kMax = min(abMax, n);
for (int k = 0; k <= kMax; ++k)
{
// Make sure we're in bounds for both arrays,
// otherwise there's no overlap between the two.
if (k < aLen && n - k < bLen)
{
prod += a[k] * b[n - k];
}
}
res[n] = prod;
}
}
/**
* This function launches the CUDA kernel for the convolution, after
* calculating the proper number of blocks and threads needed.
*/
void myConvolve(float *a, float *b, float *res, int aLen, int bLen)
{
int blockSize = 256;
int numBlocks = ((aLen + bLen - 1) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cudaConvolve), dim3(numBlocks), dim3(blockSize), 0, 0, a, b, res, aLen, bLen);
hipDeviceSynchronize(); // Wait for the kernel to finish
}
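/*
 * Editor's note: the block below is an illustrative usage sketch added for
 * clarity; it is not part of the original file. It assumes the caller holds
 * host arrays hostA/hostB of lengths aLen/bLen; every name introduced here
 * is hypothetical.
 */
void exampleUsage(const float *hostA, int aLen, const float *hostB, int bLen)
{
int resLen = aLen + bLen - 1;
float *dA, *dB, *dRes;
// Allocate device buffers and copy the inputs over.
hipMalloc(&dA, aLen * sizeof(float));
hipMalloc(&dB, bLen * sizeof(float));
hipMalloc(&dRes, resLen * sizeof(float));
hipMemcpy(dA, hostA, aLen * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB, hostB, bLen * sizeof(float), hipMemcpyHostToDevice);
// myConvolve expects device pointers and synchronizes before returning.
myConvolve(dA, dB, dRes, aLen, bLen);
// Copy the result back to the host.
float *hostRes = new float[resLen];
hipMemcpy(hostRes, dRes, resLen * sizeof(float), hipMemcpyDeviceToHost);
// ... use hostRes ...
delete[] hostRes;
hipFree(dA);
hipFree(dB);
hipFree(dRes);
}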
| 8cbe39854b42825e294b04cd98eb394c6be92c82.cu | __global__
/**
* This is the kernel that convolves the two given float arrays.
* The result is saved in the third array.
*/
void cudaConvolve(float *a, float *b, float *res, int aLen, int bLen)
{
// Calculate the length of the result
int abMax = max(aLen, bLen);
int convLen = aLen + bLen - 1;
// Find the starting point and the step of the loop
int index = blockIdx.x * blockDim.x + threadIdx.x;
int step = blockDim.x * gridDim.x;
for (int n = index; n < convLen; n += step)
{
float prod = 0;
// Find the minimum number of iterations needed
int kMax = min(abMax, n);
for (int k = 0; k <= kMax; ++k)
{
// Make sure we're in bounds for both arrays,
// otherwise there's no overlap between the two.
if (k < aLen && n - k < bLen)
{
prod += a[k] * b[n - k];
}
}
res[n] = prod;
}
}
/**
* This function launches the CUDA kernel for the convolution, after
* calculating the proper number of blocks and threads needed.
*/
void myConvolve(float *a, float *b, float *res, int aLen, int bLen)
{
int blockSize = 256;
int numBlocks = ((aLen + bLen - 1) + blockSize - 1) / blockSize;
cudaConvolve<<<numBlocks, blockSize>>>(a, b, res, aLen, bLen);
cudaDeviceSynchronize(); // Wait for the kernel to finish
}
|
baba6e506f888d781c009e0f979395bd6738e1a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pooling_common.cu"
#include "random.cu"
/// @brief Does max pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
/// @param h_offs indexes into h of the values chosen for y.
/// @details If ABS_VALUES is defined, compare absolute values; otherwise,
/// compare signed values as usual.
/// Should be defined externally:
/// SX - input image width,
/// SY - input image height,
/// N_CHANNELS - number of input channels,
/// KX - pooling kernel width,
/// KY - pooling kernel height,
/// SLIDE_X - kernel sliding by x-axis,
/// SLIDE_Y - kernel sliding by y-axis.
/// Kernel should be run as:
/// global_size = [out_width, out_height],
/// local_size = None.
extern "C"
__global__ void max_pooling(const dtype *h, dtype *y, int *h_offs) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
dtype max_vle = -FLT_MAX;
#ifdef ABS_VALUES
dtype max_absvle = -1;
#endif
int max_offs = 0;
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY); i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS); j++, x += N_CHANNELS) {
#endif
dtype vle = h[offs + x];
#ifdef ABS_VALUES
dtype absvle = fabs(vle);
bool hit = (absvle > max_absvle);
max_absvle = (hit) ? absvle : max_absvle;
#else
bool hit = (vle > max_vle);
#endif
max_vle = (hit) ? vle : max_vle;
max_offs = (hit) ? offs + x : max_offs;
}
}
y[idx] = max_vle;
h_offs[idx] = max_offs;
}
/// @brief Does avg pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
extern "C"
__global__ void avg_pooling(const dtype /* IN */ *h, dtype /* OUT */ *y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
dtype smm = 0;
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY); i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS); j++, x += N_CHANNELS) {
#endif
smm += h[offs + x];
}
}
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
#define NY KY
#else
#define NY MIN(KY, SY - (target_y % OUT_SY) * SLIDE_Y)
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
#define NX KX
#else
#define NX MIN(KX, SX - TARGET_PIXEL_X * SLIDE_X)
#endif
y[idx] = smm / (NX * NY);
#undef NX
#undef NY
}
/// @brief Does stochastic pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
/// @param h_offs indexes into h of the values chosen for y.
/// @param rand random numbers.
/// @details If ABS_VALUES is defined, use absolute values; otherwise,
/// discard negative ones.
/// Should be defined externally:
/// SX - input image width,
/// SY - input image height,
/// N_CHANNELS - number of input channels,
/// KX - pooling kernel width,
/// KY - pooling kernel height,
/// SLIDE_X - kernel sliding by x-axis,
/// SLIDE_Y - kernel sliding by y-axis.
/// Kernel should be run as:
/// global_size = [out_width, out_height],
/// local_size = None.
#if KX * KY > 65536
#error "Too large kernel size for the current stochastic pooling implementation"
#endif
extern "C"
__global__ void stochastic_pooling(
const dtype /* IN */ *h, dtype /* OUT */ *y, int /* OUT */ *h_offs,
ushort /* IN, OUT */ *rand) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
int original_offset = offs;
dtype sum = 0;
int count = 0;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS, count++) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS, count++) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
}
}
ushort random = rand[idx];
// The index of the element passed through
int lucky = 0;
// All elements can be <= 0
dtype pos_add = (sum == 0) ? 1 : 0;
dtype pos_factor = (sum == 0) ? count : sum;
dtype pos = (pos_factor * random) / 65536;
sum = 0;
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
sum += pos_add;
lucky = (pos <= sum) ? offs + x : lucky;
sum = (pos <= sum) ? -FLT_MAX : sum;
}
}
y[idx] = h[lucky];
h_offs[idx] = lucky;
}
#ifdef USE_POOLING_DEPOOLING
#if (KX != SLIDE_X) || (KY != SLIDE_Y)
#error "Sliding should be equal to the kernel size for the current implementation"
#endif
extern "C"
__global__
void stochastic_pooling_depooling(dtype *h, const ushort *rand) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL,
start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
int original_offset = offs;
dtype sum = 0;
int count = 0;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS, count++) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS, count++) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
}
}
ushort random = rand[idx];
// The index of the element passed through
int lucky = 0;
// All elements can be <= 0
dtype pos_add = (sum == 0) ? 1 : 0;
dtype pos_factor = (sum == 0) ? count : sum;
dtype pos = (pos_factor * random) / 65536;
sum = 0;
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
sum += pos_add;
lucky = (pos <= sum) ? offs + x : lucky;
sum = (pos <= sum) ? -FLT_MAX : sum;
}
}
dtype chosen_value = h[lucky];
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
h[offs + x] = (offs + x == lucky) ? chosen_value : 0;
}
}
}
#endif // USE_POOLING_DEPOOLING
#undef TARGET_CHANNEL
#undef TARGET_PIXEL_X
#undef OUT_SY
#undef OUT_SX
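/*
 * Editor's note: an illustrative (and commented-out) prologue showing one way
 * the "defined externally" symbols mentioned in the doc comments above could
 * be supplied, e.g. via -D compiler flags or a generated header. Every value
 * and every derived formula below is an assumption made for this example only.
 */
// #define dtype float
// #define SX 28                                  // input image width
// #define SY 28                                  // input image height
// #define N_CHANNELS 3                           // interleaved channels
// #define KX 2                                   // pooling kernel width
// #define KY 2                                   // pooling kernel height
// #define SLIDE_X 2                              // kernel stride along x
// #define SLIDE_Y 2                              // kernel stride along y
// #define OUT_SX ((SX + SLIDE_X - 1) / SLIDE_X)  // assumed output width
// #define OUT_SY ((SY + SLIDE_Y - 1) / SLIDE_Y)  // assumed output height
// #define OUTPUT_SIZE (BATCH * OUT_SY * OUT_SX * N_CHANNELS)  // BATCH also assumed
// #define TARGET_PIXEL_X (target_x / N_CHANNELS)
// #define TARGET_CHANNEL (target_x % N_CHANNELS)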
| baba6e506f888d781c009e0f979395bd6738e1a4.cu | #include "pooling_common.cu"
#include "random.cu"
/// @brief Does max pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
/// @param h_offs indexes into h of the values chosen for y.
/// @details If ABS_VALUES is defined, compare absolute values; otherwise,
/// compare signed values as usual.
/// Should be defined externally:
/// SX - input image width,
/// SY - input image height,
/// N_CHANNELS - number of input channels,
/// KX - pooling kernel width,
/// KY - pooling kernel height,
/// SLIDE_X - kernel sliding by x-axis,
/// SLIDE_Y - kernel sliding by y-axis.
/// Kernel should be run as:
/// global_size = [out_width, out_height],
/// local_size = None.
extern "C"
__global__ void max_pooling(const dtype *h, dtype *y, int *h_offs) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
dtype max_vle = -FLT_MAX;
#ifdef ABS_VALUES
dtype max_absvle = -1;
#endif
int max_offs = 0;
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY); i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS); j++, x += N_CHANNELS) {
#endif
dtype vle = h[offs + x];
#ifdef ABS_VALUES
dtype absvle = fabs(vle);
bool hit = (absvle > max_absvle);
max_absvle = (hit) ? absvle : max_absvle;
#else
bool hit = (vle > max_vle);
#endif
max_vle = (hit) ? vle : max_vle;
max_offs = (hit) ? offs + x : max_offs;
}
}
y[idx] = max_vle;
h_offs[idx] = max_offs;
}
/// @brief Does avg pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
extern "C"
__global__ void avg_pooling(const dtype /* IN */ *h, dtype /* OUT */ *y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
dtype smm = 0;
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY); i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS); j++, x += N_CHANNELS) {
#endif
smm += h[offs + x];
}
}
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
#define NY KY
#else
#define NY MIN(KY, SY - (target_y % OUT_SY) * SLIDE_Y)
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
#define NX KX
#else
#define NX MIN(KX, SX - TARGET_PIXEL_X * SLIDE_X)
#endif
y[idx] = smm / (NX * NY);
#undef NX
#undef NY
}
/// @brief Does stochastic pooling over convolutional layer output.
/// @param h batch of input multichannel interleaved images.
/// @param y batch of output multichannel interleaved images.
/// @param h_offs indexes into h of the values chosen for y.
/// @param rand random numbers.
/// @details If ABS_VALUES is defined, use absolute values; otherwise,
/// discard negative ones.
/// Should be defined externally:
/// SX - input image width,
/// SY - input image height,
/// N_CHANNELS - number of input channels,
/// KX - pooling kernel width,
/// KY - pooling kernel height,
/// SLIDE_X - kernel sliding by x-axis,
/// SLIDE_Y - kernel sliding by y-axis.
/// Kernel should be run as:
/// global_size = [out_width, out_height],
/// local_size = None.
#if KX * KY > 65536
#error "Too large kernel size for the current stochastic pooling implementation"
#endif
extern "C"
__global__ void stochastic_pooling(
const dtype /* IN */ *h, dtype /* OUT */ *y, int /* OUT */ *h_offs,
ushort /* IN, OUT */ *rand) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL;
int start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
int original_offset = offs;
dtype sum = 0;
int count = 0;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS, count++) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS, count++) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
}
}
ushort random = rand[idx];
// The index of the element passed through
int lucky = 0;
// All elements can be <= 0
dtype pos_add = (sum == 0) ? 1 : 0;
dtype pos_factor = (sum == 0) ? count : sum;
dtype pos = (pos_factor * random) / 65536;
sum = 0;
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
sum += pos_add;
lucky = (pos <= sum) ? offs + x : lucky;
sum = (pos <= sum) ? -FLT_MAX : sum;
}
}
y[idx] = h[lucky];
h_offs[idx] = lucky;
}
#ifdef USE_POOLING_DEPOOLING
#if (KX != SLIDE_X) || (KY != SLIDE_Y)
#error "Sliding should be equal to the kernel size for the current implementation"
#endif
extern "C"
__global__
void stochastic_pooling_depooling(dtype *h, const ushort *rand) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= OUTPUT_SIZE) {
return;
}
int target_y = idx / (OUT_SX * N_CHANNELS);
int target_x = idx % (OUT_SX * N_CHANNELS);
int start_x = TARGET_PIXEL_X * SLIDE_X * N_CHANNELS + TARGET_CHANNEL,
start_y = target_y % OUT_SY * SLIDE_Y;
int offs = ((target_y / OUT_SY) * SY + start_y) * SX * N_CHANNELS;
int original_offset = offs;
dtype sum = 0;
int count = 0;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS, count++) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS, count++) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
}
}
ushort random = rand[idx];
// The index of the element passed through
int lucky = 0;
// All elements can be <= 0
dtype pos_add = (sum == 0) ? 1 : 0;
dtype pos_factor = (sum == 0) ? count : sum;
dtype pos = (pos_factor * random) / 65536;
sum = 0;
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
dtype val = h[offs + x];
#ifdef ABS_VALUES
val = fabs(val);
#else
val = fmax(val, (dtype)0);
#endif
sum += val;
sum += pos_add;
lucky = (pos <= sum) ? offs + x : lucky;
sum = (pos <= sum) ? -FLT_MAX : sum;
}
}
dtype chosen_value = h[lucky];
// This is not just a copy-paste of the previous loops
offs = original_offset;
#if (OUT_SY - 1) * SLIDE_Y + KY == SY
// No partial windows at the bottom
for (int i = 0; i < KY; i++, offs += SX * N_CHANNELS) {
#else
// There are partial windows at the bottom
for (int i = 0, y = start_y; (i < KY) && (y < SY);
i++, y++, offs += SX * N_CHANNELS) {
#endif
#if (OUT_SX - 1) * SLIDE_X + KX == SX
// No partial windows at the right
for (int j = 0, x = start_x; j < KX; j++, x += N_CHANNELS) {
#else
// There are partial windows at the right
for (int j = 0, x = start_x; (j < KX) && (x < SX * N_CHANNELS);
j++, x += N_CHANNELS) {
#endif
h[offs + x] = (offs + x == lucky) ? chosen_value : 0;
}
}
}
#endif // USE_POOLING_DEPOOLING
#undef TARGET_CHANNEL
#undef TARGET_PIXEL_X
#undef OUT_SY
#undef OUT_SX
|
abc8dd1934f64ab108c932780cc634e2716c805e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| abc8dd1934f64ab108c932780cc634e2716c805e.cu | /**
* \file dnn/src/cuda/conv_bias/int8_imma/kimpl/conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_reorder_filter<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
357d8395817171b4a130367b7e144f3d0e89718a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute.cuh"
#include "../../../global.hpp"
#include "../../../timer.cuh"
#include "../../../utils.hpp"
#include <cstdio>
void pagerank_graph_init(graph_shard<float> &graph, config_t *conf){
for(uint nodeIdex = 0; nodeIdex < graph.n; ++nodeIdex){
graph.values[nodeIdex] = conf->init_prval;
}
}
// Point the graph_shard initializer function pointer at the PageRank-specific init function
template<>
void (*graph_initializer<float>::graph_init)(graph_shard<float>&, config_t*) = &pagerank_graph_init;
void write_to_file(graph_shard<float> &graph, config_t *conf){
FILE *fp = open_file_access(conf->output_path, "w");
for(uint nodeIdex = 0; nodeIdex < graph.n; ++nodeIdex){
fprintf(fp, "%u -> %.4f\n", graph.labels[nodeIdex], graph.values[nodeIdex]);
}
fclose(fp);
}
inline __device__ void compute(float* local, const uint *nbrs, const float *src){
if(*nbrs != 0){
atomicAdd(local, *src / *nbrs);
}
}
__global__ void kernel ( const uint num_nodes,
const uint nShards,
const uint shardMaxNumVertices,
const float factor,
const float threshold,
const uint *srcIndex,
const uint *destIndex,
float *srcValues,
float *values,
const uint *neighbors,
const uint *shardSizeScan,
const uint *windowSizeScan,
bool *lock
)
{
extern __shared__ float localValues[];
uint shardOffSet = blockIdx.x * shardMaxNumVertices;
uint shardStartAddr = shardSizeScan[ blockIdx.x ];
uint shardEndAddr = shardSizeScan[ blockIdx.x + 1 ];
float *blockValues = values + shardOffSet;
for(uint tid = threadIdx.x; tid < shardMaxNumVertices; tid += blockDim.x){
localValues[ tid ] = 0;
}
__syncthreads();
for(uint entryAddr = shardStartAddr + threadIdx.x;
entryAddr < shardEndAddr;
entryAddr += blockDim.x
)
{
compute(localValues + (destIndex[entryAddr] - shardOffSet),
neighbors + entryAddr,
srcValues + entryAddr
);
}
__syncthreads();
bool flag = false;
for(uint tid = threadIdx.x; tid < shardMaxNumVertices; tid += blockDim.x){
localValues[ tid ] = localValues[ tid ] * factor + (1.0 - factor) / num_nodes;
if(fabs(localValues[ tid ] - blockValues[ tid ]) > threshold){
flag = true;
blockValues[ tid ] = localValues[ tid ];
}
}
if(__syncthreads_or(flag)){
for(uint shardIdx = threadIdx.x / warpSize;
shardIdx < nShards;
shardIdx += (blockDim.x / warpSize)
)
{
uint windowStartAddr = windowSizeScan[ shardIdx * nShards + blockIdx.x ];
uint windowEndAddr = windowSizeScan[ shardIdx * nShards + blockIdx.x + 1 ];
for(uint entryAddr = windowStartAddr + ( threadIdx.x & (warpSize - 1));
entryAddr < windowEndAddr;
entryAddr += warpSize
)
{
srcValues[entryAddr] = localValues [ srcIndex[entryAddr] - shardOffSet ];
}
}
if(threadIdx.x == 0) *lock = true;
}
}
void process( const uint num_nodes,
const uint blocksize,
const uint shardMaxNumVertices,
const uint nShards,
const float factor,
const float threshold,
const uint maximum_iterations,
float *values,
const uint *windowSizeScan,
const uint *shardSizeScan,
float *srcValues,
const uint *srcIndex,
const uint *destIndex,
const uint *neighbors,
bool verbose
)
{
bool lock;
bool *dev_lock;
hipMalloc(&dev_lock, sizeof(bool));
uint iterations = 0;
GpuTimer timer;
float total_time = 0.0f;
printf("+--------------------------------------------------------------------------------+\n");
printf("| Times | function | costs(ms) +\n");
printf("+--------------------------------------------------------------------------------+\n");
do {
lock = false;
timer.start_record();
hipMemcpyAsync(dev_lock, &lock, sizeof(bool), hipMemcpyHostToDevice);
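// Editor's note: localValues in the kernel is a float array, while the dynamic
// shared-memory size below is computed with sizeof(uint); this is only safe
// because sizeof(uint) == sizeof(float) (4 bytes) on the targeted platforms.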
hipLaunchKernelGGL(( kernel), dim3(nShards), dim3(blocksize), sizeof(uint) * shardMaxNumVertices , 0,
num_nodes,
nShards,
shardMaxNumVertices,
factor,
threshold,
srcIndex,
destIndex,
srcValues,
values,
neighbors,
shardSizeScan,
windowSizeScan,
dev_lock
);
hipPeekAtLastError();
hipMemcpyAsync(&lock, dev_lock, sizeof(bool), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
++iterations;
float elapsed = timer.stop_record();
total_time += elapsed;
if(verbose){
printf("| %3dth | kernel function iteration | %3.4f |\n", iterations, elapsed);
}
}while(lock && iterations < maximum_iterations);
if(verbose){
printf("+--------------------------------------------------------------------------------+\n");
printf("| total | | %3.4f |\n", total_time );
printf("+--------------------------------------------------------------------------------+\n");
printf("| avg | | %3.4f |\n", total_time / iterations);
printf("+--------------------------------------------------------------------------------+\n");
}
}
void execute(graph_shard<float> &graph, config_t *conf){
buffer<float, DEVICE> dev_values;
buffer<float, DEVICE> dev_srcValues;
buffer<uint, DEVICE> dev_neigbors;
buffer<uint, DEVICE> dev_destIndex;
buffer<uint, DEVICE> dev_srcIndex;
buffer<uint, DEVICE> dev_shardSizeScan;
buffer<uint, DEVICE> dev_windowSizeScan;
dev_values = graph.values;
dev_srcValues = graph.src_values;
dev_srcIndex = graph.src_indexs;
dev_destIndex = graph.dest_indexs;
dev_neigbors = graph.neigbors_size;
dev_shardSizeScan = graph.shards_size;
dev_windowSizeScan = graph.windows_size;
hipDeviceSynchronize();
process ( graph.n,
graph.blocksize,
graph.shard_max_num_nodes,
graph.num_shards,
conf->factor,
conf->threshold,
conf->maximum_iterations,
dev_values.ptr,
dev_windowSizeScan.ptr,
dev_shardSizeScan.ptr,
dev_srcValues.ptr,
dev_srcIndex.ptr,
dev_destIndex.ptr,
dev_neigbors.ptr,
conf->verbose
);
graph.values = dev_values;
write_to_file(graph, conf);
dev_values.free();
dev_srcValues.free();
dev_srcIndex.free();
dev_destIndex.free();
dev_shardSizeScan.free();
dev_windowSizeScan.free();
dev_neigbors.free();
hipDeviceReset();
} | 357d8395817171b4a130367b7e144f3d0e89718a.cu | #include "compute.cuh"
#include "../../../global.hpp"
#include "../../../timer.cuh"
#include "../../../utils.hpp"
#include <cstdio>
void pagerank_graph_init(graph_shard<float> &graph, config_t *conf){
for(uint nodeIdex = 0; nodeIdex < graph.n; ++nodeIdex){
graph.values[nodeIdex] = conf->init_prval;
}
}
// Point the graph_shard initializer function pointer at the PageRank-specific init function
template<>
void (*graph_initializer<float>::graph_init)(graph_shard<float>&, config_t*) = &pagerank_graph_init;
void write_to_file(graph_shard<float> &graph, config_t *conf){
FILE *fp = open_file_access(conf->output_path, "w");
for(uint nodeIdex = 0; nodeIdex < graph.n; ++nodeIdex){
fprintf(fp, "%u -> %.4f\n", graph.labels[nodeIdex], graph.values[nodeIdex]);
}
fclose(fp);
}
inline __device__ void compute(float* local, const uint *nbrs, const float *src){
if(*nbrs != 0){
atomicAdd(local, *src / *nbrs);
}
}
__global__ void kernel ( const uint num_nodes,
const uint nShards,
const uint shardMaxNumVertices,
const float factor,
const float threshold,
const uint *srcIndex,
const uint *destIndex,
float *srcValues,
float *values,
const uint *neighbors,
const uint *shardSizeScan,
const uint *windowSizeScan,
bool *lock
)
{
extern __shared__ float localValues[];
uint shardOffSet = blockIdx.x * shardMaxNumVertices;
uint shardStartAddr = shardSizeScan[ blockIdx.x ];
uint shardEndAddr = shardSizeScan[ blockIdx.x + 1 ];
float *blockValues = values + shardOffSet;
for(uint tid = threadIdx.x; tid < shardMaxNumVertices; tid += blockDim.x){
localValues[ tid ] = 0;
}
__syncthreads();
for(uint entryAddr = shardStartAddr + threadIdx.x;
entryAddr < shardEndAddr;
entryAddr += blockDim.x
)
{
compute(localValues + (destIndex[entryAddr] - shardOffSet),
neighbors + entryAddr,
srcValues + entryAddr
);
}
__syncthreads();
bool flag = false;
for(uint tid = threadIdx.x; tid < shardMaxNumVertices; tid += blockDim.x){
localValues[ tid ] = localValues[ tid ] * factor + (1.0 - factor) / num_nodes;
if(fabs(localValues[ tid ] - blockValues[ tid ]) > threshold){
flag = true;
blockValues[ tid ] = localValues[ tid ];
}
}
if(__syncthreads_or(flag)){
for(uint shardIdx = threadIdx.x / warpSize;
shardIdx < nShards;
shardIdx += (blockDim.x / warpSize)
)
{
uint windowStartAddr = windowSizeScan[ shardIdx * nShards + blockIdx.x ];
uint windowEndAddr = windowSizeScan[ shardIdx * nShards + blockIdx.x + 1 ];
for(uint entryAddr = windowStartAddr + ( threadIdx.x & (warpSize - 1));
entryAddr < windowEndAddr;
entryAddr += warpSize
)
{
srcValues[entryAddr] = localValues [ srcIndex[entryAddr] - shardOffSet ];
}
}
if(threadIdx.x == 0) *lock = true;
}
}
void process( const uint num_nodes,
const uint blocksize,
const uint shardMaxNumVertices,
const uint nShards,
const float factor,
const float threshold,
const uint maximum_iterations,
float *values,
const uint *windowSizeScan,
const uint *shardSizeScan,
float *srcValues,
const uint *srcIndex,
const uint *destIndex,
const uint *neighbors,
bool verbose
)
{
bool lock;
bool *dev_lock;
cudaMalloc(&dev_lock, sizeof(bool));
uint iterations = 0;
GpuTimer timer;
float total_time = 0.0f;
printf("+--------------------------------------------------------------------------------+\n");
printf("| Times | function | costs(ms) +\n");
printf("+--------------------------------------------------------------------------------+\n");
do {
lock = false;
timer.start_record();
cudaMemcpyAsync(dev_lock, &lock, sizeof(bool), cudaMemcpyHostToDevice);
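// Editor's note: localValues in the kernel is a float array, while the dynamic
// shared-memory size below is computed with sizeof(uint); this is only safe
// because sizeof(uint) == sizeof(float) (4 bytes) on the targeted platforms.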
kernel<<< nShards, blocksize, sizeof(uint) * shardMaxNumVertices >>>
(
num_nodes,
nShards,
shardMaxNumVertices,
factor,
threshold,
srcIndex,
destIndex,
srcValues,
values,
neighbors,
shardSizeScan,
windowSizeScan,
dev_lock
);
cudaPeekAtLastError();
cudaMemcpyAsync(&lock, dev_lock, sizeof(bool), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
++iterations;
float elapsed = timer.stop_record();
total_time += elapsed;
if(verbose){
printf("| %3dth | kernel function iteration | %3.4f |\n", iterations, elapsed);
}
}while(lock && iterations < maximum_iterations);
if(verbose){
printf("+--------------------------------------------------------------------------------+\n");
printf("| total | | %3.4f |\n", total_time );
printf("+--------------------------------------------------------------------------------+\n");
printf("| avg | | %3.4f |\n", total_time / iterations);
printf("+--------------------------------------------------------------------------------+\n");
}
}
void execute(graph_shard<float> &graph, config_t *conf){
buffer<float, DEVICE> dev_values;
buffer<float, DEVICE> dev_srcValues;
buffer<uint, DEVICE> dev_neigbors;
buffer<uint, DEVICE> dev_destIndex;
buffer<uint, DEVICE> dev_srcIndex;
buffer<uint, DEVICE> dev_shardSizeScan;
buffer<uint, DEVICE> dev_windowSizeScan;
dev_values = graph.values;
dev_srcValues = graph.src_values;
dev_srcIndex = graph.src_indexs;
dev_destIndex = graph.dest_indexs;
dev_neigbors = graph.neigbors_size;
dev_shardSizeScan = graph.shards_size;
dev_windowSizeScan = graph.windows_size;
cudaDeviceSynchronize();
process ( graph.n,
graph.blocksize,
graph.shard_max_num_nodes,
graph.num_shards,
conf->factor,
conf->threshold,
conf->maximum_iterations,
dev_values.ptr,
dev_windowSizeScan.ptr,
dev_shardSizeScan.ptr,
dev_srcValues.ptr,
dev_srcIndex.ptr,
dev_destIndex.ptr,
dev_neigbors.ptr,
conf->verbose
);
graph.values = dev_values;
write_to_file(graph, conf);
dev_values.free();
dev_srcValues.free();
dev_srcIndex.free();
dev_destIndex.free();
dev_shardSizeScan.free();
dev_windowSizeScan.free();
dev_neigbors.free();
cudaDeviceReset();
} |
4f4d1c978419abb7d6be7e4f59cbff7bdb3b1d47.hip | // !!! This is a file automatically generated by hipify!!!
#include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in functions declarations introduced in
// cuda 11.2, MSVC 15 does not fully support it so we need a dummy constexpr declaration
// that is provided by this header. However optional.h is only available
// starting CUDA 10.1
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
#if CUPY_USE_HIP
typedef hipStream_t hipStream_t;
namespace cuda {
using thrust::hip::par;
}
#else // #if CUPY_USE_HIP
namespace cuda {
using thrust::hip::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
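/*
 * Editor's illustration of the ordering described above (sample values assumed):
 * sorting {nan+2j, 1+nanj, 3+4j, 1+2j} under this rule yields
 * [1+2j, 3+4j, 1+nanj, nan+2j], i.e. fully finite values first, then
 * R+nanj, then nan+Rj (nan+nanj, if present, would come last).
 */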
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const thrust::tuple<size_t, T>& lhs,
const thrust::tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const thrust::less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<float>> >::operator() (
const thrust::tuple<size_t, complex<float>>& lhs, const thrust::tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<double>> >::operator() (
const thrust::tuple<size_t, complex<double>>& lhs, const thrust::tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
return false; // This will never be executed on the host
#endif
}
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, float> >::operator() (
const thrust::tuple<size_t, float>& lhs, const thrust::tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, double> >::operator() (
const thrust::tuple<size_t, double>& lhs, const thrust::tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, __half> >::operator() (
const thrust::tuple<size_t, __half>& lhs, const thrust::tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, thrust::less<T>());
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(keys_start);
dp_keys_last = thrust::device_pointer_cast(keys_start + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
thrust::divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
thrust::less< thrust::tuple<size_t, T> >());
}
}
};
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return thrust::less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
thrust::device_ptr<size_t> dp_first = thrust::device_pointer_cast(idx_start);
thrust::device_ptr<size_t> dp_last = thrust::device_pointer_cast(idx_start + n);
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
thrust::device_ptr<size_t> dp_idx_first, dp_idx_last;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = thrust::device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = thrust::device_pointer_cast(static_cast<size_t*>(idx_start) + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_idx_first,
thrust::modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = thrust::device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
thrust::divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
| 4f4d1c978419abb7d6be7e4f59cbff7bdb3b1d47.cu | #include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in functions declarations introduced in
// cuda 11.2, MSVC 15 does not fully support it so we need a dummy constexpr declaration
// that is provided by this header. However optional.h is only available
// starting CUDA 10.1
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
#if CUPY_USE_HIP
typedef hipStream_t cudaStream_t;
namespace cuda {
using thrust::hip::par;
}
#else // #if CUPY_USE_HIP
namespace cuda {
using thrust::cuda::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
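/*
 * Editor's illustration of the ordering described above (sample values assumed):
 * sorting {nan+2j, 1+nanj, 3+4j, 1+2j} under this rule yields
 * [1+2j, 3+4j, 1+nanj, nan+2j], i.e. fully finite values first, then
 * R+nanj, then nan+Rj (nan+nanj, if present, would come last).
 */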
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const thrust::tuple<size_t, T>& lhs,
const thrust::tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const thrust::less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<float>> >::operator() (
const thrust::tuple<size_t, complex<float>>& lhs, const thrust::tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<double>> >::operator() (
const thrust::tuple<size_t, complex<double>>& lhs, const thrust::tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
  return false; // This will never be executed on the host
#endif
}
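/*
 * Worked example (hypothetical input, not from the original source): sorting
 * {3.0f, NAN, 1.0f} with thrust::less<float> specialized through _real_less yields
 * {1.0f, 3.0f, NAN}; a nan on the left-hand side always compares "not less", so nan
 * values migrate to the end of the array, matching the NumPy convention quoted above.
 */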
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, float> >::operator() (
const thrust::tuple<size_t, float>& lhs, const thrust::tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, double> >::operator() (
const thrust::tuple<size_t, double>& lhs, const thrust::tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, __half> >::operator() (
const thrust::tuple<size_t, __half>& lhs, const thrust::tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, thrust::less<T>());
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(keys_start);
dp_keys_last = thrust::device_pointer_cast(keys_start + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
thrust::divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
thrust::less< thrust::tuple<size_t, T> >());
}
}
};
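/*
 * Sketch of the ndim > 1 path above with a hypothetical (2, 3) array (not part of the
 * original file): the transform fills the key array with flat_index / 3, i.e.
 * {0, 0, 0, 1, 1, 1}, and the zipped stable sort with thrust::less<tuple<size_t, T>>
 * compares the row key first and the value second. Each row of the last axis is
 * therefore sorted independently, the same effect as numpy.sort(a, axis=-1).
 */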
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return thrust::less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
thrust::device_ptr<size_t> dp_first = thrust::device_pointer_cast(idx_start);
thrust::device_ptr<size_t> dp_last = thrust::device_pointer_cast(idx_start + n);
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
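/*
 * Why the loop above implements a lexsort (reasoning sketch with a hypothetical k = 2,
 * not part of the original file): the index array is stable-sorted by key row 0 and then
 * by key row 1. Stability means that ties in the later pass preserve the order produced
 * by the earlier pass, so the last key row acts as the primary sort key and row 0 as the
 * final tie-breaker, which is the convention used by numpy.lexsort.
 */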
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
thrust::device_ptr<size_t> dp_idx_first, dp_idx_last;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = thrust::device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = thrust::device_pointer_cast(static_cast<size_t*>(idx_start) + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_idx_first,
thrust::modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = thrust::device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
dp_keys_first,
thrust::divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
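/*
 * Worked sketch for the ndim > 1 argsort path with a hypothetical (2, 3) input (not part
 * of the original file): the first transform seeds idx with flat_index % 3, i.e.
 * {0, 1, 2, 0, 1, 2} (the column position within each row), the second transform builds
 * the row keys {0, 0, 0, 1, 1, 1}, and the keyed stable sort permutes idx so that each
 * row of it holds the column order that would sort that row, matching
 * numpy.argsort(a, axis=-1).
 */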
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
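/*
 * Minimal host-side usage sketch (hypothetical; in practice CuPy's Cython layer calls
 * these entry points and supplies the memory pool behind cupy_malloc/cupy_free):
 *
 *   std::vector<ptrdiff_t> shape = {n};              // 1-D device array of n elements
 *   thrust_sort(dtype_id_float32, d_data, nullptr,
 *               shape, stream_ptr, pool_handle);     // keys are unused when ndim == 1
 *
 * dtype_id_float32, d_data, stream_ptr and pool_handle are placeholders here; the dtype
 * id must be whatever dtype_dispatcher() expects in the surrounding build.
 */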
|
3fbdc2f7a6e560a25d3ff3acff8631758f42f80d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_4_b;
int xdim0_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_b;
int ydim0_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_b;
int xdim1_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_b;
int ydim1_update_halo_kernel3_plus_4_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_plus_4_b * (y) + \
xdim0_update_halo_kernel3_plus_4_b * ydim0_update_halo_kernel3_plus_4_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_plus_4_b * (y) + \
xdim1_update_halo_kernel3_plus_4_b * ydim1_update_halo_kernel3_plus_4_b * \
(z))
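// Illustration of the flattened indexing (not part of the generated file): with the
// macros above, an access such as vol_flux_x[OPS_ACC0(0, -4, 0)] resolves to the offset
//   0 + xdim0_update_halo_kernel3_plus_4_b * (-4) + 0
// i.e. the element at the same x and z position but four points back in the y
// direction, which is exactly the halo copy performed by the user function below.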
// user function
__device__
inline void
update_halo_kernel3_plus_4_b_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_plus_4_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_b *
ydim0_update_halo_kernel3_plus_4_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_b *
ydim1_update_halo_kernel3_plus_4_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_plus_4_b_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_plus_4_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 106))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(106, "update_halo_kernel3_plus_4_b");
OPS_kernels[106].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_plus_4_b_h ||
ydim0 != ydim0_update_halo_kernel3_plus_4_b_h ||
xdim1 != xdim1_update_halo_kernel3_plus_4_b_h ||
ydim1 != ydim1_update_halo_kernel3_plus_4_b_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel3_plus_4_b_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel3_plus_4_b_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel3_plus_4_b_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel3_plus_4_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[106].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_b), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[106].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[106].mpi_time += t2 - t1;
OPS_kernels[106].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[106].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 3fbdc2f7a6e560a25d3ff3acff8631758f42f80d.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_4_b;
int xdim0_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_b;
int ydim0_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_b;
int xdim1_update_halo_kernel3_plus_4_b_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_b;
int ydim1_update_halo_kernel3_plus_4_b_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_plus_4_b * (y) + \
xdim0_update_halo_kernel3_plus_4_b * ydim0_update_halo_kernel3_plus_4_b * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_plus_4_b * (y) + \
xdim1_update_halo_kernel3_plus_4_b * ydim1_update_halo_kernel3_plus_4_b * \
(z))
// user function
__device__
inline void
update_halo_kernel3_plus_4_b_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, -4, 0)];
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, -4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_plus_4_b(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_b +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_b *
ydim0_update_halo_kernel3_plus_4_b;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_b +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_b *
ydim1_update_halo_kernel3_plus_4_b;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_plus_4_b_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_plus_4_b(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 106))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(106, "update_halo_kernel3_plus_4_b");
OPS_kernels[106].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_plus_4_b_h ||
ydim0 != ydim0_update_halo_kernel3_plus_4_b_h ||
xdim1 != xdim1_update_halo_kernel3_plus_4_b_h ||
ydim1 != ydim1_update_halo_kernel3_plus_4_b_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_b, &xdim0, sizeof(int));
xdim0_update_halo_kernel3_plus_4_b_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_b, &ydim0, sizeof(int));
ydim0_update_halo_kernel3_plus_4_b_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_b, &xdim1, sizeof(int));
xdim1_update_halo_kernel3_plus_4_b_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_b, &ydim1, sizeof(int));
ydim1_update_halo_kernel3_plus_4_b_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[106].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel3_plus_4_b<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[106].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[106].mpi_time += t2 - t1;
OPS_kernels[106].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[106].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
e6d37ed8aa2b54594068c47451307911bcf40eef.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc,char *argv[]){
if(argc<3){
printf("Usage: ./test.cu <ptx_file> <cuda_device>\n");
exit(0);
}
// Error code
hipError_t error;
// My number
unsigned int h_var=7;
// Initialize driver API
error = hipInit(0);
if((int)error!=0){
printf("Error! hipInit returned: %d\n",(int)error);
exit(0);
}
// Get Cuda Device and give handle
hipDevice_t cu_device;
error = hipDeviceGet(&cu_device,atoi(argv[2]));
if((int)error!=0){
printf("Error! hipDeviceGet returned: %d\n",(int)error);
exit(0);
}
// Create context to run on device
hipCtx_t cu_context;
error = hipCtxCreate(&cu_context, 0, cu_device);
if((int)error!=0){
printf("Error! hipCtxCreate returned: %d\n",(int)error);
exit(0);
}
// Load ptx code
hipModule_t cu_module;
error = hipModuleLoad(&cu_module,argv[1]);
if((int)error!=0){
printf("Error! hipModuleLoad returned: %d\n",(int)error);
exit(0);
}
// Get kernel function
hipFunction_t func;
error = hipModuleGetFunction(&func,cu_module,"testing");
if((int)error!=0){
printf("Error! hipModuleGetFunction returned: %d\n",(int)error);
exit(0);
}
hipDeviceptr_t var;
// Allocate device memory
unsigned int size = sizeof(unsigned int);
error = cuMemAlloc(&var, size);
if((int)error!=0){
printf("Error! cuMemAlloc returned: %d\n",(int)error);
exit(0);
}
	// Copy variable to device
error = cuMemcpyHtoD(var,&h_var,size);
if((int)error!=0){
printf("Error! cuMemcpyHtoD returned: %d\n",(int)error);
exit(0);
}
	// Launch kernel
void *args[] = {&var};
error = hipModuleLaunchKernel(func, 1, 1, 1, 1, 1, 1, 0, NULL, args, NULL);
if((int)error!=0){
printf("Error! hipModuleLaunchKernel returned: %d\n",(int)error);
exit(0);
}
// Get result to host
error = cuMemcpyDtoH(&h_var,var,size);
if((int)error!=0){
printf("Error! cuMemcpyDtoH returned: %d\n",(int)error);
exit(0);
}
// Free device memory
error = hipFree(var);
if((int)error!=0){
printf("Error! hipFree returned: %d\n",(int)error);
exit(0);
}
// Destroy context
error = hipCtxDestroy(cu_context);
if((int)error!=0){
printf("Error! hipCtxDestroy returned: %d\n",(int)error);
exit(0);
}
// Print result
printf("var: %d\n",h_var);
}
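// A kernel that this loader could drive might look like the following sketch (an
// assumption for illustration only; the PTX file passed as argv[1] is what actually
// defines the "testing" entry point):
//
//   extern "C" __global__ void testing(unsigned int *var) {
//       *var *= 2;   // any update visible after the device-to-host copy will do
//   }
//
// Compiled to PTX (for example with nvcc -ptx) and given on the command line, the
// program would then print "var: 14" for the initial value of 7.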
| e6d37ed8aa2b54594068c47451307911bcf40eef.cu | #include <cuda.h>
#include <stdio.h>
int main(int argc,char *argv[]){
if(argc<3){
printf("Usage: ./test.cu <ptx_file> <cuda_device>\n");
exit(0);
}
// Error code
CUresult error;
// My number
unsigned int h_var=7;
// Initialize driver API
error = cuInit(0);
if((int)error!=0){
printf("Error! cuInit returned: %d\n",(int)error);
exit(0);
}
// Get Cuda Device and give handle
CUdevice cu_device;
error = cuDeviceGet(&cu_device,atoi(argv[2]));
if((int)error!=0){
printf("Error! cuDeviceGet returned: %d\n",(int)error);
exit(0);
}
// Create context to run on device
CUcontext cu_context;
error = cuCtxCreate(&cu_context, 0, cu_device);
if((int)error!=0){
printf("Error! cuCtxCreate returned: %d\n",(int)error);
exit(0);
}
// Load ptx code
CUmodule cu_module;
error = cuModuleLoad(&cu_module,argv[1]);
if((int)error!=0){
printf("Error! cuModuleLoad returned: %d\n",(int)error);
exit(0);
}
// Get kernel function
CUfunction func;
error = cuModuleGetFunction(&func,cu_module,"testing");
if((int)error!=0){
printf("Error! cuModuleGetFunction returned: %d\n",(int)error);
exit(0);
}
CUdeviceptr var;
// Allocate device memory
unsigned int size = sizeof(unsigned int);
error = cuMemAlloc(&var, size);
if((int)error!=0){
printf("Error! cuMemAlloc returned: %d\n",(int)error);
exit(0);
}
	// Copy variable to device
error = cuMemcpyHtoD(var,&h_var,size);
if((int)error!=0){
printf("Error! cuMemcpyHtoD returned: %d\n",(int)error);
exit(0);
}
	// Launch kernel
void *args[] = {&var};
error = cuLaunchKernel(func, 1, 1, 1, 1, 1, 1, 0, NULL, args, NULL);
if((int)error!=0){
printf("Error! cuLaunchKernel returned: %d\n",(int)error);
exit(0);
}
// Get result to host
error = cuMemcpyDtoH(&h_var,var,size);
if((int)error!=0){
printf("Error! cuMemcpyDtoH returned: %d\n",(int)error);
exit(0);
}
// Free device memory
error = cuMemFree(var);
if((int)error!=0){
printf("Error! cuMemFree returned: %d\n",(int)error);
exit(0);
}
// Destroy context
error = cuCtxDestroy(cu_context);
if((int)error!=0){
printf("Error! cuCtxDestroy returned: %d\n",(int)error);
exit(0);
}
// Print result
printf("var: %d\n",h_var);
}
|
90509672f40e1c36a6e4a916b56a4d6e7776f762.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
//possible attempt to modify constant memory
__constant__ int A[1024];
__global__ void foo(int *B) {
A[threadIdx.x] = B[threadIdx.x];
}
| 90509672f40e1c36a6e4a916b56a4d6e7776f762.cu | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
//possible attempt to modify constant memory
__constant__ int A[1024];
__global__ void foo(int *B) {
A[threadIdx.x] = B[threadIdx.x];
}
|
b3464defb24e399cbd548a85b4afd0c0c739cd59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.cuh"
// __device__ char char_atomicCAS(char *addr, char cmp, char val) {
// unsigned *al_addr = reinterpret_cast<unsigned *>(((unsigned long long)addr)
// &
// (0xFFFFFFFFFFFFFFFCULL));
// unsigned al_offset = ((unsigned)(((unsigned long long)addr) & 3)) * 8;
// unsigned mask = 0xFFU;
// mask <<= al_offset;
// mask = ~mask;
// unsigned sval = val;
// sval <<= al_offset;
// unsigned old = *al_addr, assumed, setval;
// do {
// assumed = old;
// setval = assumed & mask;
// setval |= sval;
// old = atomicCAS(al_addr, assumed, setval);
// } while (assumed != old);
// return (char)((assumed >> al_offset) & 0xFFU);
// }
// template <typename T>
// __inline__ __device__ T warpPrefixSum(T val, int lane_id) {
// T val_shuffled;
// for (int offset = 1; offset < warpSize; offset *= 2) {
// val_shuffled = __shfl_up(val, offset);
// if (lane_id >= offset) {
// val += val_shuffled;
// }
// }
// return val;
// }
double wtime() {
double time[2];
struct timeval time1;
gettimeofday(&time1, NULL);
time[0] = time1.tv_sec;
time[1] = time1.tv_usec;
return time[0] + time[1] * 1.0e-6;
}
__device__ void __conv() { coalesced_group active = coalesced_threads(); }
__device__ void active_size(int n = 0) {
coalesced_group active = coalesced_threads();
if (active.thread_rank() == 0)
printf("TBID: %d WID: %d coalesced_group %llu at line %d\n", BID, WID,
active.size(), n);
}
__device__ int active_size2(char *txt, int n = 0) {
coalesced_group active = coalesced_threads();
if (active.thread_rank() == 0)
printf("%s coalesced_group %llu at line %d\n", txt, active.size(), n);
  return 0; // declared to return int, so give every path a return value
}
template <typename T>
void printH(T *ptr, int size) {
T *ptrh = new T[size];
CUDA_RT_CALL(hipMemcpy(ptrh, ptr, size * sizeof(T), hipMemcpyDeviceToHost));
printf("printH: ");
for (size_t i = 0; i < size; i++) {
// printf("%d\t", ptrh[i]);
std::cout << ptrh[i] << "\t";
}
printf("\n");
delete ptrh;
}
// https://forums.developer.nvidia.com/t/how-can-i-use-atomicsub-for-floats-and-doubles/64340/5
// __device__ double my_atomicSub(double *address, double val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(
// address_as_ull, assumed,
// __double_as_longlong(__longlong_as_double(assumed) -
// val)); // Note: uses integer comparison to avoid
// // hang in case of NaN (since NaN != NaN)
// } while (assumed != old);
// return __longlong_as_double(old);
// }
// https://forums.developer.nvidia.com/t/how-can-i-use-atomicsub-for-floats-and-doubles/64340/5
__device__ float my_atomicSub(float *address, float val) {
int *address_as_int = (int *)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed,
__float_as_int(__int_as_float(assumed) -
val)); // Note: uses integer comparison to avoid hang in
// case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
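// Usage sketch (hypothetical call site, not in this file): inside a kernel,
//   float old = my_atomicSub(&global_total[bin], weight);
// retries the CAS loop above until no other thread has raced the update and returns the
// value the slot held before the subtraction, mirroring the convention of the built-in
// atomicAdd.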
// __device__ long long my_atomicSub(long long *address, long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed,
// ((assumed)-val)); // Note: uses integer comparison to avoid
// // hang in case of NaN (since NaN != NaN)
// } while (assumed != old);
// return (old);
// }
// __device__ unsigned long long my_atomicSub(unsigned long long *address,
// unsigned long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, ((assumed)-val));
// } while (assumed != old);
// return (old);
// }
// __device__ long long my_atomicAdd(long long *address, long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, ((assumed) + val));
// } while (assumed != old);
// return (old);
// }
template <>
__device__ void printD<float>(float *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%f\t", ptr[i]);
}
printf("\n");
}
template <>
__device__ void printD<int>(int *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%d\t", ptr[i]);
}
printf("\n");
}
template <>
__device__ void printD<uint>(uint *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%u\t", ptr[i]);
}
printf("\n");
}
// template <typename T> __global__ void init_range_d(T *ptr, size_t size) {
// if (TID < size) {
// ptr[TID] = TID;
// }
// }
// template <typename T> void init_range(T *ptr, size_t size) {
// init_range_d<T><<<size / 512 + 1, 512>>>(ptr, size);
// }
// template <typename T> __global__ void init_array_d(T *ptr, size_t size, T v)
// {
// if (TID < size) {
// ptr[TID] = v;
// }
// }
// template <typename T> void init_array(T *ptr, size_t size, T v) {
// init_array_d<T><<<size / 512 + 1, 512>>>(ptr, size, v);
// }
| b3464defb24e399cbd548a85b4afd0c0c739cd59.cu | #include "util.cuh"
// __device__ char char_atomicCAS(char *addr, char cmp, char val) {
// unsigned *al_addr = reinterpret_cast<unsigned *>(((unsigned long long)addr)
// &
// (0xFFFFFFFFFFFFFFFCULL));
// unsigned al_offset = ((unsigned)(((unsigned long long)addr) & 3)) * 8;
// unsigned mask = 0xFFU;
// mask <<= al_offset;
// mask = ~mask;
// unsigned sval = val;
// sval <<= al_offset;
// unsigned old = *al_addr, assumed, setval;
// do {
// assumed = old;
// setval = assumed & mask;
// setval |= sval;
// old = atomicCAS(al_addr, assumed, setval);
// } while (assumed != old);
// return (char)((assumed >> al_offset) & 0xFFU);
// }
// template <typename T>
// __inline__ __device__ T warpPrefixSum(T val, int lane_id) {
// T val_shuffled;
// for (int offset = 1; offset < warpSize; offset *= 2) {
// val_shuffled = __shfl_up(val, offset);
// if (lane_id >= offset) {
// val += val_shuffled;
// }
// }
// return val;
// }
double wtime() {
double time[2];
struct timeval time1;
gettimeofday(&time1, NULL);
time[0] = time1.tv_sec;
time[1] = time1.tv_usec;
return time[0] + time[1] * 1.0e-6;
}
__device__ void __conv() { coalesced_group active = coalesced_threads(); }
__device__ void active_size(int n = 0) {
coalesced_group active = coalesced_threads();
if (active.thread_rank() == 0)
printf("TBID: %d WID: %d coalesced_group %llu at line %d\n", BID, WID,
active.size(), n);
}
__device__ int active_size2(char *txt, int n = 0) {
coalesced_group active = coalesced_threads();
if (active.thread_rank() == 0)
printf("%s coalesced_group %llu at line %d\n", txt, active.size(), n);
  return 0; // declared to return int, so give every path a return value
}
template <typename T>
void printH(T *ptr, int size) {
T *ptrh = new T[size];
CUDA_RT_CALL(cudaMemcpy(ptrh, ptr, size * sizeof(T), cudaMemcpyDeviceToHost));
printf("printH: ");
for (size_t i = 0; i < size; i++) {
// printf("%d\t", ptrh[i]);
std::cout << ptrh[i] << "\t";
}
printf("\n");
delete ptrh;
}
// https://forums.developer.nvidia.com/t/how-can-i-use-atomicsub-for-floats-and-doubles/64340/5
// __device__ double my_atomicSub(double *address, double val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(
// address_as_ull, assumed,
// __double_as_longlong(__longlong_as_double(assumed) -
// val)); // Note: uses integer comparison to avoid
// // hang in case of NaN (since NaN != NaN)
// } while (assumed != old);
// return __longlong_as_double(old);
// }
// https://forums.developer.nvidia.com/t/how-can-i-use-atomicsub-for-floats-and-doubles/64340/5
__device__ float my_atomicSub(float *address, float val) {
int *address_as_int = (int *)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(
address_as_int, assumed,
__float_as_int(__int_as_float(assumed) -
val)); // Note: uses integer comparison to avoid hang in
// case of NaN (since NaN != NaN)
} while (assumed != old);
return __int_as_float(old);
}
// __device__ long long my_atomicSub(long long *address, long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed,
// ((assumed)-val)); // Note: uses integer comparison to avoid
// // hang in case of NaN (since NaN != NaN)
// } while (assumed != old);
// return (old);
// }
// __device__ unsigned long long my_atomicSub(unsigned long long *address,
// unsigned long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, ((assumed)-val));
// } while (assumed != old);
// return (old);
// }
// __device__ long long my_atomicAdd(long long *address, long long val) {
// unsigned long long int *address_as_ull = (unsigned long long int *)address;
// unsigned long long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, ((assumed) + val));
// } while (assumed != old);
// return (old);
// }
template <>
__device__ void printD<float>(float *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%f\t", ptr[i]);
}
printf("\n");
}
template <>
__device__ void printD<int>(int *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%d\t", ptr[i]);
}
printf("\n");
}
template <>
__device__ void printD<uint>(uint *ptr, size_t size) {
printf("printDf: size %llu: ", (u64)size);
for (size_t i = 0; i < size; i++) {
printf("%u\t", ptr[i]);
}
printf("\n");
}
// template <typename T> __global__ void init_range_d(T *ptr, size_t size) {
// if (TID < size) {
// ptr[TID] = TID;
// }
// }
// template <typename T> void init_range(T *ptr, size_t size) {
// init_range_d<T><<<size / 512 + 1, 512>>>(ptr, size);
// }
// template <typename T> __global__ void init_array_d(T *ptr, size_t size, T v)
// {
// if (TID < size) {
// ptr[TID] = v;
// }
// }
// template <typename T> void init_array(T *ptr, size_t size, T v) {
// init_array_d<T><<<size / 512 + 1, 512>>>(ptr, size, v);
// }
|
1080044572d09a60657b1cf7307c363d4d053338.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include "pow_utv_gpu.h"
#include <mkl.h>
#include <time.h>
#define max( a, b ) ( (a) > (b) ? (a) : (b) )
#define min( a, b ) ( (a) < (b) ? (a) : (b) )
// ============================================================================
// Declaration of local prototypes.
static void matrix_generate( int m_A, int n_A,
double * buff_A, int ldim_A );
// ============================================================================
int main() {
int ldim_A;
double * buff_A, * buff_U, * buff_V;
int i, j;
int bl_size = 128;
int n_A[] = {2000,3000,4000,5000,6000,8000,10000,12000,15000};
int q[] = {1,2};
int p = 0;
// for timing
timespec t1, t2;
uint64_t diff;
double t_pow_gpu[ (sizeof( n_A ) / sizeof( int ))*(sizeof(q)/sizeof(int)) ];
// for output file
FILE * ofp;
char mode = 'a';
for ( j=0; j < sizeof( q ) / sizeof( int ); j++ ) {
printf( "%% q = %d \n", q[j] );
for ( i=0; i < sizeof( n_A ) / sizeof( int ); i++ ) {
// Create matrix A, matrix U, and matrix V.
buff_A = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
ldim_A = max( 1, n_A[ i ] );
buff_U = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
buff_V = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
// Generate matrix.
matrix_generate( n_A[ i ], n_A[ i ], buff_A, ldim_A );
// Factorize matrix.
printf( "%% Working on n = %d \n", n_A[ i ] );
// start timing
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, & t1 );
// do factorization
pow_utv_gpu( n_A[i], n_A[i], buff_A, ldim_A,
1, n_A[i], n_A[i], buff_U, n_A[i],
1, n_A[i], n_A[i], buff_V, n_A[i],
q[j] );
// stop timing and record time
hipDeviceSynchronize();
clock_gettime( CLOCK_MONOTONIC, & t2 );
diff = (1E9) * (t2.tv_sec - t1.tv_sec) + t2.tv_nsec - t1.tv_nsec;
t_pow_gpu[ i + j*(sizeof(n_A)/sizeof(int)) ] = ( double ) diff / (1E9);
// Free matrices and vectors.
free( buff_A );
free( buff_U );
free( buff_V );
}
}
// write results to file
ofp = fopen( "times_powurv_gpu.m", & mode );
fprintf( ofp, "%% the ROWS of the matrix t_rutv_gpu correspond to the values of q in ascending order (i.e. 1st row is q=1) \n \n " );
// write out vector of values of n used for these tests
fprintf( ofp, "n_rutv_gpu = [ \n" );
for ( i=0; i < sizeof( n_A ) / sizeof( int ); i++ ) {
fprintf( ofp, "%d ", n_A[ i ] );
}
fprintf( ofp, "]; \n \n");
// write out vector of times
fprintf( ofp, "t_rutv_gpu = [ \n" );
for ( i=0; i < (sizeof(n_A) * sizeof(q)) / (sizeof(int) * sizeof(int)); i++ ) {
fprintf( ofp, "%.2e ", t_pow_gpu[ i ] );
if ( (i+1) % (sizeof(n_A)/sizeof(int)) == 0 )
fprintf( ofp, "; \n" );
}
fprintf( ofp, "]; \n \n");
fclose(ofp);
printf( "%% End of Program\n" );
return 0;
}
// ============================================================================
static void matrix_generate( int m_A, int n_A, double * buff_A, int ldim_A ) {
int i, j;
srand( 10 );
for ( j = 0; j < n_A; j++ ) {
for ( i = 0; i < m_A; i++ ) {
buff_A[ i + j * ldim_A ] = ( double ) rand() / ( double ) RAND_MAX;
}
}
}
| 1080044572d09a60657b1cf7307c363d4d053338.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include "pow_utv_gpu.h"
#include <mkl.h>
#include <time.h>
#define max( a, b ) ( (a) > (b) ? (a) : (b) )
#define min( a, b ) ( (a) < (b) ? (a) : (b) )
// ============================================================================
// Declaration of local prototypes.
static void matrix_generate( int m_A, int n_A,
double * buff_A, int ldim_A );
// ============================================================================
int main() {
int ldim_A;
double * buff_A, * buff_U, * buff_V;
int i, j;
int bl_size = 128;
int n_A[] = {2000,3000,4000,5000,6000,8000,10000,12000,15000};
int q[] = {1,2};
int p = 0;
// for timing
timespec t1, t2;
uint64_t diff;
double t_pow_gpu[ (sizeof( n_A ) / sizeof( int ))*(sizeof(q)/sizeof(int)) ];
// for output file
FILE * ofp;
char mode = 'a';
for ( j=0; j < sizeof( q ) / sizeof( int ); j++ ) {
printf( "%% q = %d \n", q[j] );
for ( i=0; i < sizeof( n_A ) / sizeof( int ); i++ ) {
// Create matrix A, matrix U, and matrix V.
buff_A = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
ldim_A = max( 1, n_A[ i ] );
buff_U = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
buff_V = ( double * ) malloc( n_A[ i ] * n_A[ i ] * sizeof( double ) );
// Generate matrix.
matrix_generate( n_A[ i ], n_A[ i ], buff_A, ldim_A );
// Factorize matrix.
printf( "%% Working on n = %d \n", n_A[ i ] );
// start timing
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, & t1 );
// do factorization
pow_utv_gpu( n_A[i], n_A[i], buff_A, ldim_A,
1, n_A[i], n_A[i], buff_U, n_A[i],
1, n_A[i], n_A[i], buff_V, n_A[i],
q[j] );
// stop timing and record time
cudaDeviceSynchronize();
clock_gettime( CLOCK_MONOTONIC, & t2 );
diff = (1E9) * (t2.tv_sec - t1.tv_sec) + t2.tv_nsec - t1.tv_nsec;
t_pow_gpu[ i + j*(sizeof(n_A)/sizeof(int)) ] = ( double ) diff / (1E9);
// Free matrices and vectors.
free( buff_A );
free( buff_U );
free( buff_V );
}
}
// write results to file
ofp = fopen( "times_powurv_gpu.m", & mode );
fprintf( ofp, "%% the ROWS of the matrix t_rutv_gpu correspond to the values of q in ascending order (i.e. 1st row is q=1) \n \n " );
// write out vector of values of n used for these tests
fprintf( ofp, "n_rutv_gpu = [ \n" );
for ( i=0; i < sizeof( n_A ) / sizeof( int ); i++ ) {
fprintf( ofp, "%d ", n_A[ i ] );
}
fprintf( ofp, "]; \n \n");
// write out vector of times
fprintf( ofp, "t_rutv_gpu = [ \n" );
for ( i=0; i < (sizeof(n_A) * sizeof(q)) / (sizeof(int) * sizeof(int)); i++ ) {
fprintf( ofp, "%.2e ", t_pow_gpu[ i ] );
if ( (i+1) % (sizeof(n_A)/sizeof(int)) == 0 )
fprintf( ofp, "; \n" );
}
fprintf( ofp, "]; \n \n");
fclose(ofp);
printf( "%% End of Program\n" );
return 0;
}
// ============================================================================
static void matrix_generate( int m_A, int n_A, double * buff_A, int ldim_A ) {
int i, j;
srand( 10 );
for ( j = 0; j < n_A; j++ ) {
for ( i = 0; i < m_A; i++ ) {
buff_A[ i + j * ldim_A ] = ( double ) rand() / ( double ) RAND_MAX;
}
}
}
|
ed173aa8ea6d9db26fc439556ed4d438c1bc4923.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Takes one timestep forward in the Brownian integration on a group of particles
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
    Random number generation is done per thread with Saru's 3-seed constructor. The seeds are the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const Scalar deltaT,
unsigned int D)
{
if (!use_lambda)
{
// read in the gammas (1 dimensional array)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
SaruGPU saru(ptag, timestep + seed, 0x9977665);
Scalar rx = saru.s<Scalar>(-1,1);
Scalar ry = saru.s<Scalar>(-1,1);
Scalar rz = saru.s<Scalar>(-1,1);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
        // compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution;
        // it is not the dimensionality of the system)
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
Scalar sigma = fast::sqrt(T/mass);
vel.x = gaussian_rng(saru, sigma);
vel.y = gaussian_rng(saru, sigma);
if (D > 2)
vel.z = gaussian_rng(saru, sigma);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
}
}
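/*
 * Sanity check of the noise amplitude used above (reasoning only, not code from the
 * original file): a uniform draw r on [-1, 1] has <r^2> = 1/3, so scaling by
 * coeff = sqrt(3 * 2 * gamma * T / deltaT) gives <Fr^2> = 2 * gamma * T / deltaT, which
 * is the random-force variance required by the fluctuation-dissipation theorem for a
 * Brownian step of length deltaT.
 */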
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
hipError_t gpu_brownian_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim& box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const langevin_step_two_args& langevin_args,
const Scalar deltaT,
const unsigned int D)
{
// setup the grid to run the kernel
dim3 grid(langevin_args.num_blocks, 1, 1);
dim3 grid1(1, 1, 1);
dim3 threads(langevin_args.block_size, 1, 1);
dim3 threads1(256, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_brownian_step_one_kernel), dim3(grid),
dim3(threads),
(unsigned int)(sizeof(Scalar)*langevin_args.n_types)
, 0, d_pos,
d_vel,
d_image,
box,
d_diameter,
d_tag,
d_group_members,
group_size,
d_net_force,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
deltaT,
D);
return hipSuccess;
}
| ed173aa8ea6d9db26fc439556ed4d438c1bc4923.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Takes one full Brownian dynamics (overdamped Langevin) timestep forward for a group of particles
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to gamma (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const Scalar deltaT,
unsigned int D)
{
if (!use_lambda)
{
// read in the gammas (1 dimensional array)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
SaruGPU saru(ptag, timestep + seed, 0x9977665);
Scalar rx = saru.s<Scalar>(-1,1);
Scalar ry = saru.s<Scalar>(-1,1);
Scalar rz = saru.s<Scalar>(-1,1);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
// compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution;
// it is not the dimensionality of the system)
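// coeff is chosen so that <Fr_i^2> = coeff^2 * <r^2> = 2*gamma*T/deltaT per component (fluctuation-dissipation)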
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
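// overdamped (Brownian) step: dr = (F_net + F_r) * deltaT / gamma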
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
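// each component is resampled from a Gaussian with sigma = sqrt(T/m) (Maxwell-Boltzmann at the set temperature)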
Scalar sigma = fast::sqrt(T/mass);
vel.x = gaussian_rng(saru, sigma);
vel.y = gaussian_rng(saru, sigma);
if (D > 2)
vel.z = gaussian_rng(saru, sigma);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
}
}
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
cudaError_t gpu_brownian_step_one(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim& box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const langevin_step_two_args& langevin_args,
const Scalar deltaT,
const unsigned int D)
{
// setup the grid to run the kernel
dim3 grid(langevin_args.num_blocks, 1, 1);
dim3 grid1(1, 1, 1);
dim3 threads(langevin_args.block_size, 1, 1);
dim3 threads1(256, 1, 1);
// run the kernel
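// the dynamic shared memory request (sizeof(Scalar)*n_types) gives the kernel room to stage one gamma per particle type in s_gammas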
gpu_brownian_step_one_kernel<<< grid,
threads,
(unsigned int)(sizeof(Scalar)*langevin_args.n_types)
>>>(d_pos,
d_vel,
d_image,
box,
d_diameter,
d_tag,
d_group_members,
group_size,
d_net_force,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
deltaT,
D);
return cudaSuccess;
}
|
556b9dfe524869c4d80a2ceb479d8a0803ba3c73.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <energymin/energymin_amg_level.h>
#include <amg_level.h>
#include <basic_types.h>
#include <cutil.h>
#include <multiply.h>
#include <transpose.h>
#include <blas.h>
#include <util.h>
#include <thrust/logical.h>
#include <thrust/remove.h>
#include <thrust/adjacent_difference.h>
#include <assert.h>
#include <matrix_io.h>
#include <csr_multiply.h>
#include <thrust/logical.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <string>
#include <algorithm>
namespace amgx
{
namespace energymin
{
// --------------------------- Begin Base Class Public methods ------------------------------------
template <class T_Config>
Energymin_AMG_Level_Base<T_Config>
::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg)
{
selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope);
interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope);
strength = NULL;
std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope);
if (selector_val == "PMIS") //or any other classical selector
{
strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength
max_row_sum = amg->m_cfg->AMG_Config::template getParameter<double>("max_row_sum", amg->m_cfg_scope);
}
}
template <class T_Config>
Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base()
{
delete selector;
delete interpolator;
if (strength != NULL) { delete strength; }
}
// Compute A, P, and R operators
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::createCoarseVertices()
{
Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA();
Matrix<T_Config> &A = this->getA();
int size_all;
size_all = A.get_num_rows();
this->m_cf_map.resize(size_all);
thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0);
cudaCheckError();
markCoarseFinePoints();
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::createCoarseMatrices()
{
Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA();
Matrix<T_Config> &A = this->getA();
/* WARNING: exit if D1 interpolator is selected in distributed setting */
std::string s("");
s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config
::template getParameter<std::string>("energymin_interpolator",
AMG_Level<T_Config>::amg->m_cfg_scope);
// Compute Restriction operator
computeRestrictionOperator();
// Compute Prolongation operator and coarse matrix Ac
if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1)
{
// Create Prolongation operator
computeProlongationOperator();
computeAOperator();
}
else
{
computeAOperator_distributed();
}
RAP.copyAuxData(&A);
if (this->getA().is_matrix_singleGPU())
{
this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows()
* this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy();
}
else
{
// m_next_level_size is the size that will be used to allocate xc, bc vectors
int size, offset;
this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size);
this->m_next_level_size = size
* this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy();
}
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::markCoarseFinePoints()
{
Matrix<T_Config> &A = this->getA();
// Allocate necessary memory
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector;
int size_all, size_full, nnz_full;
BVector m_s_con;
IVector m_scratch;
FVector weights;
if (!A.is_matrix_singleGPU())
{
int offset;
// Need to get number of 2-ring rows
A.getOffsetAndSizeForView(ALL, &offset, &size_all);
A.getOffsetAndSizeForView(FULL, &offset, &size_full);
A.getNnzForView(FULL, &nnz_full);
weights.resize(size_full);
}
else
{
size_all = A.get_num_rows();
size_full = A.get_num_rows();
nnz_full = A.get_num_nz();
weights.resize(A.get_num_rows());
}
this->m_cf_map.resize(size_all);
m_s_con.resize(nnz_full);
m_scratch.resize(size_full);
thrust::fill(weights.begin(), weights.end(), 0.0);
cudaCheckError();
thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0);
cudaCheckError();
thrust::fill(m_s_con.begin(), m_s_con.end(), false);
cudaCheckError();
thrust::fill(m_scratch.begin(), m_scratch.end(), 0);
cudaCheckError();
if (strength != NULL)
{
if (!A.is_matrix_singleGPU())
{
ViewType oldView = A.currentView();
A.setView(FULL);
strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum);
A.setView(oldView);
A.manager->exchange_halo(weights, weights.tag);
}
else
{
strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum);
}
}
// Mark coarse and fine points
selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch);
this->m_cf_map.dirtybit = 1;
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeRestrictionOperator()
{
this->Profile.tic("computeR");
Matrix<T_Config> &A = this->getA();
//allocate necessary memory
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector;
// WARNING: Since energymin P is computed in CSC format and AMGX does not support
// CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!!
//generate the interpolation matrix
interpolator->generateInterpolationMatrix(A, this->m_cf_map, R,
AMG_Level<TConfig>::amg);
this->m_cf_map.clear();
this->m_cf_map.shrink_to_fit();
this->Profile.toc("computeR");
}
// Compute P = R^T (the restriction R generated above already stores P^T)
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeProlongationOperator()
{
this->Profile.tic("computeP");
P.set_initialized(0);
R.setView(OWNED);
transpose(R, P, R.get_num_rows());
if (!P.isLatencyHidingEnabled(*this->amg->m_cfg))
{
// This will cause bsrmv to not do latency hiding
P.setInteriorView(OWNED);
P.setExteriorView(OWNED);
}
P.set_initialized(1);
this->Profile.toc("computeP");
}
// Compute the Galerkin product: A_c=R*A*P
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1()
{
FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n",
AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1_distributed()
{
FatalError("Distributed energymin AMG not implemented for host\n",
AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1()
{
this->Profile.tic("computeA");
Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA();
RAP.addProps(CSR);
RAP.set_block_dimx(this->getA().get_block_dimx());
RAP.set_block_dimy(this->getA().get_block_dimy());
this->R.set_initialized(0);
this->R.addProps(CSR);
this->R.set_initialized(1);
this->P.set_initialized(0);
this->P.addProps(CSR);
this->P.set_initialized(1);
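// lazily create the CSR multiply workspace on first use and cache it on the AMG object for reuse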
void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace();
if ( wk == NULL )
{
wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg),
AMG_Level<TConfig_d>::amg->m_cfg_scope );
AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk );
}
RAP.set_initialized(0);
CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP,
NULL, NULL, NULL, NULL, NULL, NULL, wk);
RAP.set_initialized(1);
this->Profile.toc("computeA");
}
// Compute the restriction: rr=R*r
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::restrictResidual(VVector &r, VVector &rr)
{
typedef typename TConfig::MemSpace MemorySpace;
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
// we need to resize residual vector to make sure it can store halo rows to be sent
if (!P.is_matrix_singleGPU())
{
int desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()],
Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size());
rr.resize(desired_size);
}
this->Profile.tic("restrictRes");
// Disable speculative send of rr
if (P.is_matrix_singleGPU())
{
multiply( R, r, rr);
}
else
{
multiply_with_mask_restriction( R, r, rr, P);
}
rr.dirtybit = 1;
// Do I need this?
if (!P.is_matrix_singleGPU())
{
int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size();
// P.manager->transformVector(rr); //This is just to make sure size is right
if (rr.size() < desired_size)
{
rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size());
}
// P.manager->exchange_halo(rr, rr.tag);
}
this->Profile.toc("restrictRes");
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1_distributed()
{
FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented",
AMGX_ERR_NOT_IMPLEMENTED);
}
// Prolongate the error: x+=P*e
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp)
{
this->Profile.tic("proCorr");
// get coarse matrix
typedef typename TConfig::MemSpace MemorySpace;
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
// Use P.manager to exchange halo of e before doing P
// (since P has columns belonging to one of P.neighbors)
e.dirtybit = 1;
if (!P.is_matrix_singleGPU())
{
int e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()],
Ac.manager->halo_offsets[Ac.manager->neighbors.size()])
* e.get_block_size();
e.resize(e_size);
}
if (P.is_matrix_singleGPU())
{
if (e.size() > 0) {multiply( P, e, tmp);}
}
else
{
multiply_with_mask( P, e, tmp);
}
// get owned num rows for fine matrix
int owned_size;
if (Ac.is_matrix_distributed())
{
int owned_offset;
P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size);
}
else
{
owned_size = x.size();
}
//apply
axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size);
this->Profile.toc("proCorr");
x.dirtybit = 1;
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeAOperator()
{
if (this->A->get_block_size() == 1)
{
computeAOperator_1x1();
}
else
{
FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeAOperator_distributed()
{
if (this->A->get_block_size() == 1)
{
computeAOperator_1x1_distributed();
}
else
{
FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace energymin
} // namespace amgx
| 556b9dfe524869c4d80a2ceb479d8a0803ba3c73.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <energymin/energymin_amg_level.h>
#include <amg_level.h>
#include <basic_types.h>
#include <cutil.h>
#include <multiply.h>
#include <transpose.h>
#include <blas.h>
#include <util.h>
#include <thrust/logical.h>
#include <thrust/remove.h>
#include <thrust/adjacent_difference.h>
#include <assert.h>
#include <matrix_io.h>
#include <csr_multiply.h>
#include <thrust/logical.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <string>
#include <algorithm>
namespace amgx
{
namespace energymin
{
// --------------------------- Begin Base Class Public methods ------------------------------------
template <class T_Config>
Energymin_AMG_Level_Base<T_Config>
::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg)
{
selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope);
interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope);
strength = NULL;
std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope);
if (selector_val == "PMIS") //or any other classical selector
{
strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength
max_row_sum = amg->m_cfg->AMG_Config::template getParameter<double>("max_row_sum", amg->m_cfg_scope);
}
}
template <class T_Config>
Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base()
{
delete selector;
delete interpolator;
if (strength != NULL) { delete strength; }
}
// Compute A, P, and R operators
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::createCoarseVertices()
{
Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA();
Matrix<T_Config> &A = this->getA();
int size_all;
size_all = A.get_num_rows();
this->m_cf_map.resize(size_all);
thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0);
cudaCheckError();
markCoarseFinePoints();
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::createCoarseMatrices()
{
Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA();
Matrix<T_Config> &A = this->getA();
/* WARNING: exit if D1 interpolator is selected in distributed setting */
std::string s("");
s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config
::template getParameter<std::string>("energymin_interpolator",
AMG_Level<T_Config>::amg->m_cfg_scope);
// Compute Restriction operator
computeRestrictionOperator();
// Compute Prolongation operator and coarse matrix Ac
if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1)
{
// Create Prolongation operator
computeProlongationOperator();
computeAOperator();
}
else
{
computeAOperator_distributed();
}
RAP.copyAuxData(&A);
if (this->getA().is_matrix_singleGPU())
{
this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows()
* this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy();
}
else
{
// m_next_level_size is the size that will be used to allocate xc, bc vectors
int size, offset;
this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size);
this->m_next_level_size = size
* this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy();
}
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::markCoarseFinePoints()
{
Matrix<T_Config> &A = this->getA();
// Allocate necessary memory
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector;
int size_all, size_full, nnz_full;
BVector m_s_con;
IVector m_scratch;
FVector weights;
if (!A.is_matrix_singleGPU())
{
int offset;
// Need to get number of 2-ring rows
A.getOffsetAndSizeForView(ALL, &offset, &size_all);
A.getOffsetAndSizeForView(FULL, &offset, &size_full);
A.getNnzForView(FULL, &nnz_full);
weights.resize(size_full);
}
else
{
size_all = A.get_num_rows();
size_full = A.get_num_rows();
nnz_full = A.get_num_nz();
weights.resize(A.get_num_rows());
}
this->m_cf_map.resize(size_all);
m_s_con.resize(nnz_full);
m_scratch.resize(size_full);
thrust::fill(weights.begin(), weights.end(), 0.0);
cudaCheckError();
thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0);
cudaCheckError();
thrust::fill(m_s_con.begin(), m_s_con.end(), false);
cudaCheckError();
thrust::fill(m_scratch.begin(), m_scratch.end(), 0);
cudaCheckError();
if (strength != NULL)
{
if (!A.is_matrix_singleGPU())
{
ViewType oldView = A.currentView();
A.setView(FULL);
strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum);
A.setView(oldView);
A.manager->exchange_halo(weights, weights.tag);
}
else
{
strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum);
}
}
// Mark coarse and fine points
selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch);
this->m_cf_map.dirtybit = 1;
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeRestrictionOperator()
{
this->Profile.tic("computeR");
Matrix<T_Config> &A = this->getA();
//allocate necessary memory
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector;
typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector;
// WARNING: Since energymin P is computed in CSC format and AMGX does not support
// CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!!
//generate the interpolation matrix
interpolator->generateInterpolationMatrix(A, this->m_cf_map, R,
AMG_Level<TConfig>::amg);
this->m_cf_map.clear();
this->m_cf_map.shrink_to_fit();
this->Profile.toc("computeR");
}
// Compute P = R^T (the restriction R generated above already stores P^T)
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeProlongationOperator()
{
this->Profile.tic("computeP");
P.set_initialized(0);
R.setView(OWNED);
transpose(R, P, R.get_num_rows());
if (!P.isLatencyHidingEnabled(*this->amg->m_cfg))
{
// This will cause bsrmv to not do latency hiding
P.setInteriorView(OWNED);
P.setExteriorView(OWNED);
}
P.set_initialized(1);
this->Profile.toc("computeP");
}
// Compute the Galerkin product: A_c=R*A*P
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1()
{
FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n",
AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1_distributed()
{
FatalError("Distributed energymin AMG not implemented for host\n",
AMGX_ERR_NOT_IMPLEMENTED);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1()
{
this->Profile.tic("computeA");
Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA();
RAP.addProps(CSR);
RAP.set_block_dimx(this->getA().get_block_dimx());
RAP.set_block_dimy(this->getA().get_block_dimy());
this->R.set_initialized(0);
this->R.addProps(CSR);
this->R.set_initialized(1);
this->P.set_initialized(0);
this->P.addProps(CSR);
this->P.set_initialized(1);
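// lazily create the CSR multiply workspace on first use and cache it on the AMG object for reuse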
void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace();
if ( wk == NULL )
{
wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg),
AMG_Level<TConfig_d>::amg->m_cfg_scope );
AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk );
}
RAP.set_initialized(0);
CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP,
NULL, NULL, NULL, NULL, NULL, NULL, wk);
RAP.set_initialized(1);
this->Profile.toc("computeA");
}
// Compute the restriction: rr=R*r
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::restrictResidual(VVector &r, VVector &rr)
{
typedef typename TConfig::MemSpace MemorySpace;
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
// we need to resize residual vector to make sure it can store halo rows to be sent
if (!P.is_matrix_singleGPU())
{
int desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()],
Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size());
rr.resize(desired_size);
}
this->Profile.tic("restrictRes");
// Disable speculative send of rr
if (P.is_matrix_singleGPU())
{
multiply( R, r, rr);
}
else
{
multiply_with_mask_restriction( R, r, rr, P);
}
rr.dirtybit = 1;
// Do I need this?
if (!P.is_matrix_singleGPU())
{
int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size();
// P.manager->transformVector(rr); //This is just to make sure size is right
if (rr.size() < desired_size)
{
rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size());
}
// P.manager->exchange_halo(rr, rr.tag);
}
this->Profile.toc("restrictRes");
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >
::computeAOperator_1x1_distributed()
{
FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented",
AMGX_ERR_NOT_IMPLEMENTED);
}
// Prolongate the error: x+=P*e
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp)
{
this->Profile.tic("proCorr");
// get coarse matrix
typedef typename TConfig::MemSpace MemorySpace;
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
// Use P.manager to exchange halo of e before doing P
// (since P has columns belonging to one of P.neighbors)
e.dirtybit = 1;
if (!P.is_matrix_singleGPU())
{
int e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()],
Ac.manager->halo_offsets[Ac.manager->neighbors.size()])
* e.get_block_size();
e.resize(e_size);
}
if (P.is_matrix_singleGPU())
{
if (e.size() > 0) {multiply( P, e, tmp);}
}
else
{
multiply_with_mask( P, e, tmp);
}
// get owned num rows for fine matrix
int owned_size;
if (Ac.is_matrix_distributed())
{
int owned_offset;
P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size);
}
else
{
owned_size = x.size();
}
//apply
axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size);
this->Profile.toc("proCorr");
x.dirtybit = 1;
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeAOperator()
{
if (this->A->get_block_size() == 1)
{
computeAOperator_1x1();
}
else
{
FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
}
template <class T_Config>
void Energymin_AMG_Level_Base<T_Config>
::computeAOperator_distributed()
{
if (this->A->get_block_size() == 1)
{
computeAOperator_1x1_distributed();
}
else
{
FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED);
}
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace energymin
} // namespace amgx
|
ce4e59fdbd9df4a89ecc9219b32015d33a477616.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
//#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
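// reduce() over GradOp yields, per channel, v1 = sum(dL/dy) and v2 = sum(dL/dy * (value - beta)),
// where 'beta' is the channel mean in the out-of-place path and the shift parameter in the in-place path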
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: beta(m), output(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype beta;
const DeviceTensor3 output;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Forward_Inp_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
input[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(beta[c], output, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP;
gradExs[c] = - 0.5 * invstd * invstd * dotP;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP / gamma[c];
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
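// chain rule through y = gamma*(x - mean)/std with std = sqrt(E[x^2] - E[x]^2 + eps):
// d std/d E[x^2] = 1/(2*std) and d std/d E[x] = -E[x]/std, so
// gradEx  = -sum(dL/dy)*gamma/std + E[x]*dotP*gamma/std^3 and gradExs = -0.5*dotP*gamma/std^3,
// where dotP = sum(dL/dy * (x - mean))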
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale;
gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void Expectation_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> ex,
DeviceTensor<DType, 1> exs,
DType norm) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
ex[c] = xsum * norm;
exs[c] = xsquare * norm;
}
}
template <typename DType>
__global__ void Expectation_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] *
input[batch][c][x] * norm;
}
}
}
template <typename DType>
__global__ void Expectation_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] *
((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm;
}
}
}
} // namespace
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
auto output_ = at::zeros_like(input_);
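// std = sqrt(Var[x] + eps) with Var[x] = E[x^2] - (E[x])^2, taken from the supplied expectations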
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
output, input, ex, std, gamma, beta);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return output_;
}
at::Tensor BatchNorm_Forward_Inp_CUDA(
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Forward_Inp_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
input, ex, std, gamma, beta);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return input_;
}
std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor output_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(output_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Backward_Inp_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
gradoutput, output, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(input_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream,
gradoutput, input, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Expectation_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
auto ex_ = torch::zeros({input_.size(1)}, input_.options());
auto exs_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
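// norm = 1/(N * spatial_size): turns the per-channel sums of x and x^2 into the expectations E[x] and E[x^2]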
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Forward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, input, ex, exs, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return {ex_, exs_};
}
at::Tensor Expectation_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradEx_,
const at::Tensor gradExs_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Backward_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
at::Tensor Expectation_Inp_Backward_CUDA(
const at::Tensor gradInput_,
const at::Tensor output_,
const at::Tensor gradEx_,
const at::Tensor gradExs_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs */
//auto gradInput_ = at::zeros_like(output_);
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
/* cuda utils*/
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
hipLaunchKernelGGL(( Expectation_Backward_Inp_kernel<scalar_t>)
, dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs,
ex, std, gamma, beta, norm);
}));
AT_ASSERT(hipGetLastError() == hipSuccess);
return gradInput_;
}
| ce4e59fdbd9df4a89ecc9219b32015d33a477616.cu | #include <vector>
//#include <torch/extension.h>
#include <torch/serialize/tensor.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "common.h"
#include "device_tensor.h"
namespace {
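// reduce() over GradOp yields, per channel, v1 = sum(dL/dy) and v2 = sum(dL/dy * (value - beta)),
// where 'beta' is the channel mean in the out-of-place path and the shift parameter in the in-place path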
template <typename DType, typename Acctype, typename DeviceTensor3>
struct GradOp {
__device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g)
: beta(m), output(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = gradOutput[batch][plane][n];
DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta);
return Float2<DType, Acctype>(g, g * c);
}
const Acctype beta;
const DeviceTensor3 output;
const DeviceTensor3 gradOutput;
};
template <typename DType, typename Acctype>
struct SumOp {
__device__ SumOp(DeviceTensor<DType, 3> i) : input(i){}
__device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) {
DType g = input[batch][plane][n];
return Float2<DType, Acctype>(g, g * g);
}
DType mean;
DeviceTensor<DType, 3> input;
};
// Sum across (batch, x/y/z) applying Op() pointwise
template<typename T, typename Op, typename DeviceTensor3>
__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.getSize(0); ++batch) {
for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
template <typename DType>
__global__ void BatchNorm_Forward_kernel (
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
output[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Forward_Inp_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta) {
int c = blockIdx.x;
/* main operation */
for (int b = 0; b < input.getSize(0); ++b) {
for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
DType inp = input[b][c][x];
input[b][c][x] = gamma[c] * (inp - mean[c]) /
std[c] + beta[c];
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(beta[c], output, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP;
gradExs[c] = - 0.5 * invstd * invstd * dotP;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP / gamma[c];
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void BatchNorm_Backward_kernel (
DeviceTensor<DType, 3> gradoutput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 3> gradinput,
DeviceTensor<DType, 1> gradgamma,
DeviceTensor<DType, 1> gradbeta,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs) {
/* declarations of the variables */
/* Get the index and channels */
int c = blockIdx.x;
/* main operation */
GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
GradOp<DType, DType, DeviceTensor<DType, 3>>,
DeviceTensor<DType, 3>>(g, gradoutput, c);
DType gradOutputSum = res.v1;
DType dotP = res.v2;
DType invstd = DType(1.0) / std[c];
DType gradScale = invstd * gamma[c];
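// chain rule through y = gamma*(x - mean)/std with std = sqrt(E[x^2] - E[x]^2 + eps):
// d std/d E[x^2] = 1/(2*std) and d std/d E[x] = -E[x]/std, so
// gradEx  = -sum(dL/dy)*gamma/std + E[x]*dotP*gamma/std^3 and gradExs = -0.5*dotP*gamma/std^3,
// where dotP = sum(dL/dy * (x - mean))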
if (threadIdx.x == 0) {
gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale;
gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale;
}
if (gradinput.numElements() > 0) {
for (int batch = 0; batch < gradoutput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) {
gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale;
}
}
}
if (gradgamma.numElements() > 0) {
if (threadIdx.x == 0) {
gradgamma[c] += dotP * invstd;
}
}
if (gradbeta.numElements() > 0) {
if (threadIdx.x == 0) {
gradbeta[c] += gradOutputSum;
}
}
}
template <typename DType>
__global__ void Expectation_Forward_kernel (
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> ex,
DeviceTensor<DType, 1> exs,
DType norm) {
int c = blockIdx.x;
/* main operation */
SumOp<DType, DType> g(input);
Float2<DType, DType> res = reduce<Float2<DType, DType>,
SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c);
DType xsum = res.v1;
DType xsquare = res.v2;
if (threadIdx.x == 0) {
ex[c] = xsum * norm;
exs[c] = xsquare * norm;
}
}
template <typename DType>
__global__ void Expectation_Backward_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> input,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] *
input[batch][c][x] * norm;
}
}
}
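/* In-place variant: x is reconstructed from the normalized output as
 * (y - beta) / gamma * std + mean, and the result is accumulated into gradInput. */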
template <typename DType>
__global__ void Expectation_Backward_Inp_kernel (
DeviceTensor<DType, 3> gradInput,
DeviceTensor<DType, 3> output,
DeviceTensor<DType, 1> gradEx,
DeviceTensor<DType, 1> gradExs,
DeviceTensor<DType, 1> mean,
DeviceTensor<DType, 1> std,
DeviceTensor<DType, 1> gamma,
DeviceTensor<DType, 1> beta,
DType norm) {
int c = blockIdx.x;
/* main operation */
for (int batch = 0; batch < gradInput.getSize(0); ++batch) {
for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) {
gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] *
((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm;
}
}
}
} // namespace
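/* Host wrappers: std is formed on the host side as sqrt(E[x^2] - E[x]^2 + eps) via
 * tensor ops, and each kernel below is launched with one block per channel and a
 * thread count derived from the spatial extent (getNumThreads). */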
at::Tensor BatchNorm_Forward_CUDA(
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
auto output_ = at::zeros_like(input_);
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
output, input, ex, std, gamma, beta);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return output_;
}
at::Tensor BatchNorm_Forward_Inp_CUDA(
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
BatchNorm_Forward_Inp_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
input, ex, std, gamma, beta);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return input_;
}
std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor output_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(output_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
BatchNorm_Backward_Inp_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
gradoutput, output, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> BatchNorm_Backward_CUDA(
const at::Tensor gradoutput_,
const at::Tensor input_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs*/
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
auto gradinput_ = at::zeros_like(input_);
auto gradgamma_ = at::zeros_like(gamma_);
auto gradbeta_ = at::zeros_like(beta_);
auto gradEx_ = at::zeros_like(ex_);
auto gradExs_ = at::zeros_like(std_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] {
/* Device tensors */
DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_);
DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_);
DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
BatchNorm_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(
gradoutput, input, gradinput, gradgamma, gradbeta, ex, std,
gamma, beta, gradEx, gradExs);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_};
}
std::vector<at::Tensor> Expectation_Forward_CUDA(
const at::Tensor input_) {
/* outputs */
auto ex_ = torch::zeros({input_.size(1)}, input_.options());
auto exs_ = torch::zeros({input_.size(1)}, input_.options());
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_);
/* kernel function */
Expectation_Forward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(input, ex, exs, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return {ex_, exs_};
}
at::Tensor Expectation_Backward_CUDA(
const at::Tensor input_,
const at::Tensor gradEx_,
const at::Tensor gradExs_) {
/* outputs */
at::Tensor gradInput_ = at::zeros_like(input_);
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(input_.size(1));
dim3 threads(getNumThreads(input_.size(2)));
AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
    DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
/* kernel function */
Expectation_Backward_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
at::Tensor Expectation_Inp_Backward_CUDA(
const at::Tensor gradInput_,
const at::Tensor output_,
const at::Tensor gradEx_,
const at::Tensor gradExs_,
const at::Tensor ex_,
const at::Tensor exs_,
const at::Tensor gamma_,
const at::Tensor beta_,
float eps) {
/* outputs */
//auto gradInput_ = at::zeros_like(output_);
auto std_ = (exs_ - ex_ * ex_ + eps).sqrt();
/* cuda utils*/
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(output_.size(1));
dim3 threads(getNumThreads(output_.size(2)));
AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] {
scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2));
/* Device tensors */
DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_);
DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_);
DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_);
    DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_);
DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_);
DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_);
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_);
DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_);
/* kernel function */
Expectation_Backward_Inp_kernel<scalar_t>
<<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs,
ex, std, gamma, beta, norm);
}));
AT_ASSERT(cudaGetLastError() == cudaSuccess);
return gradInput_;
}
|
c7a23011700b1fab0ebd5e3bcb18bb2c00d84865.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <iostream>
#include "reduction1.cuh"
#include "reduction1_template.cuh"
#include "reduction2.cuh"
#include "reduction2_template.cuh"
#include "reduction3.cuh"
#include "reduction3_template.cuh"
typedef void (*pfnReduction)(int*, int*, const int*, size_t, int, int);
#define NUM_COUNT (1024 * 1024 * 1)
#define NUM_THREADS (1024)
#define MAX_BLOCKS (1024)
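// Times cIterations back-to-back launches of the given reduction with CUDA events
// and returns the elapsed time in milliseconds.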
double run(int cIterations, int *answer, int *partial, const int *in, const size_t N,
const int numBlocks, int numThreads, pfnReduction func)
{
hipEvent_t start, stop;
checkCudaErrors( hipEventCreate(&start) );
checkCudaErrors( hipEventCreate(&stop) );
hipEventRecord(start);
for (int i = 0; i < cIterations; ++i)
{
func(answer, partial, in, N, numBlocks, numThreads);
}
hipEventRecord(stop);
checkCudaErrors( hipDeviceSynchronize() );
float time = 0;
hipEventElapsedTime(&time, start, stop);
return time;
}
int main(int argc, char const *argv[])
{
int blocks = (NUM_COUNT + NUM_THREADS - 1) / NUM_THREADS;
if (blocks > MAX_BLOCKS) blocks = MAX_BLOCKS;
std::cout << "blocks: " << blocks << " threads: " << NUM_THREADS << std::endl;
thrust::host_vector<int> h_vec(NUM_COUNT);
thrust::fill(h_vec.begin(), h_vec.end(), 1);
thrust::device_vector<int> d_vec(NUM_COUNT);
thrust::device_vector<int> d_answer(1);
thrust::device_vector<int> d_partial(blocks);
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
int *raw_point_nums = thrust::raw_pointer_cast(&d_vec[0]);
int *raw_point_partial = thrust::raw_pointer_cast(&d_partial[0]);
int *raw_point_answer = thrust::raw_pointer_cast(&d_answer[0]);
struct
{
std::string name;
pfnReduction func;
} rgTests[] = {
{ "simple loop", reduction1 },
{ "simple loop template", reduction1t },
{ "atomicAdd", reduction2 },
{ "atomicAdd template", reduction2t },
{ "single pass", reduction3 },
{ "single pass template", reduction3t },
};
int numTests = sizeof(rgTests) / sizeof(rgTests[0]);
int host_answer = thrust::reduce(h_vec.begin(), h_vec.end());
for (int i = 0; i < numTests; ++i)
{
double time = run(100, raw_point_answer, raw_point_partial,
raw_point_nums, NUM_COUNT, blocks,
NUM_THREADS, rgTests[i].func);
int h_answer = d_answer[0];
std::string equal = (host_answer == h_answer) ? "=" : "!=";
std::cout << rgTests[i].name << " time: " << time
<< "ms host answer (" << host_answer << ") "
<< equal << " device answer (" << h_answer << ")"
<< std::endl;
}
return 0;
} | c7a23011700b1fab0ebd5e3bcb18bb2c00d84865.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
#include <iostream>
#include "reduction1.cuh"
#include "reduction1_template.cuh"
#include "reduction2.cuh"
#include "reduction2_template.cuh"
#include "reduction3.cuh"
#include "reduction3_template.cuh"
typedef void (*pfnReduction)(int*, int*, const int*, size_t, int, int);
#define NUM_COUNT (1024 * 1024 * 1)
#define NUM_THREADS (1024)
#define MAX_BLOCKS (1024)
double run(int cIterations, int *answer, int *partial, const int *in, const size_t N,
const int numBlocks, int numThreads, pfnReduction func)
{
cudaEvent_t start, stop;
checkCudaErrors( cudaEventCreate(&start) );
checkCudaErrors( cudaEventCreate(&stop) );
cudaEventRecord(start);
for (int i = 0; i < cIterations; ++i)
{
func(answer, partial, in, N, numBlocks, numThreads);
}
cudaEventRecord(stop);
checkCudaErrors( cudaThreadSynchronize() );
float time = 0;
cudaEventElapsedTime(&time, start, stop);
return time;
}
int main(int argc, char const *argv[])
{
int blocks = (NUM_COUNT + NUM_THREADS - 1) / NUM_THREADS;
if (blocks > MAX_BLOCKS) blocks = MAX_BLOCKS;
std::cout << "blocks: " << blocks << " threads: " << NUM_THREADS << std::endl;
thrust::host_vector<int> h_vec(NUM_COUNT);
thrust::fill(h_vec.begin(), h_vec.end(), 1);
thrust::device_vector<int> d_vec(NUM_COUNT);
thrust::device_vector<int> d_answer(1);
thrust::device_vector<int> d_partial(blocks);
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
int *raw_point_nums = thrust::raw_pointer_cast(&d_vec[0]);
int *raw_point_partial = thrust::raw_pointer_cast(&d_partial[0]);
int *raw_point_answer = thrust::raw_pointer_cast(&d_answer[0]);
struct
{
std::string name;
pfnReduction func;
} rgTests[] = {
{ "simple loop", reduction1 },
{ "simple loop template", reduction1t },
{ "atomicAdd", reduction2 },
{ "atomicAdd template", reduction2t },
{ "single pass", reduction3 },
{ "single pass template", reduction3t },
};
int numTests = sizeof(rgTests) / sizeof(rgTests[0]);
int host_answer = thrust::reduce(h_vec.begin(), h_vec.end());
for (int i = 0; i < numTests; ++i)
{
double time = run(100, raw_point_answer, raw_point_partial,
raw_point_nums, NUM_COUNT, blocks,
NUM_THREADS, rgTests[i].func);
int h_answer = d_answer[0];
std::string equal = (host_answer == h_answer) ? "=" : "!=";
std::cout << rgTests[i].name << " time: " << time
<< "ms host answer (" << host_answer << ") "
<< equal << " device answer (" << h_answer << ")"
<< std::endl;
}
return 0;
} |
97af1a706ea9c9ddd2cdee00172f285ec91d2d26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDA16_KERNEL_FW_X_CONST(multiply_const, X_VAL * k);
CUDA16_KERNEL_BW_X_CONST(multiply_const, k * GY_VAL);
CUDA16_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn);
CUDA16_KERNEL_FW_AB(multiply, ::__fmul_rn);
DECL_ATOMIC_OP(atomicHAdd, ::__fadd_rn);
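// Backward of elementwise multiply: ga += gy * b and gb += gy * a, accumulated with
// atomic half-precision adds; mba/mbb appear to be 0/1 multipliers that collapse the
// per-batch offset when the corresponding operand is broadcast over the minibatch.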
__global__ void multiply_bw_dev(
const half *pa, const half *pb, const half *, const half *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
half *pga, half *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const float gy = ::__half2float(pgy[i + shift]);
const std::uint32_t a_ofs = i + mba * shift;
const std::uint32_t b_ofs = i + mbb * shift;
::atomicHAdd(pga, a_ofs, gy * ::__half2float(pb[b_ofs]));
::atomicHAdd(pgb, b_ofs, gy * ::__half2float(pa[a_ofs]));
}
}
} // namespace
namespace primitiv {
namespace devices {
CUDA16_DEV_FW_X_CONST(multiply_const);
CUDA16_DEV_BW_X_CONST(multiply_const);
CUDA16_DEV_FW_X_SCALAR(multiply_scalar);
CUDA16_DEV_FW_AB(multiply);
CUDA16_DEV_BW_AB(multiply);
} // namespace devices
} // namespace primitiv
| 97af1a706ea9c9ddd2cdee00172f285ec91d2d26.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDA16_KERNEL_FW_X_CONST(multiply_const, X_VAL * k);
CUDA16_KERNEL_BW_X_CONST(multiply_const, k * GY_VAL);
CUDA16_KERNEL_FW_X_SCALAR_R(multiply_scalar, ::__fmul_rn);
CUDA16_KERNEL_FW_AB(multiply, ::__fmul_rn);
DECL_ATOMIC_OP(atomicHAdd, ::__fadd_rn);
__global__ void multiply_bw_dev(
const half *pa, const half *pb, const half *, const half *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
half *pga, half *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const float gy = ::__half2float(pgy[i + shift]);
const std::uint32_t a_ofs = i + mba * shift;
const std::uint32_t b_ofs = i + mbb * shift;
::atomicHAdd(pga, a_ofs, gy * ::__half2float(pb[b_ofs]));
::atomicHAdd(pgb, b_ofs, gy * ::__half2float(pa[a_ofs]));
}
}
} // namespace
namespace primitiv {
namespace devices {
CUDA16_DEV_FW_X_CONST(multiply_const);
CUDA16_DEV_BW_X_CONST(multiply_const);
CUDA16_DEV_FW_X_SCALAR(multiply_scalar);
CUDA16_DEV_FW_AB(multiply);
CUDA16_DEV_BW_AB(multiply);
} // namespace devices
} // namespace primitiv
|
b78e2a1ff5214151011dbc96f69d015f9e82b6c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
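// Grid-stride loop: each thread starts at its global index and advances by the
// total number of launched threads, so any n is covered by a fixed launch size.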
__global__ void add(int n, float* x, float* y) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pulo = gridDim.x * blockDim.x;
for(int i=tid;i < n; i += pulo) {
y[i] = x[i] + y[i];
}
}
int main() {
int n = 1 << 25;
float *x,*y;
hipMallocManaged(&x, n*sizeof(float));
hipMallocManaged(&y, n*sizeof(float));
for(int i=0;i<n;i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int block_size = 128;
int num_blocks = 4096;
hipLaunchKernelGGL(( add), dim3(num_blocks), dim3(block_size), 0, 0, n,x,y);
hipDeviceSynchronize();
float error = 0.0f;
for(int i=0;i<n;i++) {
error = fmax(error, fabs(y[i]-3.0f));
}
printf("Max error: %f\n", error);
hipFree(x);
hipFree(y);
return 0;
}
| b78e2a1ff5214151011dbc96f69d015f9e82b6c8.cu | #include <stdio.h>
#include <math.h>
__global__ void add(int n, float* x, float* y) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int pulo = gridDim.x * blockDim.x;
for(int i=tid;i < n; i += pulo) {
y[i] = x[i] + y[i];
}
}
int main() {
int n = 1 << 25;
float *x,*y;
cudaMallocManaged(&x, n*sizeof(float));
cudaMallocManaged(&y, n*sizeof(float));
for(int i=0;i<n;i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
int block_size = 128;
int num_blocks = 4096;
add<<<num_blocks, block_size>>>(n,x,y);
cudaDeviceSynchronize();
float error = 0.0f;
for(int i=0;i<n;i++) {
error = fmax(error, fabs(y[i]-3.0f));
}
printf("Max error: %f\n", error);
cudaFree(x);
cudaFree(y);
return 0;
}
|
ff39b6da82f12abd84c4b697764de334e7f00444.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cstring> // strcmp is used below for argument parsing
#include <iostream>
#include <sstream>
#include <string>
#include <fstream>
#include "ImageData.h"
#include "NeuralNet.h"
#define IMG_SIZE 6*6
#define ALPHABET_SIZE 10
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
void save_float_results(vector<float>* data, char* data_location);
void load_float_results(vector<float>* data, char* data_location);
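// Reads each digit image at the 6x6 target size and dumps its pixel values to
// data_text/<digit>/...; the feed-forward / back-propagation part is currently
// commented out, so the returned count is only meaningful when that block is enabled.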
int process_ocr(bool training, NeuralNet& nn, float bias, int iterations) {
int correct = 0;
int target_size = 6;
char file_string[100];
vector<float>* inputs = new vector<float>(IMG_SIZE);
vector<float>* outputs = new vector<float>(ALPHABET_SIZE);
for (int j = 0; j < iterations; j++) {
for (int i = 0; i < ALPHABET_SIZE; i++) {
delete outputs;
ostringstream os;
os << "data/" << i << "/data" << i << "_" << j << ".jpg";
ImageData input(os.str(), target_size, false);
if (input.error()&&0) {
cout << "Error reading " << os.str() << "\n";
delete inputs;
return 1;
}
input.getPixels(inputs);
sprintf(file_string, "data_text/%d/data_%d_%d.txt", i, i, j);
save_float_results(inputs, file_string);
outputs = new vector<float>(ALPHABET_SIZE);
/*
load_float_results(inputs, string(file_string));
nn.feedForward(inputs, outputs, bias);
if (training) {
float max_val = 0;
int max_index = 0;
for (int k = 0; k < outputs->size(); k++) {
if ((*outputs)[k] > max_val) {
max_val = (*outputs)[k];
max_index = k;
}
}
if (max_index == i) {
correct++;
}
} else {
nn.backPropagate(outputs, i);
}
*/
}
}
delete inputs;
delete outputs;
return correct;
}
void process_and() {
NeuralNet nn(2, 2, 1, 6, 1, .57);
vector<float>* inputs = new vector<float>(2);
vector<float>* outputs = new vector<float>(2);
int correct = 0;
for (int i = 0; i < 10000; i++) {
float a, b, t;
        // assign the random bits to a/b as well so the target t below is well-defined
        a = (*inputs)[0] = (rand() % 2 == 1) ? 1.0 : 0.0;
        b = (*inputs)[1] = (rand() % 2 == 1) ? 1.0 : 0.0;
t = (a == 1.0 && b == 1.0) ? 1.0 : 0.0;
nn.feedForward(inputs, outputs, 0);
nn.backPropagate(outputs, t);
}
nn.print();
cout << "INPUT\tINPUT\tOUTPUT\tOUTPUT\n";
for (int i = 0; i < 100; i++) {
float a, b, t;
        a = (*inputs)[0] = (rand() % 2 == 1) ? 1.0 : 0.0;
        b = (*inputs)[1] = (rand() % 2 == 1) ? 1.0 : 0.0;
t = (a == 1.0 && b == 1.0) ? 1.0 : 0.0;
nn.feedForward(inputs, outputs, 0);
cout << (*inputs)[0] <<"\t" << (*inputs)[1] << "\t"
<< (*outputs)[0] << "\t" << (*outputs)[1] << "\n";
if (((*outputs)[0] > (*outputs)[1] && t == 0.0)
|| ((*outputs)[0] < (*outputs)[1] && t == 1.0)) {
correct++;
}
}
cout << "AND success: " << correct << " / " << 100 << "\n";
delete inputs;
delete outputs;
exit(0);
}
int main(int argc, char *argv[]) {
srand((unsigned)time(NULL));
int training = 0, layers = 2, testing = 0;
float bias = 0, responseThreshold = 1, learningRate = 1;
int layerHeight = 10;
// argc is 1 if the command line was given the name of the binary
// and no additional parameters.
if (argc == 1) {
cout << "usage: " << argv[0] << " -t # -l # -b # -a # -r # -h #\n"
<< "-t: the number of training samples per digit.\n"
<< "-T: the number of testing samples per digit.\n"
<< "-l: the number of hidden layers; default = 2.\n"
<< "-b: the weight of the bias.\n"
<< "-a: the learning rate for back propagation.\n"
<< "-r: the response threshold for the sigmoid function.\n"
<< "-h: the number of neurons per hidden layer.\n";
return 0;
}
// Process command line arguments.
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-t") == 0) {
training = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
testing = atoi(argv[++i]);
} else if (strcmp(argv[i], "-l") == 0) {
layers = atoi(argv[++i]);
} else if (strcmp(argv[i], "-b") == 0) {
bias = atof(argv[++i]);
} else if (strcmp(argv[i], "-r") == 0) {
responseThreshold = atof(argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
learningRate = atof(argv[++i]);
} else if (strcmp(argv[i], "-h") == 0) {
layerHeight = atoi(argv[++i]);
}
}
if (layers < 0 || training <= 0 || testing <= 0 || responseThreshold <= 0
|| layerHeight <= 0 || learningRate < 0) {
cout << "Invalid argument specified.\n";
return 1;
}
NeuralNet nn(IMG_SIZE,
ALPHABET_SIZE,
layers,
layerHeight,
learningRate,
responseThreshold);
process_ocr(false, nn, bias, training);
int correct = process_ocr(true, nn, bias, testing);
cout << "Success: " << correct << " / " << testing * 10
<< " (" << ((float)correct / (float)testing * 10) << "%)\n";
return 0;
}
void save_float_results(vector<float>* data, char* data_location){
FILE *pFile;
pFile = fopen(data_location, "w");
for(int ii = 0; ii < data->size(); ii++){
fprintf(pFile, "%.15f\n", (*data)[ii]);
}
fclose(pFile);
}
void load_float_results(vector<float>* data, char* data_location){
data->clear();
ifstream file;
file.open(data_location);
string str;
while(getline(file, str)){
data->push_back(strtod(str.c_str(), NULL));
}
file.close();
}
| ff39b6da82f12abd84c4b697764de334e7f00444.cu | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <cstring> // strcmp is used below for argument parsing
#include <iostream>
#include <sstream>
#include <string>
#include <fstream>
#include "ImageData.h"
#include "NeuralNet.h"
#define IMG_SIZE 6*6
#define ALPHABET_SIZE 10
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace std;
void save_float_results(vector<float>* data, char* data_location);
void load_float_results(vector<float>* data, char* data_location);
int process_ocr(bool training, NeuralNet& nn, float bias, int iterations) {
int correct = 0;
int target_size = 6;
char file_string[100];
vector<float>* inputs = new vector<float>(IMG_SIZE);
vector<float>* outputs = new vector<float>(ALPHABET_SIZE);
for (int j = 0; j < iterations; j++) {
for (int i = 0; i < ALPHABET_SIZE; i++) {
delete outputs;
ostringstream os;
os << "data/" << i << "/data" << i << "_" << j << ".jpg";
ImageData input(os.str(), target_size, false);
if (input.error()&&0) {
cout << "Error reading " << os.str() << "\n";
delete inputs;
return 1;
}
input.getPixels(inputs);
sprintf(file_string, "data_text/%d/data_%d_%d.txt", i, i, j);
save_float_results(inputs, file_string);
outputs = new vector<float>(ALPHABET_SIZE);
/*
load_float_results(inputs, string(file_string));
nn.feedForward(inputs, outputs, bias);
if (training) {
float max_val = 0;
int max_index = 0;
for (int k = 0; k < outputs->size(); k++) {
if ((*outputs)[k] > max_val) {
max_val = (*outputs)[k];
max_index = k;
}
}
if (max_index == i) {
correct++;
}
} else {
nn.backPropagate(outputs, i);
}
*/
}
}
delete inputs;
delete outputs;
return correct;
}
void process_and() {
NeuralNet nn(2, 2, 1, 6, 1, .57);
vector<float>* inputs = new vector<float>(2);
vector<float>* outputs = new vector<float>(2);
int correct = 0;
for (int i = 0; i < 10000; i++) {
float a, b, t;
        // assign the random bits to a/b as well so the target t below is well-defined
        a = (*inputs)[0] = (rand() % 2 == 1) ? 1.0 : 0.0;
        b = (*inputs)[1] = (rand() % 2 == 1) ? 1.0 : 0.0;
t = (a == 1.0 && b == 1.0) ? 1.0 : 0.0;
nn.feedForward(inputs, outputs, 0);
nn.backPropagate(outputs, t);
}
nn.print();
cout << "INPUT\tINPUT\tOUTPUT\tOUTPUT\n";
for (int i = 0; i < 100; i++) {
float a, b, t;
        a = (*inputs)[0] = (rand() % 2 == 1) ? 1.0 : 0.0;
        b = (*inputs)[1] = (rand() % 2 == 1) ? 1.0 : 0.0;
t = (a == 1.0 && b == 1.0) ? 1.0 : 0.0;
nn.feedForward(inputs, outputs, 0);
cout << (*inputs)[0] <<"\t" << (*inputs)[1] << "\t"
<< (*outputs)[0] << "\t" << (*outputs)[1] << "\n";
if (((*outputs)[0] > (*outputs)[1] && t == 0.0)
|| ((*outputs)[0] < (*outputs)[1] && t == 1.0)) {
correct++;
}
}
cout << "AND success: " << correct << " / " << 100 << "\n";
delete inputs;
delete outputs;
exit(0);
}
int main(int argc, char *argv[]) {
srand((unsigned)time(NULL));
int training = 0, layers = 2, testing = 0;
float bias = 0, responseThreshold = 1, learningRate = 1;
int layerHeight = 10;
// argc is 1 if the command line was given the name of the binary
// and no additional parameters.
if (argc == 1) {
cout << "usage: " << argv[0] << " -t # -l # -b # -a # -r # -h #\n"
<< "-t: the number of training samples per digit.\n"
<< "-T: the number of testing samples per digit.\n"
<< "-l: the number of hidden layers; default = 2.\n"
<< "-b: the weight of the bias.\n"
<< "-a: the learning rate for back propagation.\n"
<< "-r: the response threshold for the sigmoid function.\n"
<< "-h: the number of neurons per hidden layer.\n";
return 0;
}
// Process command line arguments.
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "-t") == 0) {
training = atoi(argv[++i]);
} else if (strcmp(argv[i], "-T") == 0) {
testing = atoi(argv[++i]);
} else if (strcmp(argv[i], "-l") == 0) {
layers = atoi(argv[++i]);
} else if (strcmp(argv[i], "-b") == 0) {
bias = atof(argv[++i]);
} else if (strcmp(argv[i], "-r") == 0) {
responseThreshold = atof(argv[++i]);
} else if (strcmp(argv[i], "-a") == 0) {
learningRate = atof(argv[++i]);
} else if (strcmp(argv[i], "-h") == 0) {
layerHeight = atoi(argv[++i]);
}
}
if (layers < 0 || training <= 0 || testing <= 0 || responseThreshold <= 0
|| layerHeight <= 0 || learningRate < 0) {
cout << "Invalid argument specified.\n";
return 1;
}
NeuralNet nn(IMG_SIZE,
ALPHABET_SIZE,
layers,
layerHeight,
learningRate,
responseThreshold);
process_ocr(false, nn, bias, training);
int correct = process_ocr(true, nn, bias, testing);
cout << "Success: " << correct << " / " << testing * 10
<< " (" << ((float)correct / (float)testing * 10) << "%)\n";
return 0;
}
void save_float_results(vector<float>* data, char* data_location){
FILE *pFile;
pFile = fopen(data_location, "w");
for(int ii = 0; ii < data->size(); ii++){
fprintf(pFile, "%.15f\n", (*data)[ii]);
}
fclose(pFile);
}
void load_float_results(vector<float>* data, char* data_location){
data->clear();
ifstream file;
file.open(data_location);
string str;
while(getline(file, str)){
data->push_back(strtod(str.c_str(), NULL));
}
file.close();
}
|
aa9ad1919eae01c781ba3d61209e25b706bd496f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Defines the matrix operations for sequential dense with CUDA
*/
#include <petscpkg_version.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/
#include <petsccublas.h>
/* cublas definitions are here */
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnCpotrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h))
#define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnCpotrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f))
#define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnCpotrs((a),(b),(c),(d),(hipComplex*)(e),(f),(hipComplex*)(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnCsytrf((a),(b),(c),(hipComplex*)(d),(e),(f),(hipComplex*)(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnCsytrf_bufferSize((a),(b),(hipComplex*)(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnCgetrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnCgetrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnCgetrs((a),(b),(c),(d),(hipComplex*)(e),(f),(g),(hipComplex*)(h),(i),(j))
#else /* complex double */
#define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnZpotrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h))
#define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnZpotrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f))
#define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnZpotrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(hipDoubleComplex*)(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnZsytrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(f),(hipDoubleComplex*)(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnZsytrf_bufferSize((a),(b),(hipDoubleComplex*)(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnZgetrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnZgetrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnZgetrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(g),(hipDoubleComplex*)(h),(i),(j))
#endif
#else /* real single */
#if defined(PETSC_USE_REAL_SINGLE)
#define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h))
#define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f))
#define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnSsytrf_bufferSize((a),(b),(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j))
#else /* real double */
#define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h))
#define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f))
#define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnDsytrf_bufferSize((a),(b),(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j))
#endif
#endif
typedef struct {
PetscScalar *d_v; /* pointer to the matrix on the GPU */
PetscBool user_alloc;
PetscScalar *unplacedarray; /* if one called MatCUDADensePlaceArray(), this is where it stashed the original */
PetscBool unplaced_user_alloc;
/* factorization support */
int *d_fact_ipiv; /* device pivots */
PetscScalar *d_fact_work; /* device workspace */
int fact_lwork;
int *d_fact_info; /* device info */
/* workspace */
Vec workvec;
} Mat_SeqDenseCUDA;
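/* Allocates (or adopts) the device buffer: with d_data == NULL an lda*n array is
   hipMalloc'ed; a user-supplied pointer is adopted as-is and the matrix is marked
   as resident on the GPU. */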
PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscBool iscuda;
hipError_t cerr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) PetscFunctionReturn(0);
/* it may happen CPU preallocation has not been performed */
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
if (cA->lda <= 0) cA->lda = A->rmap->n;
if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); }
if (!d_data) { /* petsc-allocated storage */
ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr);
cerr = hipMalloc((void**)&dA->d_v,cA->lda*A->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
dA->user_alloc = PETSC_FALSE;
} else { /* user-allocated storage */
dA->d_v = d_data;
dA->user_alloc = PETSC_TRUE;
A->offloadmask = PETSC_OFFLOAD_GPU;
}
A->preallocated = PETSC_TRUE;
A->assembled = PETSC_TRUE;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQDENSECUDA);
ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */
ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr);
}
ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr);
if (cA->lda > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */
cerr = hipMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
} else {
cerr = hipMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscBool copy;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQDENSECUDA);
if (A->boundtocpu) PetscFunctionReturn(0);
copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED);
ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
if (copy) {
if (!dA->d_v) { /* Allocate GPU memory if not present */
ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr);
}
ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr);
if (cA->lda > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */
cerr = hipMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
}
} else {
cerr = hipMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyHostToDevice);CHKERRCUDA(cerr);
}
ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a)
{
Mat_SeqDense *aa = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); }
dA->unplacedarray = dA->d_v;
dA->unplaced_user_alloc = dA->user_alloc;
dA->d_v = (PetscScalar*)a;
dA->user_alloc = PETSC_TRUE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); }
dA->d_v = dA->unplacedarray;
dA->user_alloc = dA->unplaced_user_alloc;
dA->unplacedarray = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a)
{
Mat_SeqDense *aa = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
hipError_t cerr;
PetscFunctionBegin;
if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); }
dA->d_v = (PetscScalar*)a;
dA->user_alloc = PETSC_FALSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!dA->d_v) {
ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr);
}
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A)
{
#if PETSC_PKG_CUDA_VERSION_GE(10,1,0)
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
PetscErrorCode ierr;
hipError_t ccer;
cusolverStatus_t cerr;
hipsolverDnHandle_t handle;
int n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented");
else if (A->factortype == MAT_FACTOR_CHOLESKY) {
if (!dA->d_fact_ipiv) { /* spd */
int il;
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
cerr = cusolverDnXpotri_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr);
if (il > dA->fact_lwork) {
dA->fact_lwork = il;
ccer = hipFree(dA->d_fact_work);CHKERRCUDA(ccer);
ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXpotri(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
/* TODO (write cuda kernel) */
ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented");
}
#if defined(PETSC_USE_DEBUG)
ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr);
A->ops->solve = NULL;
A->ops->solvetranspose = NULL;
A->ops->matsolve = NULL;
A->factortype = MAT_FACTOR_NONE;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
#else
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher");
#endif
}
static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDense *x = (Mat_SeqDense*)X->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
const PetscScalar *da;
PetscScalar *dx;
hipsolverDnHandle_t handle;
PetscBool iscuda;
int nrhs,n,lda,ldx;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
hipError_t ccer;
cusolverStatus_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
if (X != B) {
ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
/* MatMatSolve does not have a dispatching mechanism, we may end up with a MATSEQDENSE here */
ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) {
ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
cerr = cusolverDnXgetrs(handle,HIPBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else if (A->factortype == MAT_FACTOR_CHOLESKY) {
ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) { /* spd */
/* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. */
cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented");
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr);
}
#if defined(PETSC_USE_DEBUG)
ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
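/* Single right-hand-side solve with the stored factors: getrs for LU, potrs for
   Cholesky; a CUDA work vector is used when yy is not already a CUDA vector. */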
static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
const PetscScalar *da;
PetscScalar *y;
hipsolverDnHandle_t handle;
int one = 1,n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
hipError_t ccer;
cusolverStatus_t cerr;
PetscBool iscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
/* MatSolve does not have a dispatching mechanism, we may end up with a VECSTANDARD here */
ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
if (iscuda) {
ierr = VecCopy(xx,yy);CHKERRQ(ierr);
ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr);
} else {
if (!dA->workvec) {
ierr = MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr);
}
ierr = VecCopy(xx,dA->workvec);CHKERRQ(ierr);
ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) {
ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
cerr = cusolverDnXgetrs(handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else if (A->factortype == MAT_FACTOR_CHOLESKY) {
ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) { /* spd */
/* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. */
cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented");
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (iscuda) {
ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr);
} else {
ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr);
ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr);
}
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
int m,n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cusolverStatus_t cerr;
hipsolverDnHandle_t handle;
hipError_t ccer;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) {
ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer);
}
if (!dA->fact_lwork) {
cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
A->factortype = MAT_FACTOR_LU;
ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr);
A->ops->solve = MatSolve_SeqDenseCUDA;
A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA;
A->ops->matsolve = MatMatSolve_SeqDenseCUDA;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
int n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cusolverStatus_t cerr;
hipsolverDnHandle_t handle;
hipError_t ccer;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (A->spd) {
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
if (!dA->fact_lwork) {
cerr = hipsolverDnXpotrf_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = hipsolverDnXpotrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(int), hipMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
A->factortype = MAT_FACTOR_CHOLESKY;
ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. Use MAT_FACTOR_LU");
#if 0
/* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines
The code below should work, and it can be activated when *sytrs routines will be available */
if (!dA->d_fact_ipiv) {
ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer);
}
if (!dA->fact_lwork) {
cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXsytrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
A->ops->solve = MatSolve_SeqDenseCUDA;
A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA;
A->ops->matsolve = MatMatSolve_SeqDenseCUDA;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB)
{
const PetscScalar *da,*db;
PetscScalar *dc;
PetscScalar one=1.0,zero=0.0;
int m,n,k;
PetscInt alda,blda,clda;
PetscErrorCode ierr;
hipblasHandle_t cublasv2handle;
PetscBool Aiscuda,Biscuda;
hipblasStatus_t berr;
hipError_t cerr;
PetscFunctionBegin;
/* we may end up with SEQDENSE as one of the arguments */
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr);
if (!Aiscuda) {
ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
}
if (!Biscuda) {
ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr);
if (tA) {
ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr);
} else {
ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr);
}
if (!m || !n || !k) PetscFunctionReturn(0);
ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr);
ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXgemm(cublasv2handle,tA ? HIPBLAS_OP_T : HIPBLAS_OP_N,tB ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr);
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr);
if (!Aiscuda) {
ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
}
if (!Biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = op(A)*xx + yy
if yy == NULL, only MatMult */
static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
const PetscScalar *xarray,*da;
PetscScalar *zarray;
PetscScalar one=1.0,zero=0.0;
int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (yy && yy != zz) { /* mult add */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);
}
if (!A->rmap->n || !A->cmap->n) {
if (!yy) { /* mult only */
ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXgemv(cublasv2handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N,
m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 0 : A->rmap->n));CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
*array = mat->v;
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
*array = mat->v;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseRestoreArray_SeqDenseCUDA(Mat A,PetscScalar **array)
{
PetscFunctionBegin;
*array = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha)
{
Mat_SeqDense *y = (Mat_SeqDense*)Y->data;
PetscScalar *dy;
int j,N,m,lday,one = 1;
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr);
ierr = PetscMPIIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr);
ierr = PetscMPIIntCast(Y->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr);
ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (lday>m) {
for (j=0; j<Y->cmap->n; j++) {
berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr);
}
} else {
berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr);
}
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(N);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str)
{
Mat_SeqDense *x = (Mat_SeqDense*)X->data;
Mat_SeqDense *y = (Mat_SeqDense*)Y->data;
const PetscScalar *dx;
PetscScalar *dy;
int j,N,m,ldax,lday,one = 1;
hipblasHandle_t cublasv2handle;
hipblasStatus_t berr;
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr);
if (alpha != 0.0) {
ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr);
}
ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr);
ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr);
ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr);
ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (ldax>m || lday>m) {
for (j=0; j<X->cmap->n; j++) {
berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr);
}
} else {
berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr);
}
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr);
if (alpha != 0.0) {
ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatReset_SeqDenseCUDA(Mat A)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
hipError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (dA) {
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); }
cerr = hipFree(dA->d_fact_ipiv);CHKERRCUDA(cerr);
cerr = hipFree(dA->d_fact_info);CHKERRCUDA(cerr);
cerr = hipFree(dA->d_fact_work);CHKERRCUDA(cerr);
ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr);
}
ierr = PetscFree(A->spptr);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
/* prevent copying the data back to the CPU if we own the data pointer */
if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; }
ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr);
ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr);
if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) {
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
const PetscScalar *da;
PetscScalar *db;
hipError_t cerr;
PetscInt ldb;
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr);
ierr = MatDenseGetLDA(*B,&ldb);CHKERRQ(ierr);
if (a->lda > A->rmap->n || ldb > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* it can be done better */
cerr = hipMemcpy(db+j*ldb,da+j*a->lda,m*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr);
}
} else {
cerr = hipMemcpy(db,da,(sizeof(PetscScalar)*A->cmap->n)*A->rmap->n,hipMemcpyDeviceToDevice);CHKERRCUDA(cerr);
}
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr);
(*B)->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
#include <petsc/private/vecimpl.h>
static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscScalar *x;
PetscBool viscuda;
hipError_t cerr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr);
if (viscuda && !v->boundtocpu) { /* update device data */
ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr);
if (A->offloadmask & PETSC_OFFLOAD_GPU) {
cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr);
} else {
cerr = hipMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr);
}
ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr);
} else { /* update host data */
ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr);
if (A->offloadmask & PETSC_OFFLOAD_CPU) {
ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr);
} else {
cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr);
ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU) {
(*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense;
} else {
(*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense;
}
(*fact)->factortype = ftype;
ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr);
ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (a->cmat && cend-cbegin != a->cmat->cmap->N) {
ierr = MatDestroy(&a->cmat);CHKERRQ(ierr);
}
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
if (!a->cmat) {
ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr);
}
ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr);
if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); }
a->cmat->offloadmask = A->offloadmask;
a->matinuse = cbegin + 1;
*v = a->cmat;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first");
if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix");
if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()");
a->matinuse = 0;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr);
ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
A->boundtocpu = flg;
if (!flg) {
PetscBool iscuda;
ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = VecDestroy(&a->cvec);CHKERRQ(ierr);
}
ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatDestroy(&a->cmat);CHKERRQ(ierr);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C",MatDenseRestoreArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr);
A->ops->duplicate = MatDuplicate_SeqDenseCUDA;
A->ops->mult = MatMult_SeqDenseCUDA;
A->ops->multadd = MatMultAdd_SeqDenseCUDA;
A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA;
A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->axpy = MatAXPY_SeqDenseCUDA;
A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA;
A->ops->lufactor = MatLUFactor_SeqDenseCUDA;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA;
A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA;
A->ops->scale = MatScale_SeqDenseCUDA;
} else {
/* make sure we have an up-to-date copy on the CPU */
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C",MatDenseRestoreArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr);
A->ops->duplicate = MatDuplicate_SeqDense;
A->ops->mult = MatMult_SeqDense;
A->ops->multadd = MatMultAdd_SeqDense;
A->ops->multtranspose = MatMultTranspose_SeqDense;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense;
A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense;
A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense;
A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense;
A->ops->axpy = MatAXPY_SeqDense;
A->ops->choleskyfactor = MatCholeskyFactor_SeqDense;
A->ops->lufactor = MatLUFactor_SeqDense;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense;
A->ops->getcolumnvector = MatGetColumnVector_SeqDense;
A->ops->scale = MatScale_SeqDense;
}
if (a->cmat) {
ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat)
{
Mat B;
PetscErrorCode ierr;
PetscFunctionBegin;
if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) {
/* TODO these cases should be optimized */
ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
B = *newmat;
ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr);
ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr);
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr);
B->ops->bindtocpu = NULL;
B->ops->destroy = MatDestroy_SeqDense;
B->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat)
{
Mat_SeqDenseCUDA *dB;
Mat B;
PetscErrorCode ierr;
PetscFunctionBegin;
if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) {
/* TODO these cases should be optimized */
ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr);
ierr = PetscNewLog(B,&dB);CHKERRQ(ierr);
B->spptr = dB;
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr);
B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA;
B->ops->destroy = MatDestroy_SeqDenseCUDA;
PetscFunctionReturn(0);
}
/*@C
MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA.
Collective
Input Parameters:
+ comm - MPI communicator
. m - number of rows
. n - number of columns
- data - optional location of GPU matrix data. Set data=NULL for PETSc
to control matrix memory allocation.
Output Parameter:
. A - the matrix
Notes:
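   A minimal usage sketch (illustrative only; m and n are placeholder sizes and error checking with CHKERRQ() is omitted):
.vb
   Mat A;
   MatCreateSeqDenseCUDA(PETSC_COMM_SELF,m,n,NULL,&A);  /* NULL: PETSc manages the GPU storage */
   MatSetValue(A,0,0,1.0,INSERT_VALUES);
   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
   MatDestroy(&A);
.ve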
Level: intermediate
.seealso: MatCreate(), MatCreateSeqDense()
@*/
PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size);
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr);
ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*MC
MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs.
Options Database Keys:
. -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions()
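   An illustrative sketch of selecting this type from the options database (A, m, n are placeholders; error checking omitted):
.vb
   MatCreate(PETSC_COMM_SELF,&A);
   MatSetSizes(A,m,n,m,n);
   MatSetFromOptions(A);   /* run the program with -mat_type seqdensecuda */
   MatSetUp(A);
.ve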
Level: beginner
M*/
PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqDense(B);CHKERRQ(ierr);
ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
| aa9ad1919eae01c781ba3d61209e25b706bd496f.cu | /*
Defines the matrix operations for sequential dense with CUDA
*/
#include <petscpkg_version.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/
#include <petsccublas.h>
/* cublas definitions are here */
#include <../src/vec/vec/impls/seq/seqcuda/cudavecimpl.h>
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnCpotrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h))
#define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnCpotrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f))
#define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnCpotrs((a),(b),(c),(d),(cuComplex*)(e),(f),(cuComplex*)(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnCsytrf((a),(b),(c),(cuComplex*)(d),(e),(f),(cuComplex*)(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnCsytrf_bufferSize((a),(b),(cuComplex*)(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnCgetrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnCgetrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnCgetrs((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j))
#else /* complex double */
#define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnZpotrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h))
#define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnZpotrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnZpotrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(cuDoubleComplex*)(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnZsytrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(f),(cuDoubleComplex*)(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnZsytrf_bufferSize((a),(b),(cuDoubleComplex*)(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnZgetrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnZgetrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnZgetrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j))
#endif
#else /* real single */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnSsytrf_bufferSize((a),(b),(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j))
#else /* real double */
#define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i))
#define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnDsytrf_bufferSize((a),(b),(c),(d),(e))
#define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h))
#define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f))
#define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j))
#endif
#endif
typedef struct {
PetscScalar *d_v; /* pointer to the matrix on the GPU */
PetscBool user_alloc;
PetscScalar *unplacedarray; /* if one called MatDenseCUDAPlaceArray(), this is where it stashed the original */
PetscBool unplaced_user_alloc;
/* factorization support */
int *d_fact_ipiv; /* device pivots */
PetscScalar *d_fact_work; /* device workspace */
int fact_lwork;
int *d_fact_info; /* device info */
/* workspace */
Vec workvec;
} Mat_SeqDenseCUDA;
PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscBool iscuda;
cudaError_t cerr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) PetscFunctionReturn(0);
/* the CPU preallocation may not have been performed yet */
ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr);
if (cA->lda <= 0) cA->lda = A->rmap->n;
if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); }
if (!d_data) { /* petsc-allocated storage */
ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr);
cerr = cudaMalloc((void**)&dA->d_v,cA->lda*A->cmap->n*sizeof(PetscScalar));CHKERRCUDA(cerr);
dA->user_alloc = PETSC_FALSE;
} else { /* user-allocated storage */
dA->d_v = d_data;
dA->user_alloc = PETSC_TRUE;
A->offloadmask = PETSC_OFFLOAD_GPU;
}
A->preallocated = PETSC_TRUE;
A->assembled = PETSC_TRUE;
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQDENSECUDA);
ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
if (A->offloadmask == PETSC_OFFLOAD_GPU) {
if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */
ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr);
}
ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr);
if (cA->lda > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */
cerr = cudaMemcpy(cA->v + j*cA->lda,dA->d_v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
} else {
cerr = cudaMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A)
{
Mat_SeqDense *cA = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscBool copy;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
PetscCheckTypeName(A,MATSEQDENSECUDA);
if (A->boundtocpu) PetscFunctionReturn(0);
copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED);
ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
if (copy) {
if (!dA->d_v) { /* Allocate GPU memory if not present */
ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr);
}
ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr);
if (cA->lda > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* TODO: it can be done better */
cerr = cudaMemcpy(dA->d_v + j*cA->lda,cA->v + j*cA->lda,m*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
}
} else {
cerr = cudaMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
}
ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr);
ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr);
A->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a)
{
Mat_SeqDense *aa = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); }
dA->unplacedarray = dA->d_v;
dA->unplaced_user_alloc = dA->user_alloc;
dA->d_v = (PetscScalar*)a;
dA->user_alloc = PETSC_TRUE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); }
dA->d_v = dA->unplacedarray;
dA->user_alloc = dA->unplaced_user_alloc;
dA->unplacedarray = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a)
{
Mat_SeqDense *aa = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
cudaError_t cerr;
PetscFunctionBegin;
if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); }
dA->d_v = (PetscScalar*)a;
dA->user_alloc = PETSC_FALSE;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!dA->d_v) {
ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr);
}
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
*a = dA->d_v;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
*a = NULL;
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A)
{
#if PETSC_PKG_CUDA_VERSION_GE(10,1,0)
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
PetscErrorCode ierr;
cudaError_t ccer;
cusolverStatus_t cerr;
cusolverDnHandle_t handle;
int n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented");
else if (A->factortype == MAT_FACTOR_CHOLESKY) {
if (!dA->d_fact_ipiv) { /* spd */
int il;
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
cerr = cusolverDnXpotri_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr);
if (il > dA->fact_lwork) {
dA->fact_lwork = il;
ccer = cudaFree(dA->d_fact_work);CHKERRCUDA(ccer);
ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXpotri(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
/* TODO (write cuda kernel) */
ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented");
}
#if defined(PETSC_USE_DEBUG)
ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr);
A->ops->solve = NULL;
A->ops->solvetranspose = NULL;
A->ops->matsolve = NULL;
A->factortype = MAT_FACTOR_NONE;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
#else
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher");
#endif
}
static PetscErrorCode MatMatSolve_SeqDenseCUDA(Mat A,Mat B,Mat X)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDense *x = (Mat_SeqDense*)X->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
const PetscScalar *da;
PetscScalar *dx;
cusolverDnHandle_t handle;
PetscBool iscuda;
int nrhs,n,lda,ldx;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cudaError_t ccer;
cusolverStatus_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
ierr = PetscObjectTypeCompareAny((PetscObject)X,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
if (X != B) {
ierr = MatCopy(B,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
/* MatMatSolve does not have a dispatching mechanism, so we may end up with a MATSEQDENSE here */
ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatConvert(X,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArray(X,&dx);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(X->cmap->n,&nrhs);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscMPIIntCast(x->lda,&ldx);CHKERRQ(ierr);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) {
ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
cerr = cusolverDnXgetrs(handle,CUBLAS_OP_N,n,nrhs,da,lda,dA->d_fact_ipiv,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else if (A->factortype == MAT_FACTOR_CHOLESKY) {
ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) { /* spd */
/* NOTE: cuda-memcheck has been observed to report "cudaErrorNotReady (error 34) due to device not ready on CUDA API call to cudaEventQuery" around the following call */
cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,nrhs,da,lda,dx,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented");
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(X,&dx);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatConvert(X,MATSEQDENSE,MAT_INPLACE_MATRIX,&X);CHKERRQ(ierr);
}
#if defined(PETSC_USE_DEBUG)
ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(nrhs*(2.0*n*n - n));CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolve_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,PetscBool trans)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
const PetscScalar *da;
PetscScalar *y;
cusolverDnHandle_t handle;
int one = 1,n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cudaError_t ccer;
cusolverStatus_t cerr;
PetscBool iscuda;
PetscErrorCode ierr;
PetscFunctionBegin;
if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
if (!dA->d_fact_work) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve");
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
/* MatSolve does not have a dispatching mechanism, so we may end up with a VECSTANDARD here */
ierr = PetscObjectTypeCompareAny((PetscObject)yy,&iscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr);
if (iscuda) {
ierr = VecCopy(xx,yy);CHKERRQ(ierr);
ierr = VecCUDAGetArray(yy,&y);CHKERRQ(ierr);
} else {
if (!dA->workvec) {
ierr = MatCreateVecs(A,&dA->workvec,NULL);CHKERRQ(ierr);
}
ierr = VecCopy(xx,dA->workvec);CHKERRQ(ierr);
ierr = VecCUDAGetArray(dA->workvec,&y);CHKERRQ(ierr);
}
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (A->factortype == MAT_FACTOR_LU) {
ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
cerr = cusolverDnXgetrs(handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N,n,one,da,lda,dA->d_fact_ipiv,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else if (A->factortype == MAT_FACTOR_CHOLESKY) {
ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) { /* spd */
/* NOTE: cuda-memcheck has been observed to report "cudaErrorNotReady (error 34) due to device not ready on CUDA API call to cudaEventQuery" around the following call */
cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,n,one,da,lda,y,n,dA->d_fact_info);CHKERRCUSOLVER(cerr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented");
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Unknown factor type %d",A->factortype);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
if (iscuda) {
ierr = VecCUDARestoreArray(yy,&y);CHKERRQ(ierr);
} else {
ierr = VecCUDARestoreArray(dA->workvec,&y);CHKERRQ(ierr);
ierr = VecCopy(dA->workvec,yy);CHKERRQ(ierr);
}
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
ierr = PetscLogGpuFlops(2.0*n*n - n);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolve_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatSolveTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSolve_SeqDenseCUDA_Private(A,xx,yy,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
int m,n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cusolverStatus_t cerr;
cusolverDnHandle_t handle;
cudaError_t ccer;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr);
if (!dA->d_fact_ipiv) {
ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer);
}
if (!dA->fact_lwork) {
cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
A->factortype = MAT_FACTOR_LU;
ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr);
A->ops->solve = MatSolve_SeqDenseCUDA;
A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA;
A->ops->matsolve = MatMatSolve_SeqDenseCUDA;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscScalar *da;
int n,lda;
#if defined(PETSC_USE_DEBUG)
int info;
#endif
cusolverStatus_t cerr;
cusolverDnHandle_t handle;
cudaError_t ccer;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&n);CHKERRQ(ierr);
ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr);
if (A->spd) {
ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr);
ierr = PetscMPIIntCast(a->lda,&lda);CHKERRQ(ierr);
if (!dA->fact_lwork) {
cerr = cusolverDnXpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ccer = WaitForGPU();CHKERRCUDA(ccer);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr);
#if defined(PETSC_USE_DEBUG)
ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(int), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer);
if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1);
else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info);
#endif
A->factortype = MAT_FACTOR_CHOLESKY;
ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr);
} else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. Use MAT_FACTOR_LU");
#if 0
/* at the time of writing this interface (cuda 10.0), cusolverDn does not implement the *sytrs and *hetr* routines.
   The code below should work; it can be activated once the *sytrs routines become available */
if (!dA->d_fact_ipiv) {
ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer);
}
if (!dA->fact_lwork) {
cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr);
ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer);
}
if (!dA->d_fact_info) {
ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer);
}
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
cerr = cusolverDnXsytrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
#endif
A->ops->solve = MatSolve_SeqDenseCUDA;
A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA;
A->ops->matsolve = MatMatSolve_SeqDenseCUDA;
ierr = PetscFree(A->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */
PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB)
{
const PetscScalar *da,*db;
PetscScalar *dc;
PetscScalar one=1.0,zero=0.0;
int m,n,k;
PetscInt alda,blda,clda;
PetscErrorCode ierr;
cublasHandle_t cublasv2handle;
PetscBool Aiscuda,Biscuda;
cublasStatus_t berr;
cudaError_t cerr;
PetscFunctionBegin;
/* we may end up with SEQDENSE as one of the arguments */
ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr);
ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr);
if (!Aiscuda) {
ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
}
if (!Biscuda) {
ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
ierr = PetscMPIIntCast(C->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(C->cmap->n,&n);CHKERRQ(ierr);
if (tA) {
ierr = PetscMPIIntCast(A->rmap->n,&k);CHKERRQ(ierr);
} else {
ierr = PetscMPIIntCast(A->cmap->n,&k);CHKERRQ(ierr);
}
if (!m || !n || !k) PetscFunctionReturn(0);
ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr);
ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr);
ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXgemm(cublasv2handle,tA ? CUBLAS_OP_T : CUBLAS_OP_N,tB ? CUBLAS_OP_T : CUBLAS_OP_N,
m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr);
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr);
if (!Aiscuda) {
ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
}
if (!Biscuda) {
ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/* zz = op(A)*xx + yy
if yy == NULL, only MatMult */
static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
const PetscScalar *xarray,*da;
PetscScalar *zarray;
PetscScalar one=1.0,zero=0.0;
int m, n, lda; /* Use PetscMPIInt as it is typedef'ed to int */
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (yy && yy != zz) { /* mult add */
ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr);
}
if (!A->rmap->n || !A->cmap->n) {
if (!yy) { /* mult only */
ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(A->cmap->n,&n);CHKERRQ(ierr);
ierr = PetscMPIIntCast(mat->lda,&lda);CHKERRQ(ierr);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
berr = cublasXgemv(cublasv2handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N,
m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 0 : A->rmap->n));CHKERRQ(ierr);
ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr);
ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
*array = mat->v;
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array)
{
Mat_SeqDense *mat = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
*array = mat->v;
A->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatDenseRestoreArray_SeqDenseCUDA(Mat A,PetscScalar **array)
{
PetscFunctionBegin;
*array = NULL;
PetscFunctionReturn(0);
}
PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha)
{
Mat_SeqDense *y = (Mat_SeqDense*)Y->data;
PetscScalar *dy;
int j,N,m,lday,one = 1;
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr);
ierr = PetscMPIIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr);
ierr = PetscMPIIntCast(Y->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr);
ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (lday>m) {
for (j=0; j<Y->cmap->n; j++) {
berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr);
}
} else {
berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr);
}
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(N);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str)
{
Mat_SeqDense *x = (Mat_SeqDense*)X->data;
Mat_SeqDense *y = (Mat_SeqDense*)Y->data;
const PetscScalar *dx;
PetscScalar *dy;
int j,N,m,ldax,lday,one = 1;
cublasHandle_t cublasv2handle;
cublasStatus_t berr;
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0);
ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr);
if (alpha != 0.0) {
ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr);
}
ierr = PetscMPIIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr);
ierr = PetscMPIIntCast(X->rmap->n,&m);CHKERRQ(ierr);
ierr = PetscMPIIntCast(x->lda,&ldax);CHKERRQ(ierr);
ierr = PetscMPIIntCast(y->lda,&lday);CHKERRQ(ierr);
ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
if (ldax>m || lday>m) {
for (j=0; j<X->cmap->n; j++) {
berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr);
}
} else {
berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr);
}
cerr = WaitForGPU();CHKERRCUDA(cerr);
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr);
if (alpha != 0.0) {
ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
static PetscErrorCode MatReset_SeqDenseCUDA(Mat A)
{
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
cudaError_t cerr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (dA) {
if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first");
if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); }
cerr = cudaFree(dA->d_fact_ipiv);CHKERRCUDA(cerr);
cerr = cudaFree(dA->d_fact_info);CHKERRCUDA(cerr);
cerr = cudaFree(dA->d_fact_work);CHKERRCUDA(cerr);
ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr);
}
ierr = PetscFree(A->spptr);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
/* prevent copying the data back from the GPU if we own the data pointer */
if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; }
ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr);
ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr);
ierr = MatDuplicateNoCreate_SeqDense(*B,A,cpvalues);CHKERRQ(ierr);
if (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) {
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
const PetscScalar *da;
PetscScalar *db;
cudaError_t cerr;
PetscInt ldb;
ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDAGetArrayWrite(*B,&db);CHKERRQ(ierr);
ierr = MatDenseGetLDA(*B,&ldb);CHKERRQ(ierr);
if (a->lda > A->rmap->n || ldb > A->rmap->n) {
PetscInt j,m = A->rmap->n;
for (j=0; j<A->cmap->n; j++) { /* it can be done better */
cerr = cudaMemcpy(db+j*ldb,da+j*a->lda,m*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr);
}
} else {
cerr = cudaMemcpy(db,da,(sizeof(PetscScalar)*A->cmap->n)*A->rmap->n,cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr);
}
ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr);
ierr = MatDenseCUDARestoreArrayWrite(*B,&db);CHKERRQ(ierr);
(*B)->offloadmask = PETSC_OFFLOAD_BOTH;
}
PetscFunctionReturn(0);
}
#include <petsc/private/vecimpl.h>
static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscScalar *x;
PetscBool viscuda;
cudaError_t cerr;
PetscFunctionBegin;
ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr);
if (viscuda && !v->boundtocpu) { /* update device data */
ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr);
if (A->offloadmask & PETSC_OFFLOAD_GPU) {
cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); /* both x and d_v are device pointers */
} else {
cerr = cudaMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
}
ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr);
} else { /* update host data */
ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr);
if (A->offloadmask & PETSC_OFFLOAD_CPU) {
ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr);
} else {
cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
}
ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr);
ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr);
if (ftype == MAT_FACTOR_LU) {
(*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense;
} else {
(*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense;
}
(*fact)->factortype = ftype;
ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr);
ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr);
ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (!a->cvec) {
ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,NULL,&a->cvec);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr);
}
a->vecinuse = col + 1;
ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr);
*v = a->cvec;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first");
if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector");
a->vecinuse = 0;
ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr);
ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
if (a->cmat && cend-cbegin != a->cmat->cmap->N) {
ierr = MatDestroy(&a->cmat);CHKERRQ(ierr);
}
ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr);
if (!a->cmat) {
ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr);
} else {
ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr);
}
ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr);
if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); }
a->cmat->offloadmask = A->offloadmask;
a->matinuse = cbegin + 1;
*v = a->cmat;
PetscFunctionReturn(0);
}
static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first");
if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix");
if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()");
a->matinuse = 0;
A->offloadmask = PETSC_OFFLOAD_GPU;
ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr);
ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr);
*v = NULL;
PetscFunctionReturn(0);
}
static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg)
{
Mat_SeqDense *a = (Mat_SeqDense*)A->data;
PetscErrorCode ierr;
PetscFunctionBegin;
if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first");
if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first");
A->boundtocpu = flg;
if (!flg) {
PetscBool iscuda;
ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = VecDestroy(&a->cvec);CHKERRQ(ierr);
}
ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr);
if (!iscuda) {
ierr = MatDestroy(&a->cmat);CHKERRQ(ierr);
}
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C",MatDenseRestoreArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr);
A->ops->duplicate = MatDuplicate_SeqDenseCUDA;
A->ops->mult = MatMult_SeqDenseCUDA;
A->ops->multadd = MatMultAdd_SeqDenseCUDA;
A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA;
A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA;
A->ops->axpy = MatAXPY_SeqDenseCUDA;
A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA;
A->ops->lufactor = MatLUFactor_SeqDenseCUDA;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA;
A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA;
A->ops->scale = MatScale_SeqDenseCUDA;
} else {
/* make sure we have an up-to-date copy on the CPU */
ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreArray_C",MatDenseRestoreArray_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr);
A->ops->duplicate = MatDuplicate_SeqDense;
A->ops->mult = MatMult_SeqDense;
A->ops->multadd = MatMultAdd_SeqDense;
A->ops->multtranspose = MatMultTranspose_SeqDense;
A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense;
A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense;
A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense;
A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense;
A->ops->axpy = MatAXPY_SeqDense;
A->ops->choleskyfactor = MatCholeskyFactor_SeqDense;
A->ops->lufactor = MatLUFactor_SeqDense;
A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense;
A->ops->getcolumnvector = MatGetColumnVector_SeqDense;
A->ops->scale = MatScale_SeqDense;
}
if (a->cmat) {
ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat)
{
Mat B;
PetscErrorCode ierr;
PetscFunctionBegin;
if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) {
/* TODO these cases should be optimized */
ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
B = *newmat;
ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr);
ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr);
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr);
B->ops->bindtocpu = NULL;
B->ops->destroy = MatDestroy_SeqDense;
B->offloadmask = PETSC_OFFLOAD_CPU;
PetscFunctionReturn(0);
}
PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat)
{
Mat_SeqDenseCUDA *dB;
Mat B;
PetscErrorCode ierr;
PetscFunctionBegin;
if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) {
/* TODO these cases should be optimized */
ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
B = *newmat;
ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr);
ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr);
ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr);
ierr = PetscNewLog(B,&dB);CHKERRQ(ierr);
B->spptr = dB;
B->offloadmask = PETSC_OFFLOAD_UNALLOCATED;
ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr);
B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA;
B->ops->destroy = MatDestroy_SeqDenseCUDA;
PetscFunctionReturn(0);
}
/*@C
MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA.
Collective
Input Parameters:
+ comm - MPI communicator
. m - number of rows
. n - number of columns
- data - optional location of GPU matrix data. Set data=NULL for PETSc
to control matrix memory allocation.
Output Parameter:
. A - the matrix
Notes:
Level: intermediate
.seealso: MatCreate(), MatCreateSeqDense()
@*/
PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size);
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr);
ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
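/*
  Illustrative usage sketch (not taken from the PETSc test suite; error checking is
  abbreviated). It creates a 4x4 dense CUDA matrix, lets PETSc allocate the GPU
  storage (data = NULL), fills it from the host, and applies it to a CUDA vector.
  The vectors obtained from MatCreateVecs() pick up the matrix's CUDA vector type,
  so MatMult() dispatches to MatMult_SeqDenseCUDA() above.

    Mat            A;
    Vec            x,y;
    PetscInt       i,j;
    PetscErrorCode ierr;

    ierr = MatCreateSeqDenseCUDA(PETSC_COMM_SELF,4,4,NULL,&A);CHKERRQ(ierr);
    for (i=0; i<4; i++) {
      for (j=0; j<4; j++) {
        ierr = MatSetValue(A,i,j,(PetscScalar)(i==j ? 2.0 : 1.0),INSERT_VALUES);CHKERRQ(ierr);
      }
    }
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(A,x,y);CHKERRQ(ierr);
    ierr = VecDestroy(&x);CHKERRQ(ierr);
    ierr = VecDestroy(&y);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
*/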
/*MC
MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs.
Options Database Keys:
. -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions()
Level: beginner
M*/
PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B)
{
PetscErrorCode ierr;
PetscFunctionBegin;
ierr = MatCreate_SeqDense(B);CHKERRQ(ierr);
ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
|
5e9ad29ea5e01fd9a40399889b320b72d361fc3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 5e9ad29ea5e01fd9a40399889b320b72d361fc3c.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
8e4c4505ed077147fab9815e6577525bf8920fa9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "gMat.cuh"
#include <vector>
#include "mytime.h"
#include <functional>
#include <math.h>
#include <stdlib.h>
#include <string>
void manyMult(gMat& A, gMat& B, gMat& C,int iter,int mem){
for (int i=0; i!=iter; ++i){
prod(A,B,C,mem);
prod(A,C,B,mem);
}
}
void multstat(int mem, int row, int col){
std::cout << "multiplying " << row << " by "<< col <<" matrices.\n";
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0);
std::cout << "using "<< mem << " of "<< prop.sharedMemPerBlock << " bytes of shared memory\n";
int w=floor(sqrt(mem/2/sizeof(float)));
std::cout << "Tile sizes will be: "<< w<< " by "<< w << "\n";
std::cout << "This amounts to: " << w*w << " threads \n";
std::cout << ceil(row/ (float) w)*ceil(col/ (float) w) << " tiles will be used "<<std::endl;
}
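// Worked example of the tile-size arithmetic printed above, using the defaults from
// main() below (w = 20, 1000 x 1000 matrices): mem = 2*20*20*sizeof(float) = 3200 bytes,
// so w = floor(sqrt(3200/2/4)) = 20. The factor of 2 presumably buys one w x w tile per
// operand inside prod() (an assumption about gMat.cuh, which is not shown here). That
// gives 20*20 = 400 threads per block and ceil(1000/20)*ceil(1000/20) = 2500 tiles.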
int main(int argc, char *argv[]){
int w;
if ( argc >= 2) w=atoi(argv[1]);
else w=20;
int r=1000;
int c=1000;
gMat A=eye(r);
gMat B=randgMat(r,c);
gMat C=randgMat(r,c);
A.name="A";
B.name="B";
C.name="C";
int mem=2*w*w*sizeof(float);
auto mm=std::bind(manyMult,A,B,C,300,mem);
multstat(mem,r,c);
mm();
A.cleanup();
B.cleanup();
C.cleanup();
}
void test1(){
std::vector<float> adat={1,2,3,4};
std::vector<float> bdat={2,0,0,2,0,2};
std::vector<float> cdat={0,0,0,0,0,0};
gMat A{adat,2,2};
gMat B{bdat,2,3};
gMat C{cdat,2,3};
std::cout <<"A:\n" << A << std::flush;
std::cout <<"B:\n" << B << std::flush;
int mem=8*sizeof(float);
prod(A,B,C,mem);
std::cout <<"C:\n" << C << std::flush;
}
| 8e4c4505ed077147fab9815e6577525bf8920fa9.cu | #include <iostream>
#include "gMat.cuh"
#include <vector>
#include "mytime.h"
#include <functional>
#include <math.h>
#include <stdlib.h>
#include <string>
void manyMult(gMat& A, gMat& B, gMat& C,int iter,int mem){
for (int i=0; i!=iter; ++i){
prod(A,B,C,mem);
prod(A,C,B,mem);
}
}
void multstat(int mem, int row, int col){
std::cout << "multiplying " << row << " by "<< col <<" matrices.\n";
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
std::cout << "using "<< mem << " of "<< prop.sharedMemPerBlock << " bytes of shared memory\n";
int w=floor(sqrt(mem/2/sizeof(float)));
std::cout << "Tile sizes will be: "<< w<< " by "<< w << "\n";
std::cout << "This amounts to: " << w*w << " threads \n";
std::cout << ceil(row/ (float) w)*ceil(col/ (float) w) << " tiles will be used "<<std::endl;
}
int main(int argc, char *argv[]){
int w;
if ( argc >= 2) w=atoi(argv[1]);
else w=20;
int r=1000;
int c=1000;
gMat A=eye(r);
gMat B=randgMat(r,c);
gMat C=randgMat(r,c);
A.name="A";
B.name="B";
C.name="C";
int mem=2*w*w*sizeof(float);
auto mm=std::bind(manyMult,A,B,C,300,mem);
multstat(mem,r,c);
mm();
A.cleanup();
B.cleanup();
C.cleanup();
}
void test1(){
std::vector<float> adat={1,2,3,4};
std::vector<float> bdat={2,0,0,2,0,2};
std::vector<float> cdat={0,0,0,0,0,0};
gMat A{adat,2,2};
gMat B{bdat,2,3};
gMat C{cdat,2,3};
std::cout <<"A:\n" << A << std::flush;
std::cout <<"B:\n" << B << std::flush;
int mem=8*sizeof(float);
prod(A,B,C,mem);
std::cout <<"C:\n" << C << std::flush;
}
|
6f253f1573b6ba658d50d286bf25d88c5689e757.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<iostream>
#include<cuda_runtime.h>
using namespace std ;
#define THR 512 // 2^9
#define BLK 2048 // 2^11
#define NUM BLK*THR // 2^20
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC ; // elapsed time in seconds
cout << "Elapsed time : " << elapsed << "sec" << endl ; // print time (sec) ;
}
__global__ void oddeven(int *arr, int cond)
{
int index = blockIdx.x * blockDim.x + threadIdx.x ;
int temp ;
if (index >= (NUM / 2) - 1 && cond % 2 != 0) return ;
if( cond % 2 == 0 ) // if cond is even
{
if( arr[index * 2] > arr[index * 2 + 1] ) // compare arr[0]<->arr[1], arr[2]<->arr[3], ... // (0, 2, 4, ... even)
{
temp = arr[index * 2] ;
arr[index * 2] = arr[index * 2 + 1] ;
arr[index * 2 + 1] = temp ; // if arr[0] > arr[1], arr[2] > arr[3], ... change each other
}
}
else // if cond is odd
{
if( arr[index * 2 + 1] > arr[index * 2 + 2] ) // compare arr[1]<->arr[2], arr[3]<->arr[4], ... (1, 3, 5, ... odd)
{
temp = arr[index * 2 + 1] ;
arr[index * 2 + 1] = arr[index * 2 + 2] ;
arr[index * 2 + 2] = temp ; // if arr[1] > arr[2], arr[3] > arr[4], ... change each other
}
}
}
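// For reference, the same phase logic written as a plain CPU routine (an illustrative
// sketch only; it is not called anywhere in this program). Phase p compares the pairs
// starting at index p % 2, and NUM phases of odd-even transposition sort are enough to
// sort NUM elements:
//
// void oddeven_cpu(int *arr)
// {
//     for (int p = 0; p < NUM; p++)
//     {
//         for (int k = p % 2; k + 1 < NUM; k += 2) // even phase: (0,1),(2,3),... odd phase: (1,2),(3,4),...
//         {
//             if (arr[k] > arr[k + 1])
//             {
//                 int t = arr[k]; arr[k] = arr[k + 1]; arr[k + 1] = t;
//             }
//         }
//     }
// }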
int main()
{
clock_t start, stop ;
int *values ;
int *deva_values ;
int i ;
int size = sizeof(int) * NUM ;
values = (int *)malloc(size) ; // dynamic memory allocation
hipMalloc((void **)&deva_values, size) ; // using cuda memory allocation
srand(unsigned(time(NULL))) ; // seed the RNG so each run generates a different random sequence
for (i = 0; i < NUM; i++)
{
values[i] = rand(); // put random number into array
}
hipMemcpy(deva_values, values, size, hipMemcpyHostToDevice) ; // using cuda copy value -> dev_values (size) // Host to Device
start = clock() ; // time start
for (i = 0; i < NUM; i++)
{
oddeven << <NUM / 1024, 512 >> >(deva_values, i) ; //512 threads per block and total N/2/512 blocks
}
stop = clock() ; // time stop
hipMemcpy(values, deva_values, size, hipMemcpyDeviceToHost) ; // using cuda copy dev_values -> values (size) // Device to Host
cout << "12141680 GPU OddEven-Sort\n" << endl ;
print_elapsed(start, stop) ; // print elapsed time
cout << endl ;
for (i = 0; i<NUM; i++)
{
printf("%d ", values[i]) ; // print sorted number
}
hipFree(deva_values) ; // free memory
delete values ;
return 0;
} | 6f253f1573b6ba658d50d286bf25d88c5689e757.cu | #include<cuda.h>
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<iostream>
#include<cuda_runtime.h>
using namespace std ;
#define THR 512 // 2^9
#define BLK 2048 // 2^11
#define NUM BLK*THR // 2^20
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC ; // elapsed time in seconds
cout << "Elapsed time : " << elapsed << "sec" << endl ; // print time (sec) ;
}
__global__ void oddeven(int *arr, int cond)
{
int index = blockIdx.x * blockDim.x + threadIdx.x ;
int temp ;
if (index >= (NUM / 2) - 1 && cond % 2 != 0) return ;
if( cond % 2 == 0 ) // if cond is even
{
if( arr[index * 2] > arr[index * 2 + 1] ) // compare arr[0]<->arr[1], arr[2]<->arr[3], ... // (0, 2, 4, ... even)
{
temp = arr[index * 2] ;
arr[index * 2] = arr[index * 2 + 1] ;
arr[index * 2 + 1] = temp ; // if arr[0] > arr[1], arr[2] > arr[3], ... change each other
}
}
else // if cond is odd
{
if( arr[index * 2 + 1] > arr[index * 2 + 2] ) // compare arr[1]<->arr[2], arr[3]<->arr[4], ... (1, 3, 5, ... odd)
{
temp = arr[index * 2 + 1] ;
arr[index * 2 + 1] = arr[index * 2 + 2] ;
arr[index * 2 + 2] = temp ; // if arr[1] > arr[2], arr[3] > arr[4], ... change each other
}
}
}
int main()
{
clock_t start, stop ;
int *values ;
int *deva_values ;
int i ;
int size = sizeof(int) * NUM ;
values = (int *)malloc(size) ; // dynamic memory allocation
cudaMalloc((void **)&deva_values, size) ; // using cuda memory allocation
srand(unsigned(time(NULL))) ; // seed the RNG so each run generates a different random sequence
for (i = 0; i < NUM; i++)
{
values[i] = rand(); // put random number into array
}
cudaMemcpy(deva_values, values, size, cudaMemcpyHostToDevice) ; // using cuda copy value -> dev_values (size) // Host to Device
start = clock() ; // time start
for (i = 0; i < NUM; i++)
{
oddeven << <NUM / 1024, 512 >> >(deva_values, i) ; //512 threads per block and total N/2/512 blocks
}
stop = clock() ; // time stop
cudaMemcpy(values, deva_values, size, cudaMemcpyDeviceToHost) ; // using cuda copy dev_values -> values (size) // Device to Host
cout << "12141680 GPU OddEven-Sort\n" << endl ;
print_elapsed(start, stop) ; // print elapsed time
cout << endl ;
for (i = 0; i<NUM; i++)
{
printf("%d ", values[i]) ; // print sorted number
}
cudaFree(deva_values) ; // free memory
delete values ;
return 0;
} |
d32a9a85dc43afda1ab780c0c4467e93f26c8141.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* * APPROXIMATE PATTERN MATCHING
* *
* * INF560 X2016
* */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
char *read_input_file(char * filename, int * size) {
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open(filename, O_RDONLY);
if (fd == -1) {
fprintf(stderr, "Unable to open the text file <%s>\n", filename);
return NULL;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_SET);
/* TODO check return of lseek */
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if (buf == NULL) {
fprintf(stderr,
"Unable to allocate %lld byte(s) for main array\n",
fsize);
return NULL;
}
n_bytes = read(fd, buf, fsize);
if (n_bytes != fsize) {
fprintf(stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close(fd);
return buf;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
__global__
void cuda_levenshtein(char *s1, char *s2, int len, int * result, int n_max, int i_0) {
unsigned int x, y, lastdiag, olddiag;
int i = blockIdx.x * blockDim.x + threadIdx.x +i_0;
if (i<n_max){
int column[100];
for (y = 1; y <= len; y++) {
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1+i] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
result[i] = column[len];
}
}
int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++) {
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
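/*
 * Illustration of the recurrence used above (the example strings are made up and do
 * not appear in this program): levenshtein("kitten","sittin",6,column) returns 2,
 * because two substitutions (k->s and e->i) turn one 6-character string into the
 * other. Each update takes the minimum of
 *   column[y] + 1,
 *   column[y-1] + 1,
 *   lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
 * i.e. the three classic edit operations (insert, delete, substitute/match), and
 * the cuda_levenshtein kernel evaluates the same recurrence with one text offset
 * handled per thread.
 */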
int main(int argc, char ** argv) {
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i, j ;
char * buf ;
struct timeval t1, t2;
double duration ;
int * n_matches ;
int n_bytes ;
int rank;
int max_pat;
/* Check number of arguments */
if (argc < 4) {
printf("Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0]);
return 1;
}
n_bytes=0;
/* Get the distance factor */
approx_factor = atoi(argv[1]);
/* Grab the filename containing the target text */
filename = argv[2];
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if (pattern == NULL) {
fprintf(
stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns
);
return 1 ;
}
/* Grab the patterns */
for (i = 0 ; i < nb_patterns ; i++) {
int l ;
l = strlen(argv[i+3]) ;
if (l <= 0) {
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if (pattern[i] == NULL) {
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Matching: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor );
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL ) {
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
if (n_matches == NULL) {
fprintf(
stderr,
"Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int )
);
return 1 ;
}
/*****
* * BEGIN MAIN LOOP
* ******/
/* Timer start */
gettimeofday(&t1, NULL);
max_pat=0;
for(i=0; i<nb_patterns; i++){
max_pat=max_pat>strlen(pattern[i]) ? max_pat : strlen(pattern[i]);
}
for ( i = 0 ; i < nb_patterns ; i++ ) {
int size_pattern = strlen(pattern[i]) ;
n_matches[i] = 0 ;
char * d_rcv_buf;
char * d_pattern;
int * d_result;
int n_max = (n_bytes-size_pattern + 1);
int * result;
result=(int *) malloc(sizeof(int)*n_max);
hipMalloc((void **)&d_rcv_buf, n_bytes*sizeof(char));
hipMalloc((void **)&d_pattern, size_pattern*sizeof(char));
hipMalloc((void **)&d_result, n_max*sizeof(int));
hipMemcpy(d_rcv_buf, buf, n_bytes*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(d_pattern, pattern[i], size_pattern*sizeof(char), hipMemcpyHostToDevice);
for(j=0; j<n_max/1024; j++){
hipLaunchKernelGGL(( cuda_levenshtein), dim3(1), dim3(1024), 0, 0, d_rcv_buf, d_pattern, size_pattern, d_result, n_max, j*1024);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
hipLaunchKernelGGL(( cuda_levenshtein), dim3(1), dim3(1024), 0, 0, d_rcv_buf, d_pattern, size_pattern, d_result, n_max, 1024*(n_max/1024));
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk(hipMemcpy(result, d_result, n_max*sizeof(int), hipMemcpyDeviceToHost));
hipFree(d_rcv_buf);
hipFree(d_pattern);
hipFree(d_result);
int * column;
column=(int *) malloc((size_pattern+1)*sizeof(int));
for ( j = 0 ; j < n_bytes ; j++ ) {
int distance = 0 ;
int s ;
#if APM_DEBUG
if ( j % 100 == 0 )
{
printf( "Processing byte %d (out of %d)\n", j, n_bytes ) ;
}
#endif
s = size_pattern ;
if ( n_bytes - j < size_pattern )
{
s = n_bytes - j ;
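/* fewer than size_pattern characters remain at the end of the buffer: compare the
   s remaining characters on the CPU and count the size_pattern - s missing ones as
   one edit each */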
distance = levenshtein( pattern[i], &buf[j], s, column )+size_pattern-s;
}
else{
distance = result[j];
}
if ( distance <= approx_factor ) {
n_matches[i]++ ;
}
}
printf("%d matches for pattern %d\n", n_matches[i], i);
free( column );
}
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* * END MAIN LOOP
* ******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
}
| d32a9a85dc43afda1ab780c0c4467e93f26c8141.cu | /**
* * APPROXIMATE PATTERN MATCHING
* *
* * INF560 X2016
* */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
char *read_input_file(char * filename, int * size) {
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open(filename, O_RDONLY);
if (fd == -1) {
fprintf(stderr, "Unable to open the text file <%s>\n", filename);
return NULL;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_SET);
/* TODO check return of lseek */
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if (buf == NULL) {
fprintf(stderr,
"Unable to allocate %lld byte(s) for main array\n",
fsize);
return NULL;
}
n_bytes = read(fd, buf, fsize);
if (n_bytes != fsize) {
fprintf(stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close(fd);
return buf;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
__global__
void cuda_levenshtein(char *s1, char *s2, int len, int * result, int n_max, int i_0) {
unsigned int x, y, lastdiag, olddiag;
int i = blockIdx.x * blockDim.x + threadIdx.x +i_0;
if (i<n_max){
int column[100];
for (y = 1; y <= len; y++) {
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1+i] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
result[i] = column[len];
}
}
int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++) {
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
int main(int argc, char ** argv) {
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i, j ;
char * buf ;
struct timeval t1, t2;
double duration ;
int * n_matches ;
int n_bytes ;
int rank;
int max_pat;
/* Check number of arguments */
if (argc < 4) {
printf("Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0]);
return 1;
}
n_bytes=0;
/* Get the distance factor */
approx_factor = atoi(argv[1]);
/* Grab the filename containing the target text */
filename = argv[2];
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if (pattern == NULL) {
fprintf(
stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns
);
return 1 ;
}
/* Grab the patterns */
for (i = 0 ; i < nb_patterns ; i++) {
int l ;
l = strlen(argv[i+3]) ;
if (l <= 0) {
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if (pattern[i] == NULL) {
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Matching: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor );
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL ) {
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
if (n_matches == NULL) {
fprintf(
stderr,
"Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int )
);
return 1 ;
}
/*****
* * BEGIN MAIN LOOP
* ******/
/* Timer start */
gettimeofday(&t1, NULL);
max_pat=0;
for(i=0; i<nb_patterns; i++){
max_pat=max_pat>strlen(pattern[i]) ? max_pat : strlen(pattern[i]);
}
for ( i = 0 ; i < nb_patterns ; i++ ) {
int size_pattern = strlen(pattern[i]) ;
n_matches[i] = 0 ;
char * d_rcv_buf;
char * d_pattern;
int * d_result;
int n_max = (n_bytes-size_pattern + 1);
int * result;
result=(int *) malloc(sizeof(int)*n_max);
cudaMalloc((void **)&d_rcv_buf, n_bytes*sizeof(char));
cudaMalloc((void **)&d_pattern, size_pattern*sizeof(char));
cudaMalloc((void **)&d_result, n_max*sizeof(int));
cudaMemcpy(d_rcv_buf, buf, n_bytes*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_pattern, pattern[i], size_pattern*sizeof(char), cudaMemcpyHostToDevice);
for(j=0; j<n_max/1024; j++){
cuda_levenshtein<<<1, 1024>>>(d_rcv_buf, d_pattern, size_pattern, d_result, n_max, j*1024);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
cuda_levenshtein<<<1, 1024>>>(d_rcv_buf, d_pattern, size_pattern, d_result, n_max, 1024*(n_max/1024));
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk(cudaMemcpy(result, d_result, n_max*sizeof(int), cudaMemcpyDeviceToHost));
cudaFree(d_rcv_buf);
cudaFree(d_pattern);
cudaFree(d_result);
int * column;
column=(int *) malloc((size_pattern+1)*sizeof(int));
for ( j = 0 ; j < n_bytes ; j++ ) {
int distance = 0 ;
int s ;
#if APM_DEBUG
if ( j % 100 == 0 )
{
printf( "Processing byte %d (out of %d)\n", j, n_bytes ) ;
}
#endif
s = size_pattern ;
if ( n_bytes - j < size_pattern )
{
s = n_bytes - j ;
distance = levenshtein( pattern[i], &buf[j], s, column )+size_pattern-s;
}
else{
distance = result[j];
}
if ( distance <= approx_factor ) {
n_matches[i]++ ;
}
}
printf("%d matches for pattern %d\n", n_matches[i], i);
free( column );
}
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* * END MAIN LOOP
* ******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
}
|
534f558e692e1bee1cff1fe36ae10f711669ce5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MSELoss.h"
#include "../Common.cuh"
#include <iostream>
#define MSELOSS_PRINT_DEBUG false
using namespace std;
extern hipError_t cudaStatus;
__global__ void mse_loss(CUDATensor* input, CUDATensor* target, CUDATensor* output) {
int input_idx = blockIdx.z * gridDim.y * gridDim.x +
blockIdx.y * gridDim.x +
blockIdx.x;
int n = gridDim.z * gridDim.y * gridDim.x;
float err = target->data[input_idx] - input->data[input_idx];
atomicAdd(output->data, err * err / n);
#if MSELOSS_PRINT_DEBUG
printf("Output: %2.3f, N: %i\n", output->data[0], n);
#endif
}
__global__ void mse_loss_backward(CUDATensor* input, CUDATensor* target, CUDATensor* output) {
#if MSELOSS_PRINT_DEBUG
printf("Backprop: %2.3f, %2.3f, %i\n", input->data[0], target->data[0], blockDim.y * blockDim.x);
#endif
int idx = blockIdx.x * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x + threadIdx.x;
//output->data[idx] = 2 * (target->data[idx] - input->data[idx]) / (blockDim.y * blockDim.x);
output->data[idx] = -2 * (target->data[idx] - input->data[idx]) / (blockDim.x);
}
MSELoss::MSELoss() {
// TODO: Initialize loss weights
}
MSELoss::~MSELoss() {
//
}
void MSELoss::run(Tensor* output, Tensor* input, Tensor* target) {
record_flow(output, input, target);
output->clear();
vector<int> input_shape = input->getShape();
vector<int> output_shape = output->getShape();
dim3 grid(input_shape[0], input_shape[1], input_shape[2]);
dim3 block;
mse_loss << <grid, block >> > (input->getCudaData(),
target->getCudaData(),
output->getCudaData());
HE(hipPeekAtLastError());
}
void MSELoss::update(float lr) {
//
}
void MSELoss::propagate() {
// No changes to weights
// (a2(a1 * X + b1) + b2 - T)^2 -> 0
// 2 * E * I2
// 2 * E * a2 * X
// Set input vector gradient to 2 * E, twice the output
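	// Worked example: for one element with prediction x and target t,
	// L = (x - t)^2 / N, so dL/dx = 2 * (x - t) / N = -2 * (t - x) / N,
	// which is the value mse_loss_backward stores (with N taken as
	// blockDim.x in the kernel above).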
vector<int> input_shape = flow_input1->getShape();
dim3 grid(input_shape[0], 1, 1);
dim3 block(input_shape[1], input_shape[2], 1);
mse_loss_backward << <grid, block >> > (flow_input1->getCudaData(),
flow_input2->getCudaData(),
flow_input1->getCudaGrad());
HE(hipPeekAtLastError());
}
| 534f558e692e1bee1cff1fe36ae10f711669ce5e.cu | #include "MSELoss.h"
#include "../Common.cuh"
#include <iostream>
#define MSELOSS_PRINT_DEBUG false
using namespace std;
extern cudaError_t cudaStatus;
__global__ void mse_loss(CUDATensor* input, CUDATensor* target, CUDATensor* output) {
int input_idx = blockIdx.z * gridDim.y * gridDim.x +
blockIdx.y * gridDim.x +
blockIdx.x;
int n = gridDim.z * gridDim.y * gridDim.x;
float err = target->data[input_idx] - input->data[input_idx];
atomicAdd(output->data, err * err / n);
#if MSELOSS_PRINT_DEBUG
printf("Output: %2.3f, N: %i\n", output->data[0], n);
#endif
}
__global__ void mse_loss_backward(CUDATensor* input, CUDATensor* target, CUDATensor* output) {
#if MSELOSS_PRINT_DEBUG
printf("Backprop: %2.3f, %2.3f, %i\n", input->data[0], target->data[0], blockDim.y * blockDim.x);
#endif
int idx = blockIdx.x * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x + threadIdx.x;
//output->data[idx] = 2 * (target->data[idx] - input->data[idx]) / (blockDim.y * blockDim.x);
output->data[idx] = -2 * (target->data[idx] - input->data[idx]) / (blockDim.x);
}
MSELoss::MSELoss() {
// TODO: Initialize loss weights
}
MSELoss::~MSELoss() {
//
}
void MSELoss::run(Tensor* output, Tensor* input, Tensor* target) {
record_flow(output, input, target);
output->clear();
vector<int> input_shape = input->getShape();
vector<int> output_shape = output->getShape();
dim3 grid(input_shape[0], input_shape[1], input_shape[2]);
dim3 block;
mse_loss << <grid, block >> > (input->getCudaData(),
target->getCudaData(),
output->getCudaData());
HE(cudaPeekAtLastError());
}
void MSELoss::update(float lr) {
//
}
void MSELoss::propagate() {
// No changes to weights
// (a2(a1 * X + b1) + b2 - T)^2 -> 0
// 2 * E * I2
// 2 * E * a2 * X
// Set input vector gradient to 2 * E, twice the output
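	// Worked example: for one element with prediction x and target t,
	// L = (x - t)^2 / N, so dL/dx = 2 * (x - t) / N = -2 * (t - x) / N,
	// which is the value mse_loss_backward stores (with N taken as
	// blockDim.x in the kernel above).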
vector<int> input_shape = flow_input1->getShape();
dim3 grid(input_shape[0], 1, 1);
dim3 block(input_shape[1], input_shape[2], 1);
mse_loss_backward << <grid, block >> > (flow_input1->getCudaData(),
flow_input2->getCudaData(),
flow_input1->getCudaGrad());
HE(cudaPeekAtLastError());
}
|
03b9e09c682beff921ac2024911e10854a691d0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
static const int BLOCKDIMY = 16;
#else
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
#endif
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
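      // Illustrative example: if warps 1, 3 and 5 of this block pulled grad
      // data for the same dst_row, matchmask has bits 1, 3 and 5 set; warp 1
      // (lowest set bit) is elected leader, adds warps 3 and 5's shared-memory
      // segments into its own, and issues the single write to grad_weight,
      // keeping the accumulation order deterministic.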
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
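  // Each thread handles up to SZ features of one input row, strided by
  // WARP_SIZE, within the feature tile selected by blockIdx.y.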
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += ::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (num_indices <= 768 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
dim3 block(WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t>)
, dim3(grid),
dim3(block),
sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
stream,
indices_contig.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
auto sorted_indices = at::empty_like(indices);
auto orig_indices = at::empty_like(indices);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
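    // Net effect: count[idx] holds the total number of occurrences of
    // sorted_indices[idx], so the scatter kernel below scales each gradient
    // by 1/count, averaging over duplicate indices instead of summing.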
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
hipLaunchKernelGGL(( embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_contig = indices.contiguous();
auto indices_data = device_ptr(indices_contig.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
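  // Example of the race: with indices = {3, 7, 3} the two 3s are not adjacent,
  // so unique_copy keeps both and two blocks of renorm_kernel may rescale row 3
  // concurrently; sorting the indices first would presumably avoid this.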
auto unique_indices = at::empty(indices.numel(), indices.options());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data<scalar_t>(),
unique_indices.data<int64_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
});
THCudaCheck(hipGetLastError());
return self;
}
}} // namespace at::native
| 03b9e09c682beff921ac2024911e10854a691d0a.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int WARP_SIZE = 64;
static const int BLOCKDIMY = 16;
#else
static const int WARP_SIZE = 32;
static const int BLOCKDIMY = 32;
#endif
template
<typename scalar_t,
typename accscalar_t>
__global__ void embedding_backward_feature_kernel
(int64_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
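      // Illustrative example: if warps 1, 3 and 5 of this block pulled grad
      // data for the same dst_row, matchmask has bits 1, 3 and 5 set; warp 1
      // (lowest set bit) is elected leader, adds warps 3 and 5's shared-memory
      // segments into its own, and issues the single write to grad_weight,
      // keeping the accumulation order deterministic.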
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
  // Number of values processed by each thread (grain size)
const int SZ = 4;
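  // Each thread handles up to SZ features of one input row, strided by
  // WARP_SIZE, within the feature tile selected by blockIdx.y.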
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE));
dim3 block(WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND_HALF
(grad.type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
embedding_backward_feature_kernel<scalar_t, accscalar_t>
<<<grid,
block,
sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY,
stream>>>
(indices_contig.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = at::empty_like(indices);
auto orig_indices = at::empty_like(indices);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
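    // Net effect: count[idx] holds the total number of occurrences of
    // sorted_indices[idx], so the scatter kernel below scales each gradient
    // by 1/count, averaging over duplicate indices instead of summing.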
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
grad_weight.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_contig = indices.contiguous();
auto indices_data = device_ptr(indices_contig.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contain duplicates which are not
// adjacent
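  // Example of the race: with indices = {3, 7, 3} the two 3s are not adjacent,
  // so unique_copy keeps both and two blocks of renorm_kernel may rescale row 3
  // concurrently; sorting the indices first would presumably avoid this.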
auto unique_indices = at::empty(indices.numel(), indices.options());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<scalar_t>(),
unique_indices.data<int64_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
d584f926fed70552f830ac21aa92fa6b13d17aae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/resize_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void NNInterporationForward(const int nthreads,
const Dtype* bottom_data, Dtype* top_data, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
int h_ = __max(0, __min(bot_h - 1, int(bot_hDtop_h*(ph + 1) - 0.5)));
int w_ = __max(0, __min(bot_w - 1, int(bot_wDtop_w*(pw + 1) - 0.5)));
int bot_index = n*bot_chw + c*bot_hw + h_*bot_w + w_;
top_data[index] = bottom_data[bot_index];
}
}
template <typename Dtype>
__global__ void LinearInterporationForward(const int nthreads,
const Dtype* bottom_data, Dtype* top_data, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
Dtype h_ = __max(0, __min(bot_h - 1, bot_hDtop_h*(ph + 1) - 1.5));
Dtype w_ = __max(0, __min(bot_w - 1, bot_wDtop_w*(pw + 1) - 1.5));
int top = __max(0, __min(bot_h - 1, int(h_)));
int bot = __max(0, __min(bot_h - 1, top + 1));
int left = __max(0, __min(bot_w - 1, int(w_)));
int right = __max(0, __min(bot_w - 1, left + 1));
Dtype hr = __max(0, __min(1, h_ - top));
Dtype wr = __max(0, __min(1, w_ - left));
int bot_index_top_left = n*bot_chw + c*bot_hw + top*bot_w + left;
int bot_index_top_right = n*bot_chw + c*bot_hw + top*bot_w + right;
int bot_index_bot_left = n*bot_chw + c*bot_hw + bot*bot_w + left;
int bot_index_bot_right = n*bot_chw + c*bot_hw + bot*bot_w + right;
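    // Bilinear blend of the four neighbouring bottom pixels, weighted by the
    // fractional offsets: value = TL*(1-hr)*(1-wr) + TR*(1-hr)*wr
    // + BL*hr*(1-wr) + BR*hr*wr, with hr, wr in [0, 1].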
top_data[index] = bottom_data[bot_index_top_left] * (1 - hr)*(1 - wr)
+ bottom_data[bot_index_top_right] * (1 - hr)*(wr)
+bottom_data[bot_index_bot_left] * (hr)*(1 - wr)
+ bottom_data[bot_index_bot_right] * (hr)*(wr);
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = top[0]->count();
switch (this->layer_param_.resize_param().interpolation())
{
case ResizeParameter_InterpolationMethod_NN:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
NNInterporationForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, top_data, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
case ResizeParameter_InterpolationMethod_LINEAR:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
LinearInterporationForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, top_data, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void NNInterporationBackward(const int nthreads,
Dtype* bottom_diff, const Dtype* top_diff, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
int h_ = __max(0, __min(bot_h - 1, int(bot_hDtop_h*(ph + 1) - 0.5)));
int w_ = __max(0, __min(bot_w - 1, int(bot_wDtop_w*(pw + 1) - 0.5)));
int bot_index = n*bot_chw + c*bot_hw + h_*bot_w + w_;
caffe_gpu_atomic_add(top_diff[index], bottom_diff + bot_index);
}
}
template <typename Dtype>
__global__ void LinearInterporationBackward(const int nthreads,
Dtype* bottom_diff, const Dtype* top_diff, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
Dtype h_ = __max(0, __min(bot_h - 1, bot_hDtop_h*(ph + 1) - 1.5));
Dtype w_ = __max(0, __min(bot_w - 1, bot_wDtop_w*(pw + 1) - 1.5));
int top = __max(0, __min(bot_h - 1, int(h_)));
int bot = __max(0, __min(bot_h - 1, top + 1));
int left = __max(0, __min(bot_w - 1, int(w_)));
int right = __max(0, __min(bot_w - 1, left + 1));
Dtype hr = __max(0, __min(1, h_ - top));
Dtype wr = __max(0, __min(1, w_ - left));
int bot_index_top_left = n*bot_chw + c*bot_hw + top*bot_w + left;
int bot_index_top_right = n*bot_chw + c*bot_hw + top*bot_w + right;
int bot_index_bot_left = n*bot_chw + c*bot_hw + bot*bot_w + left;
int bot_index_bot_right = n*bot_chw + c*bot_hw + bot*bot_w + right;
caffe_gpu_atomic_add(top_diff[index] * (1 - hr)*(1 - wr), bottom_diff + bot_index_top_left);
caffe_gpu_atomic_add(top_diff[index] * (1 - hr)*(wr), bottom_diff + bot_index_top_right);
caffe_gpu_atomic_add(top_diff[index] * (hr)*(1 - wr), bottom_diff + bot_index_bot_left);
caffe_gpu_atomic_add(top_diff[index] * (hr)*(wr), bottom_diff + bot_index_bot_right);
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = top[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
switch (this->layer_param_.resize_param().interpolation())
{
case ResizeParameter_InterpolationMethod_NN:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
NNInterporationBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_diff, top_diff, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
case ResizeParameter_InterpolationMethod_LINEAR:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
LinearInterporationBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_diff, top_diff, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
| d584f926fed70552f830ac21aa92fa6b13d17aae.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/resize_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void NNInterporationForward(const int nthreads,
const Dtype* bottom_data, Dtype* top_data, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
int h_ = __max(0, __min(bot_h - 1, int(bot_hDtop_h*(ph + 1) - 0.5)));
int w_ = __max(0, __min(bot_w - 1, int(bot_wDtop_w*(pw + 1) - 0.5)));
int bot_index = n*bot_chw + c*bot_hw + h_*bot_w + w_;
top_data[index] = bottom_data[bot_index];
}
}
template <typename Dtype>
__global__ void LinearInterporationForward(const int nthreads,
const Dtype* bottom_data, Dtype* top_data, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
Dtype h_ = __max(0, __min(bot_h - 1, bot_hDtop_h*(ph + 1) - 1.5));
Dtype w_ = __max(0, __min(bot_w - 1, bot_wDtop_w*(pw + 1) - 1.5));
int top = __max(0, __min(bot_h - 1, int(h_)));
int bot = __max(0, __min(bot_h - 1, top + 1));
int left = __max(0, __min(bot_w - 1, int(w_)));
int right = __max(0, __min(bot_w - 1, left + 1));
Dtype hr = __max(0, __min(1, h_ - top));
Dtype wr = __max(0, __min(1, w_ - left));
int bot_index_top_left = n*bot_chw + c*bot_hw + top*bot_w + left;
int bot_index_top_right = n*bot_chw + c*bot_hw + top*bot_w + right;
int bot_index_bot_left = n*bot_chw + c*bot_hw + bot*bot_w + left;
int bot_index_bot_right = n*bot_chw + c*bot_hw + bot*bot_w + right;
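    // Bilinear blend of the four neighbouring bottom pixels, weighted by the
    // fractional offsets: value = TL*(1-hr)*(1-wr) + TR*(1-hr)*wr
    // + BL*hr*(1-wr) + BR*hr*wr, with hr, wr in [0, 1].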
top_data[index] = bottom_data[bot_index_top_left] * (1 - hr)*(1 - wr)
+ bottom_data[bot_index_top_right] * (1 - hr)*(wr)
+bottom_data[bot_index_bot_left] * (hr)*(1 - wr)
+ bottom_data[bot_index_bot_right] * (hr)*(wr);
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = top[0]->count();
switch (this->layer_param_.resize_param().interpolation())
{
case ResizeParameter_InterpolationMethod_NN:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
NNInterporationForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, top_data, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
case ResizeParameter_InterpolationMethod_LINEAR:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
LinearInterporationForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, top_data, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void NNInterporationBackward(const int nthreads,
Dtype* bottom_diff, const Dtype* top_diff, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
int h_ = __max(0, __min(bot_h - 1, int(bot_hDtop_h*(ph + 1) - 0.5)));
int w_ = __max(0, __min(bot_w - 1, int(bot_wDtop_w*(pw + 1) - 0.5)));
int bot_index = n*bot_chw + c*bot_hw + h_*bot_w + w_;
caffe_gpu_atomic_add(top_diff[index], bottom_diff + bot_index);
}
}
template <typename Dtype>
__global__ void LinearInterporationBackward(const int nthreads,
Dtype* bottom_diff, const Dtype* top_diff, const int num, const int channels,
const int bot_h, const int bot_w, const int bot_hw, const int bot_chw, const int top_height_, const int top_width_) {
Dtype bot_hDtop_h = (Dtype)bot_h / top_height_;
Dtype bot_wDtop_w = (Dtype)bot_w / top_width_;
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % top_width_;
const int ph = (index / top_width_) % top_height_;
const int c = (index / top_width_ / top_height_) % channels;
const int n = index / top_width_ / top_height_ / channels;
Dtype h_ = __max(0, __min(bot_h - 1, bot_hDtop_h*(ph + 1) - 1.5));
Dtype w_ = __max(0, __min(bot_w - 1, bot_wDtop_w*(pw + 1) - 1.5));
int top = __max(0, __min(bot_h - 1, int(h_)));
int bot = __max(0, __min(bot_h - 1, top + 1));
int left = __max(0, __min(bot_w - 1, int(w_)));
int right = __max(0, __min(bot_w - 1, left + 1));
Dtype hr = __max(0, __min(1, h_ - top));
Dtype wr = __max(0, __min(1, w_ - left));
int bot_index_top_left = n*bot_chw + c*bot_hw + top*bot_w + left;
int bot_index_top_right = n*bot_chw + c*bot_hw + top*bot_w + right;
int bot_index_bot_left = n*bot_chw + c*bot_hw + bot*bot_w + left;
int bot_index_bot_right = n*bot_chw + c*bot_hw + bot*bot_w + right;
caffe_gpu_atomic_add(top_diff[index] * (1 - hr)*(1 - wr), bottom_diff + bot_index_top_left);
caffe_gpu_atomic_add(top_diff[index] * (1 - hr)*(wr), bottom_diff + bot_index_top_right);
caffe_gpu_atomic_add(top_diff[index] * (hr)*(1 - wr), bottom_diff + bot_index_bot_left);
caffe_gpu_atomic_add(top_diff[index] * (hr)*(wr), bottom_diff + bot_index_bot_right);
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = top[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
switch (this->layer_param_.resize_param().interpolation())
{
case ResizeParameter_InterpolationMethod_NN:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
NNInterporationBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_diff, top_diff, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
case ResizeParameter_InterpolationMethod_LINEAR:
{
int bot_chw = bottom[0]->count() / bottom[0]->num();
int bot_hw = bot_chw / bottom[0]->channels();
int bot_w = bottom[0]->width();
int bot_h = bottom[0]->height();
LinearInterporationBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_diff, top_diff, bottom[0]->num(), bottom[0]->channels(), bot_h, bot_w, bot_hw, bot_chw,
top_height_, top_width_);
break;
}
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ResizeLayer);
} // namespace caffe
|
65708210af56a64366b3053fa9d17cbbc9a035f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../ops.cuh"
// feature dim = 1
__global__ void softmax_v(const float *features, const int *pointer,
const int *indices, float *__restrict__ next_layer) {
int neighbor_offset = pointer[blockIdx.x];
int degree = pointer[blockIdx.x + 1] - neighbor_offset;
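  // One warp handles this block's node (assuming a 32-thread launch): it
  // strides over the neighbour list in steps of 32, reducing a running max,
  // then the sum of exponentials, then writing the normalised values.
  // The running max starts at 0, so the shift used is max(0, max_j x_j); the
  // shift cancels algebraically, though very negative inputs could underflow.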
float max_local = 0.0f;
for (int i = 0; i < degree / 32; i++) {
max_local = max(features[indices[neighbor_offset + i * 32 + threadIdx.x]],
max_local);
}
if (threadIdx.x < degree % 32) {
max_local = max(features[indices[neighbor_offset + degree - (degree % 32) +
threadIdx.x]],
max_local);
}
for (int offset = 16; offset > 0; offset /= 2) {
max_local = max(__shfl_down_sync(FULL_MASK, max_local, offset), max_local);
}
max_local = __shfl_sync(FULL_MASK, max_local, 0);
float exp_local = 0.0f;
for (int i = 0; i < degree / 32; i++) {
exp_local += expf(
features[indices[neighbor_offset + i * 32 + threadIdx.x]] - max_local);
}
if (threadIdx.x < degree % 32) {
exp_local += expf(features[indices[neighbor_offset + degree -
(degree % 32) + threadIdx.x]] -
max_local);
}
for (int offset = 16; offset > 0; offset /= 2) {
exp_local += __shfl_down_sync(FULL_MASK, exp_local, offset);
}
exp_local = __shfl_sync(FULL_MASK, exp_local, 0);
for (int i = 0; i < degree / 32; i++) {
int neighbor = indices[neighbor_offset + i * 32 + threadIdx.x];
next_layer[neighbor] = expf(features[neighbor] - max_local) / exp_local;
}
if (threadIdx.x < degree % 32) {
int neighbor =
indices[neighbor_offset + degree - (degree % 32) + threadIdx.x];
next_layer[neighbor] = expf(features[neighbor] - max_local) / exp_local;
}
return;
}
// feature dim > 1, needed for multi head attention
__global__ void softmax_m(const float *features, const int *pointer,
const int *indices, float *__restrict__ next_layer) {} | 65708210af56a64366b3053fa9d17cbbc9a035f8.cu | #include "../ops.cuh"
// feature dim = 1
__global__ void softmax_v(const float *features, const int *pointer,
const int *indices, float *__restrict__ next_layer) {
int neighbor_offset = pointer[blockIdx.x];
int degree = pointer[blockIdx.x + 1] - neighbor_offset;
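  // One warp handles this block's node (assuming a 32-thread launch): it
  // strides over the neighbour list in steps of 32, reducing a running max,
  // then the sum of exponentials, then writing the normalised values.
  // The running max starts at 0, so the shift used is max(0, max_j x_j); the
  // shift cancels algebraically, though very negative inputs could underflow.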
float max_local = 0.0f;
for (int i = 0; i < degree / 32; i++) {
max_local = max(features[indices[neighbor_offset + i * 32 + threadIdx.x]],
max_local);
}
if (threadIdx.x < degree % 32) {
max_local = max(features[indices[neighbor_offset + degree - (degree % 32) +
threadIdx.x]],
max_local);
}
for (int offset = 16; offset > 0; offset /= 2) {
max_local = max(__shfl_down_sync(FULL_MASK, max_local, offset), max_local);
}
max_local = __shfl_sync(FULL_MASK, max_local, 0);
float exp_local = 0.0f;
for (int i = 0; i < degree / 32; i++) {
exp_local += expf(
features[indices[neighbor_offset + i * 32 + threadIdx.x]] - max_local);
}
if (threadIdx.x < degree % 32) {
exp_local += expf(features[indices[neighbor_offset + degree -
(degree % 32) + threadIdx.x]] -
max_local);
}
for (int offset = 16; offset > 0; offset /= 2) {
exp_local += __shfl_down_sync(FULL_MASK, exp_local, offset);
}
exp_local = __shfl_sync(FULL_MASK, exp_local, 0);
for (int i = 0; i < degree / 32; i++) {
int neighbor = indices[neighbor_offset + i * 32 + threadIdx.x];
next_layer[neighbor] = expf(features[neighbor] - max_local) / exp_local;
}
if (threadIdx.x < degree % 32) {
int neighbor =
indices[neighbor_offset + degree - (degree % 32) + threadIdx.x];
next_layer[neighbor] = expf(features[neighbor] - max_local) / exp_local;
}
return;
}
// feature dim > 1, needed for multi head attention
__global__ void softmax_m(const float *features, const int *pointer,
const int *indices, float *__restrict__ next_layer) {} |
f42a2ea6d7bfe7c914da8ab1682147285453aaf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "include/setup.cuh"
#include "../cpp/include/files.h"
#include "../include/structs.h"
#include <iostream>
#include <iomanip>
#include <math.h>
#include <vector>
#include <string>
#include <hip/hip_runtime.h>
#include <fstream>
#include <time.h>
__global__ void wKernel(int Lx, int Ly, const prec* __restrict__ h,
const prec* __restrict__ b, prec* w) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < Lx*Ly) {
w[i] = h[i] + b[i];
}
}
#if IN == 1
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
int nt[9];
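	/* nt[] appears to encode wet/dry state: nt[a] = 2 if cell/neighbour a is
	 * wet, 0 if dry, and nt[0] drops to 1 when a wet cell has at least one
	 * dry in-domain neighbour. trilocal[a-1] then selects, per lattice
	 * direction a, normal streaming with the bed-slope source term (1),
	 * bounce-back (2), or keeping the cell's own distribution (0). */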
if (i < size) {
hlocal[0] = h[i];
if (hlocal[0] > 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
for (int a = 0; a < 9; a++){
nt[a] = 0;
if (hlocal[a] > 0) nt[a] = 2;
}
for (int a = 1; a < 9; a++){
if (!((y == 0 && (a == 2 || a == 5 || a == 6)) ||
(y == Ly-1 && (a == 4 || a == 7 || a == 8)) ||
(x == 0 && (a == 1 || a == 5 || a == 8)) ||
(x == Lx-1 && (a == 3 || a == 6 || a == 7))))
if (nt[a] == 0) nt[0] = 1;
}
for (int a = 0; a<8; a++) trilocal[a] = 0;
if (nt[0] == 2) {
if (y == 0) {
if (x == 0) {
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
}
else {
trilocal[0] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
trilocal[7] = 1;
}
}
else if (y == Ly - 1) {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[5] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
}
else {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[4] = 1;
trilocal[5] = 1;
}
}
else {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[5] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[3] = 1;
trilocal[4] = 1;
trilocal[7] = 1;
}
else {
for (int a = 0; a<8; a++) {
trilocal[a] = 1;
}
}
}
}
else if (nt[0] == 1) {
if (y == 0) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else if (y == Ly - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else {
for (int a = 1; a<9; a++){
if (nt[a] != 0)
trilocal[a-1] = 1;
else
trilocal[a-1] = 2;
}
if (nt[5] == 0 || (nt[5] != 0 && (nt[1] == 0 || nt[2] == 0))) trilocal[4] = 2;
if (nt[6] == 0 || (nt[6] != 0 && (nt[2] == 0 || nt[3] == 0))) trilocal[5] = 2;
if (nt[7] == 0 || (nt[7] != 0 && (nt[3] == 0 || nt[4] == 0))) trilocal[6] = 2;
if (nt[8] == 0 || (nt[8] != 0 && (nt[4] == 0 || nt[1] == 0))) trilocal[7] = 2;
}
}
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#elif IN == 2
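// IN == 2: pull-scheme D2Q9 step in which the node classification is read from a
// precomputed node_types array (0 appears to mark inactive/dry cells, 2 interior wet
// cells, 1 wet cells bordering dry or domain-boundary cells); the per-direction flags
// (1 = stream with bed-slope source, 2 = bounce-back) are rebuilt every time step.
// Variable roles (g gravity, e lattice speed, tau relaxation time, b bed elevation,
// h water depth) are inferred from their usage, not from original documentation.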
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const int* __restrict__ node_types,
const prec* __restrict__ f1, prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
int nt[9];
if (i < size) {
nt[0] = node_types[i];
if (nt[0] != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
for (int a = 0; a<8; a++) trilocal[a] = 0;
if (nt[0] == 2) {
if (y == 0) {
if (x == 0) {
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
}
else {
trilocal[0] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
trilocal[7] = 1;
}
}
else if (y == Ly - 1) {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[5] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
}
else {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[4] = 1;
trilocal[5] = 1;
}
}
else {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[5] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[3] = 1;
trilocal[4] = 1;
trilocal[7] = 1;
}
else {
for (int a = 0; a<8; a++) {
trilocal[a] = 1;
}
}
}
}
else if (nt[0] == 1) {
if (y == 0) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else if (y == Ly - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else {
nt[1] = node_types[i - 1 ];
nt[2] = node_types[i - Lx];
nt[3] = node_types[i + 1 ];
nt[4] = node_types[i + Lx];
nt[5] = node_types[i - Lx - 1];
nt[6] = node_types[i - Lx + 1];
nt[7] = node_types[i + Lx + 1];
nt[8] = node_types[i + Lx - 1];
for (int a = 1; a<9; a++){
if (nt[a] != 0)
trilocal[a-1] = 1;
else
trilocal[a-1] = 2;
}
if (nt[5] == 0 || (nt[5] == 1 && (nt[1] == 0 || nt[2] == 0))) trilocal[4] = 2;
if (nt[6] == 0 || (nt[6] == 1 && (nt[2] == 0 || nt[3] == 0))) trilocal[5] = 2;
if (nt[7] == 0 || (nt[7] == 1 && (nt[3] == 0 || nt[4] == 0))) trilocal[6] = 2;
if (nt[8] == 0 || (nt[8] == 1 && (nt[4] == 0 || nt[1] == 0))) trilocal[7] = 2;
}
}
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#elif IN == 3
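// IN == 3: same update as above, but the per-direction flags are precomputed by
// auxArraysKernel and stored in Arr_tri (one byte per direction per node), so the
// kernel only reads them instead of reclassifying the stencil each step.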
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const unsigned char* __restrict__ Arr_tri,
const prec* __restrict__ f1, prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
if (i < size) {
int check = 0;
for (j = 1; j < 9; j++)
trilocal[j-1] = Arr_tri[i + j * size];
for (j = 0; j < 8; j++)
check += trilocal[j];
if (check != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#else
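// Default variant: the per-direction flags are bit-packed into two bytes per node,
// SC_bin (bit a set => stream direction a+1 with the bed-slope source term) and
// BB_bin (bit a set => bounce-back for direction a+1), so only two flag bytes are
// read per cell.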
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const unsigned char* __restrict__ SC_bin,
const unsigned char* __restrict__ BB_bin, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char SC,BB;
if (i < size) {
SC = SC_bin[i];
BB = BB_bin[i];
if(SC + BB != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
ftemp[0] = f1[i];
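// The BN switch selects how the per-direction flags are applied: BN == 1 uses plain
// if/else branches, BN == 2 uses ternary selects, and the default path is branch-free,
// multiplying each alternative by a 0/1 predicate instead of branching.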
#if BN == 1
if((SC>>0) & 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if((SC>>1) & 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if((SC>>2) & 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if((SC>>3) & 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if((SC>>4) & 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if((SC>>5) & 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if((SC>>6) & 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if((SC>>7) & 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if((BB>>(0)) & 1) ftemp[1] = ftemp[3];
if((BB>>(1)) & 1) ftemp[2] = ftemp[4];
if((BB>>(2)) & 1) ftemp[3] = ftemp[1];
if((BB>>(3)) & 1) ftemp[4] = ftemp[2];
if((BB>>(4)) & 1) ftemp[5] = ftemp[7];
if((BB>>(5)) & 1) ftemp[6] = ftemp[8];
if((BB>>(6)) & 1) ftemp[7] = ftemp[5];
if((BB>>(7)) & 1) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = ((SC>>0) & 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = ((SC>>1) & 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = ((SC>>2) & 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = ((SC>>3) & 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = ((SC>>4) & 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = ((SC>>5) & 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = ((SC>>6) & 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = ((SC>>7) & 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = ((BB>>(0)) & 1) ? ftemp[3] : ftemp[1];
ftemp[2] = ((BB>>(1)) & 1) ? ftemp[4] : ftemp[2];
ftemp[3] = ((BB>>(2)) & 1) ? ftemp[1] : ftemp[3];
ftemp[4] = ((BB>>(3)) & 1) ? ftemp[2] : ftemp[4];
ftemp[5] = ((BB>>(4)) & 1) ? ftemp[7] : ftemp[5];
ftemp[6] = ((BB>>(5)) & 1) ? ftemp[8] : ftemp[6];
ftemp[7] = ((BB>>(6)) & 1) ? ftemp[5] : ftemp[7];
ftemp[8] = ((BB>>(7)) & 1) ? ftemp[6] : ftemp[8];
#else
//int x = i%Lx, y = i/Lx;
ftemp[1] = ((SC>>0) & 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS ) + !((SC>>0) & 1) * f1[i + size];
ftemp[2] = ((SC>>1) & 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS ) + !((SC>>1) & 1) * f1[i + 2 * size];
ftemp[3] = ((SC>>2) & 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS ) + !((SC>>2) & 1) * f1[i + 3 * size];
ftemp[4] = ((SC>>3) & 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS ) + !((SC>>3) & 1) * f1[i + 4 * size];
ftemp[5] = ((SC>>4) & 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + !((SC>>4) & 1) * f1[i + 5 * size];
ftemp[6] = ((SC>>5) & 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + !((SC>>5) & 1) * f1[i + 6 * size];
ftemp[7] = ((SC>>6) & 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + !((SC>>6) & 1) * f1[i + 7 * size];
ftemp[8] = ((SC>>7) & 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + !((SC>>7) & 1) * f1[i + 8 * size];
ftemp[1] += ((BB>>(0)) & 1) * (ftemp[3] - ftemp[1]);
ftemp[2] += ((BB>>(1)) & 1) * (ftemp[4] - ftemp[2]);
ftemp[3] += ((BB>>(2)) & 1) * (ftemp[1] - ftemp[3]);
ftemp[4] += ((BB>>(3)) & 1) * (ftemp[2] - ftemp[4]);
ftemp[5] += ((BB>>(4)) & 1) * (ftemp[7] - ftemp[5]);
ftemp[6] += ((BB>>(5)) & 1) * (ftemp[8] - ftemp[6]);
ftemp[7] += ((BB>>(6)) & 1) * (ftemp[5] - ftemp[7]);
ftemp[8] += ((BB>>(7)) & 1) * (ftemp[6] - ftemp[8]);
#endif
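// Collision step: the depth is the zeroth moment of the streamed distributions, the
// velocities are the first moments, then the D2Q9 shallow-water equilibrium is formed
// and a single-relaxation-time (BGK) update with relaxation time tau is written to f2.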
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#endif
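// Sets every cell's nine distributions to the zero-velocity equilibrium implied by the
// current depth field h (called once at start-up from setup()).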
__global__ void feqKernel(int Lx, int Ly, prec g, prec e,
const prec* __restrict__ h, prec* f) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < Lx*Ly) {
prec hi = h[i];
prec gh1 = g * hi * hi / (6.0 * e * e);
prec gh2 = gh1 / 4;
f[i] = hi - 5.0 * gh1;
f[i + ( Lx*Ly)] = gh1;
f[i + (2 * Lx*Ly)] = gh1;
f[i + (3 * Lx*Ly)] = gh1;
f[i + (4 * Lx*Ly)] = gh1;
f[i + (5 * Lx*Ly)] = gh2;
f[i + (6 * Lx*Ly)] = gh2;
f[i + (7 * Lx*Ly)] = gh2;
f[i + (8 * Lx*Ly)] = gh2;
}
}
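// Copies the free-surface value at each gauge index TSind[i] into row i of the TSdata
// time-series buffer; the column is the current sample number t / deltaTS.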
__global__ void TSkernel(prec* TSdata, const prec* __restrict__ w,
const int* __restrict__ TSind, int t, int deltaTS, int NTS, int TTS) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < NTS) {
int n = t / deltaTS;
TSdata[i*TTS + n] = w[TSind[i]];
}
}
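// Advances one time step. The distribution buffers f1/f2 are ping-ponged on the parity
// of t, the kernel time is accumulated into *msecs via events, and every deltaTS steps
// the free surface is refreshed and sampled at the gauges.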
void LBMTimeStep(mainDStruct devi, cudaStruct devEx, int t, int deltaTS, hipEvent_t ct1, hipEvent_t ct2, prec *msecs) {
float dt;
if (t % 2 == 0){
hipEventRecord(ct1);
#if IN == 1
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.f1, devEx.f2, devEx.h);
#elif IN == 2
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devi.node_types, devEx.f1, devEx.f2, devEx.h);
#elif IN == 3
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.Arr_tri, devEx.f1, devEx.f2, devEx.h);
#else
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.SC_bin, devEx.BB_bin, devEx.f1, devEx.f2, devEx.h);
#endif
}
else{
hipEventRecord(ct1);
#if IN == 1
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.f2, devEx.f1, devEx.h);
#elif IN == 2
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devi.node_types, devEx.f2, devEx.f1, devEx.h);
#elif IN == 3
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.Arr_tri, devEx.f2, devEx.f1, devEx.h);
#else
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.SC_bin, devEx.BB_bin, devEx.f2, devEx.f1, devEx.h);
#endif
}
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
*msecs += dt;
if (t%deltaTS == 0) {
wKernel << <devi.Ngrid, devi.Nblocks >> >(devi.Lx, devi.Ly, devEx.h, devi.b, devi.w);
TSkernel << <devi.NTS, 1 >> > (devi.TSdata, devi.w, devi.TSind, t, deltaTS, devi.NTS, devi.TTS);
}
}
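// One-time initialization: build the auxiliary flag arrays for IN == 3 / IN == 4,
// compute the initial depth from the free surface and bed, set the distributions to
// equilibrium and record the first time-series sample.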
void setup(mainDStruct devi, cudaStruct devEx, int deltaTS) {
#if IN == 3
auxArraysKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.ex, devEx.ey, devi.node_types,
devEx.Arr_tri);
#elif IN == 4
auxArraysKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.ex, devEx.ey, devi.node_types,
devEx.SC_bin, devEx.BB_bin);
#endif
hKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devi.w, devi.b, devEx.h);
feqKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.h, devEx.f1);
TSkernel << <devi.NTS, 1 >> > (devi.TSdata, devi.w, devi.TSind, 0, deltaTS, devi.NTS, devi.TTS);
}
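// Refreshes w = h + b on the device, copies it to the host and writes one snapshot.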
void copyAndWriteResultData(mainHStruct host, mainDStruct devi, cudaStruct devEx, int t, std::string outputdir) {
wKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.h, devi.b, devi.w);
hipMemcpy(host.w, devi.w, devi.Lx*devi.Ly * sizeof(prec), hipMemcpyDeviceToHost);
writeOutput(devi.Lx*devi.Ly, t, host.w, outputdir);
}
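// Copies the accumulated gauge time series to the host and writes it to disk.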
void copyAndWriteTSData(mainHStruct host, mainDStruct devi, int deltaTS, prec Dt, std::string outputdir) {
hipMemcpy(host.TSdata, devi.TSdata, devi.TTS*devi.NTS * sizeof(prec), hipMemcpyDeviceToHost);
writeTS(devi.TTS, devi.NTS, deltaTS, Dt, host.TSdata, outputdir);
}
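// Main driver: prefer L1 cache for the heavy kernels, run tMax steps while printing
// progress and writing snapshots every deltaOutput steps, then write the final
// snapshot, the gauge time series and the timing summary.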
void LBM(mainHStruct host, mainDStruct devi, cudaStruct devEx, int* time_array, prec Dt, std::string outputdir) {
hipFuncSetCacheConfig(LBMpull, hipFuncCachePreferL1);
hipFuncSetCacheConfig(feqKernel, hipFuncCachePreferL1);
int tMax = time_array[0];
int deltaOutput = time_array[1];
int deltaTS = time_array[2];
int t = 0;
hipEvent_t ct1, ct2;
hipEventCreate(&ct1);
hipEventCreate(&ct2);
prec msecs = 0;
setup(devi, devEx, deltaTS);
std::cout << std::fixed << std::setprecision(1);
while (t <= tMax) {
LBMTimeStep(devi, devEx, t, deltaTS, ct1, ct2, &msecs);
t++;
if (deltaOutput != 0 && t%deltaOutput == 0) {
std::cout << "\rTime step: " << t << " (" << 100.0*t / tMax << "%)";
copyAndWriteResultData(host, devi, devEx, t, outputdir);
}
}
copyAndWriteResultData(host, devi, devEx, t, outputdir);
copyAndWriteTSData(host, devi, deltaTS, Dt, outputdir);
std::cout << std::endl << "Tiempo total: " << msecs << "[ms]" << std::endl;
std::cout << std::endl << "Tiempo promedio por iteracion: " << msecs / tMax << "[ms]" << std::endl;
hipEventDestroy(ct1);
hipEventDestroy(ct2);
}
| f42a2ea6d7bfe7c914da8ab1682147285453aaf8.cu | #include "include/setup.cuh"
#include "../cpp/include/files.h"
#include "../include/structs.h"
#include <iostream>
#include <iomanip>
#include <math.h>
#include <vector>
#include <string>
#include <cuda_runtime.h>
#include <fstream>
#include <time.h>
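// Free-surface kernel: w = h + b (water depth plus bed elevation) at every cell.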
__global__ void wKernel(int Lx, int Ly, const prec* __restrict__ h,
const prec* __restrict__ b, prec* w) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < Lx*Ly) {
w[i] = h[i] + b[i];
}
}
#if IN == 1
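// IN == 1: no precomputed classification; each step the kernel decides from the depth
// field h which cells are wet, flags wet cells that touch a dry in-domain neighbour,
// and applies bounce-back toward those neighbours.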
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
int nt[9];
if (i < size) {
hlocal[0] = h[i];
if (hlocal[0] > 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
for (int a = 0; a < 9; a++){
nt[a] = 0;
if (hlocal[a] > 0) nt[a] = 2;
}
for (int a = 1; a < 9; a++){
if (!((y == 0 && (a == 2 || a == 5 || a == 6)) ||
(y == Ly-1 && (a == 4 || a == 7 || a == 8)) ||
(x == 0 && (a == 1 || a == 5 || a == 8)) ||
(x == Lx-1 && (a == 3 || a == 6 || a == 7))))
if (nt[a] == 0) nt[0] = 1;
}
for (int a = 0; a<8; a++) trilocal[a] = 0;
if (nt[0] == 2) {
if (y == 0) {
if (x == 0) {
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
}
else {
trilocal[0] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
trilocal[7] = 1;
}
}
else if (y == Ly - 1) {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[5] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
}
else {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[4] = 1;
trilocal[5] = 1;
}
}
else {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[5] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[3] = 1;
trilocal[4] = 1;
trilocal[7] = 1;
}
else {
for (int a = 0; a<8; a++) {
trilocal[a] = 1;
}
}
}
}
else if (nt[0] == 1) {
if (y == 0) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else if (y == Ly - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else {
for (int a = 1; a<9; a++){
if (nt[a] != 0)
trilocal[a-1] = 1;
else
trilocal[a-1] = 2;
}
if (nt[5] == 0 || (nt[5] != 0 && (nt[1] == 0 || nt[2] == 0))) trilocal[4] = 2;
if (nt[6] == 0 || (nt[6] != 0 && (nt[2] == 0 || nt[3] == 0))) trilocal[5] = 2;
if (nt[7] == 0 || (nt[7] != 0 && (nt[3] == 0 || nt[4] == 0))) trilocal[6] = 2;
if (nt[8] == 0 || (nt[8] != 0 && (nt[4] == 0 || nt[1] == 0))) trilocal[7] = 2;
}
}
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#elif IN == 2
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const int* __restrict__ node_types,
const prec* __restrict__ f1, prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
int nt[9];
if (i < size) {
nt[0] = node_types[i];
if (nt[0] != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
for (int a = 0; a<8; a++) trilocal[a] = 0;
if (nt[0] == 2) {
if (y == 0) {
if (x == 0) {
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
}
else {
trilocal[0] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[6] = 1;
trilocal[7] = 1;
}
}
else if (y == Ly - 1) {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[5] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
}
else {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[4] = 1;
trilocal[5] = 1;
}
}
else {
if (x == 0) {
trilocal[1] = 1;
trilocal[2] = 1;
trilocal[3] = 1;
trilocal[5] = 1;
trilocal[6] = 1;
}
else if (x == Lx - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[3] = 1;
trilocal[4] = 1;
trilocal[7] = 1;
}
else {
for (int a = 0; a<8; a++) {
trilocal[a] = 1;
}
}
}
}
else if (nt[0] == 1) {
if (y == 0) {
trilocal[0] = 1;
trilocal[3] = 1;
trilocal[7] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else if (y == Ly - 1) {
trilocal[0] = 1;
trilocal[1] = 1;
trilocal[4] = 1;
trilocal[2] = 2;
trilocal[5] = 2;
trilocal[6] = 2;
}
else {
nt[1] = node_types[i - 1 ];
nt[2] = node_types[i - Lx];
nt[3] = node_types[i + 1 ];
nt[4] = node_types[i + Lx];
nt[5] = node_types[i - Lx - 1];
nt[6] = node_types[i - Lx + 1];
nt[7] = node_types[i + Lx + 1];
nt[8] = node_types[i + Lx - 1];
for (int a = 1; a<9; a++){
if (nt[a] != 0)
trilocal[a-1] = 1;
else
trilocal[a-1] = 2;
}
if (nt[5] == 0 || (nt[5] == 1 && (nt[1] == 0 || nt[2] == 0))) trilocal[4] = 2;
if (nt[6] == 0 || (nt[6] == 1 && (nt[2] == 0 || nt[3] == 0))) trilocal[5] = 2;
if (nt[7] == 0 || (nt[7] == 1 && (nt[3] == 0 || nt[4] == 0))) trilocal[6] = 2;
if (nt[8] == 0 || (nt[8] == 1 && (nt[4] == 0 || nt[1] == 0))) trilocal[7] = 2;
}
}
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#elif IN == 3
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const unsigned char* __restrict__ Arr_tri,
const prec* __restrict__ f1, prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char trilocal[8];
if (i < size) {
int check = 0;
for (j = 1; j < 9; j++)
trilocal[j-1] = Arr_tri[i + j * size];
for (j = 0; j < 8; j++)
check += trilocal[j];
if (check != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
ftemp[0] = f1[i];
#if BN == 1
if(trilocal[0] == 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if(trilocal[1] == 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if(trilocal[2] == 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if(trilocal[3] == 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if(trilocal[4] == 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if(trilocal[5] == 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if(trilocal[6] == 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if(trilocal[7] == 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if(trilocal[0] == 2) ftemp[1] = ftemp[3];
if(trilocal[1] == 2) ftemp[2] = ftemp[4];
if(trilocal[2] == 2) ftemp[3] = ftemp[1];
if(trilocal[3] == 2) ftemp[4] = ftemp[2];
if(trilocal[4] == 2) ftemp[5] = ftemp[7];
if(trilocal[5] == 2) ftemp[6] = ftemp[8];
if(trilocal[6] == 2) ftemp[7] = ftemp[5];
if(trilocal[7] == 2) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = (trilocal[0] == 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = (trilocal[1] == 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = (trilocal[0] == 2) ? ftemp[3] : ftemp[1];
ftemp[2] = (trilocal[1] == 2) ? ftemp[4] : ftemp[2];
ftemp[3] = (trilocal[2] == 2) ? ftemp[1] : ftemp[3];
ftemp[4] = (trilocal[3] == 2) ? ftemp[2] : ftemp[4];
ftemp[5] = (trilocal[4] == 2) ? ftemp[7] : ftemp[5];
ftemp[6] = (trilocal[5] == 2) ? ftemp[8] : ftemp[6];
ftemp[7] = (trilocal[6] == 2) ? ftemp[5] : ftemp[7];
ftemp[8] = (trilocal[7] == 2) ? ftemp[6] : ftemp[8];
#else
ftemp[1] = (trilocal[0] == 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) + (trilocal[0] != 1) * f1[i + size];
ftemp[2] = (trilocal[1] == 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) + (trilocal[1] != 1) * f1[i + 2 * size];
ftemp[3] = (trilocal[2] == 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) + (trilocal[2] != 1) * f1[i + 3 * size];
ftemp[4] = (trilocal[3] == 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) + (trilocal[3] != 1) * f1[i + 4 * size];
ftemp[5] = (trilocal[4] == 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + (trilocal[4] != 1) * f1[i + 5 * size];
ftemp[6] = (trilocal[5] == 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + (trilocal[5] != 1) * f1[i + 6 * size];
ftemp[7] = (trilocal[6] == 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + (trilocal[6] != 1) * f1[i + 7 * size];
ftemp[8] = (trilocal[7] == 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + (trilocal[7] != 1) * f1[i + 8 * size];
ftemp[1] += (trilocal[0] == 2) * (ftemp[3] - ftemp[1]);
ftemp[2] += (trilocal[1] == 2) * (ftemp[4] - ftemp[2]);
ftemp[3] += (trilocal[2] == 2) * (ftemp[1] - ftemp[3]);
ftemp[4] += (trilocal[3] == 2) * (ftemp[2] - ftemp[4]);
ftemp[5] += (trilocal[4] == 2) * (ftemp[7] - ftemp[5]);
ftemp[6] += (trilocal[5] == 2) * (ftemp[8] - ftemp[6]);
ftemp[7] += (trilocal[6] == 2) * (ftemp[5] - ftemp[7]);
ftemp[8] += (trilocal[7] == 2) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
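// D2Q9 equilibrium distributions for the shallow-water LBM: rest population feq[0],
// axis-aligned links 1-4 (weight fact1) and diagonal links 5-8 (weight fact2), built from
// the local depth, the gravity term gh and the velocity projections ux3, uy3, uxuy5, uxuy6.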
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#else
__global__ void LBMpull(int Lx, int Ly, prec g, prec e, prec tau,
const prec* __restrict__ b, const unsigned char* __restrict__ SC_bin,
const unsigned char* __restrict__ BB_bin, const prec* __restrict__ f1,
prec* f2, prec* h) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int size = Lx * Ly, j;
prec ftemp[9], feq[9];
prec uxlocal, uylocal;
prec hlocal[9], blocal[9];
prec gh, usq, ux3, uy3, uxuy5, uxuy6;
prec fact1 = 1 / (9 * e*e);
prec fact2 = fact1 * 0.25;
prec factS = fact1 * 1.5;
unsigned char SC,BB;
if (i < size) {
SC = SC_bin[i];
BB = BB_bin[i];
if(SC + BB != 0){
int y = (int)i / Lx;
int x = i - y * Lx;
blocal[0] = b[i];
blocal[1] = ( x != 0 ) ? b[i - 1] : 0;
blocal[2] = (y != 0 ) ? b[i - Lx ] : 0;
blocal[3] = ( x != Lx-1) ? b[i + 1] : 0;
blocal[4] = (y != Ly-1 ) ? b[i + Lx ] : 0;
blocal[5] = (y != 0 && x != 0 ) ? b[i - Lx - 1] : 0;
blocal[6] = (y != 0 && x != Lx-1) ? b[i - Lx + 1] : 0;
blocal[7] = (y != Ly-1 && x != Lx-1) ? b[i + Lx + 1] : 0;
blocal[8] = (y != Ly-1 && x != 0 ) ? b[i + Lx - 1] : 0;
hlocal[0] = h[i];
hlocal[1] = ( x != 0 ) ? h[i - 1] : 0;
hlocal[2] = (y != 0 ) ? h[i - Lx ] : 0;
hlocal[3] = ( x != Lx-1) ? h[i + 1] : 0;
hlocal[4] = (y != Ly-1 ) ? h[i + Lx ] : 0;
hlocal[5] = (y != 0 && x != 0 ) ? h[i - Lx - 1] : 0;
hlocal[6] = (y != 0 && x != Lx-1) ? h[i - Lx + 1] : 0;
hlocal[7] = (y != Ly-1 && x != Lx-1) ? h[i + Lx + 1] : 0;
hlocal[8] = (y != Ly-1 && x != 0 ) ? h[i + Lx - 1] : 0;
ftemp[1] = ( x != 0 ) ? f1[i - 1 + size] : 0;
ftemp[2] = (y != 0 ) ? f1[i - Lx + 2 * size] : 0;
ftemp[3] = ( x != Lx-1) ? f1[i + 1 + 3 * size] : 0;
ftemp[4] = (y != Ly-1 ) ? f1[i + Lx + 4 * size] : 0;
ftemp[5] = (y != 0 && x != 0 ) ? f1[i - Lx - 1 + 5 * size] : 0;
ftemp[6] = (y != 0 && x != Lx-1) ? f1[i - Lx + 1 + 6 * size] : 0;
ftemp[7] = (y != Ly-1 && x != Lx-1) ? f1[i + Lx + 1 + 7 * size] : 0;
ftemp[8] = (y != Ly-1 && x != 0 ) ? f1[i + Lx - 1 + 8 * size] : 0;
ftemp[0] = f1[i];
#if BN == 1
if((SC>>0) & 1) ftemp[1] = ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS; else ftemp[1] = f1[i + size];
if((SC>>1) & 1) ftemp[2] = ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS; else ftemp[2] = f1[i + 2 * size];
if((SC>>2) & 1) ftemp[3] = ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS; else ftemp[3] = f1[i + 3 * size];
if((SC>>3) & 1) ftemp[4] = ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS; else ftemp[4] = f1[i + 4 * size];
if((SC>>4) & 1) ftemp[5] = ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25; else ftemp[5] = f1[i + 5 * size];
if((SC>>5) & 1) ftemp[6] = ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25; else ftemp[6] = f1[i + 6 * size];
if((SC>>6) & 1) ftemp[7] = ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25; else ftemp[7] = f1[i + 7 * size];
if((SC>>7) & 1) ftemp[8] = ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25; else ftemp[8] = f1[i + 8 * size];
if((BB>>(0)) & 1) ftemp[1] = ftemp[3];
if((BB>>(1)) & 1) ftemp[2] = ftemp[4];
if((BB>>(2)) & 1) ftemp[3] = ftemp[1];
if((BB>>(3)) & 1) ftemp[4] = ftemp[2];
if((BB>>(4)) & 1) ftemp[5] = ftemp[7];
if((BB>>(5)) & 1) ftemp[6] = ftemp[8];
if((BB>>(6)) & 1) ftemp[7] = ftemp[5];
if((BB>>(7)) & 1) ftemp[8] = ftemp[6];
#elif BN == 2
ftemp[1] = ((SC>>0) & 1) ? (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS) : f1[i + size];
ftemp[2] = ((SC>>1) & 1) ? (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS) : f1[i + 2 * size];
ftemp[3] = ((SC>>2) & 1) ? (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS) : f1[i + 3 * size];
ftemp[4] = ((SC>>3) & 1) ? (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS) : f1[i + 4 * size];
ftemp[5] = ((SC>>4) & 1) ? (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) : f1[i + 5 * size];
ftemp[6] = ((SC>>5) & 1) ? (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) : f1[i + 6 * size];
ftemp[7] = ((SC>>6) & 1) ? (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) : f1[i + 7 * size];
ftemp[8] = ((SC>>7) & 1) ? (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) : f1[i + 8 * size];
ftemp[1] = ((BB>>(0)) & 1) ? ftemp[3] : ftemp[1];
ftemp[2] = ((BB>>(1)) & 1) ? ftemp[4] : ftemp[2];
ftemp[3] = ((BB>>(2)) & 1) ? ftemp[1] : ftemp[3];
ftemp[4] = ((BB>>(3)) & 1) ? ftemp[2] : ftemp[4];
ftemp[5] = ((BB>>(4)) & 1) ? ftemp[7] : ftemp[5];
ftemp[6] = ((BB>>(5)) & 1) ? ftemp[8] : ftemp[6];
ftemp[7] = ((BB>>(6)) & 1) ? ftemp[5] : ftemp[7];
ftemp[8] = ((BB>>(7)) & 1) ? ftemp[6] : ftemp[8];
#else
//int x = i%Lx, y = i/Lx;
ftemp[1] = ((SC>>0) & 1) * (ftemp[1] - g * (hlocal[0] + hlocal[1]) * (blocal[0] - blocal[1]) * factS ) + !((SC>>0) & 1) * f1[i + size];
ftemp[2] = ((SC>>1) & 1) * (ftemp[2] - g * (hlocal[0] + hlocal[2]) * (blocal[0] - blocal[2]) * factS ) + !((SC>>1) & 1) * f1[i + 2 * size];
ftemp[3] = ((SC>>2) & 1) * (ftemp[3] - g * (hlocal[0] + hlocal[3]) * (blocal[0] - blocal[3]) * factS ) + !((SC>>2) & 1) * f1[i + 3 * size];
ftemp[4] = ((SC>>3) & 1) * (ftemp[4] - g * (hlocal[0] + hlocal[4]) * (blocal[0] - blocal[4]) * factS ) + !((SC>>3) & 1) * f1[i + 4 * size];
ftemp[5] = ((SC>>4) & 1) * (ftemp[5] - g * (hlocal[0] + hlocal[5]) * (blocal[0] - blocal[5]) * factS * 0.25) + !((SC>>4) & 1) * f1[i + 5 * size];
ftemp[6] = ((SC>>5) & 1) * (ftemp[6] - g * (hlocal[0] + hlocal[6]) * (blocal[0] - blocal[6]) * factS * 0.25) + !((SC>>5) & 1) * f1[i + 6 * size];
ftemp[7] = ((SC>>6) & 1) * (ftemp[7] - g * (hlocal[0] + hlocal[7]) * (blocal[0] - blocal[7]) * factS * 0.25) + !((SC>>6) & 1) * f1[i + 7 * size];
ftemp[8] = ((SC>>7) & 1) * (ftemp[8] - g * (hlocal[0] + hlocal[8]) * (blocal[0] - blocal[8]) * factS * 0.25) + !((SC>>7) & 1) * f1[i + 8 * size];
ftemp[1] += ((BB>>(0)) & 1) * (ftemp[3] - ftemp[1]);
ftemp[2] += ((BB>>(1)) & 1) * (ftemp[4] - ftemp[2]);
ftemp[3] += ((BB>>(2)) & 1) * (ftemp[1] - ftemp[3]);
ftemp[4] += ((BB>>(3)) & 1) * (ftemp[2] - ftemp[4]);
ftemp[5] += ((BB>>(4)) & 1) * (ftemp[7] - ftemp[5]);
ftemp[6] += ((BB>>(5)) & 1) * (ftemp[8] - ftemp[6]);
ftemp[7] += ((BB>>(6)) & 1) * (ftemp[5] - ftemp[7]);
ftemp[8] += ((BB>>(7)) & 1) * (ftemp[6] - ftemp[8]);
#endif
hlocal[0] = ftemp[0] + (ftemp[1] + ftemp[2] + ftemp[3] + ftemp[4]) + (ftemp[5] + ftemp[6] + ftemp[7] + ftemp[8]);
uxlocal = e * ((ftemp[1] - ftemp[3]) + (ftemp[5] - ftemp[6] - ftemp[7] + ftemp[8])) / hlocal[0];
uylocal = e * ((ftemp[2] - ftemp[4]) + (ftemp[5] + ftemp[6] - ftemp[7] - ftemp[8])) / hlocal[0];
h[i] = hlocal[0];
gh = 1.5 * g * hlocal[0];
usq = 1.5 * (uxlocal * uxlocal + uylocal * uylocal);
ux3 = 3.0 * e * uxlocal;
uy3 = 3.0 * e * uylocal;
uxuy5 = ux3 + uy3;
uxuy6 = uy3 - ux3;
feq[0] = hlocal[0] - fact1 * hlocal[0] * (5.0 * gh + 4.0 * usq);
feq[1] = fact1 * hlocal[0] * (gh + ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[2] = fact1 * hlocal[0] * (gh + uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[3] = fact1 * hlocal[0] * (gh - ux3 + 0.5 * ux3*ux3 * 9 * fact1 - usq);
feq[4] = fact1 * hlocal[0] * (gh - uy3 + 0.5 * uy3*uy3 * 9 * fact1 - usq);
feq[5] = fact2 * hlocal[0] * (gh + uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[6] = fact2 * hlocal[0] * (gh + uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
feq[7] = fact2 * hlocal[0] * (gh - uxuy5 + 0.5 * uxuy5*uxuy5 * 9 * fact1 - usq);
feq[8] = fact2 * hlocal[0] * (gh - uxuy6 + 0.5 * uxuy6*uxuy6 * 9 * fact1 - usq);
for (j = 0; j < 9; j++)
f2[i + j * size] = ftemp[j] - (ftemp[j] - feq[j]) / tau;
}
}
}
#endif
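// Initialise every lattice population to the zero-velocity equilibrium for the local depth h[i];
// called once from setup() before time stepping starts.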
__global__ void feqKernel(int Lx, int Ly, prec g, prec e,
const prec* __restrict__ h, prec* f) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < Lx*Ly) {
prec hi = h[i];
prec gh1 = g * hi * hi / (6.0 * e * e);
prec gh2 = gh1 / 4;
f[i] = hi - 5.0 * gh1;
f[i + ( Lx*Ly)] = gh1;
f[i + (2 * Lx*Ly)] = gh1;
f[i + (3 * Lx*Ly)] = gh1;
f[i + (4 * Lx*Ly)] = gh1;
f[i + (5 * Lx*Ly)] = gh2;
f[i + (6 * Lx*Ly)] = gh2;
f[i + (7 * Lx*Ly)] = gh2;
f[i + (8 * Lx*Ly)] = gh2;
}
}
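// Sample the water-surface array w at the probe indices TSind and store the value in the
// time-series buffer TSdata; one sample per probe every deltaTS time steps.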
__global__ void TSkernel(prec* TSdata, const prec* __restrict__ w,
const int* __restrict__ TSind, int t, int deltaTS, int NTS, int TTS) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < NTS) {
int n = t / deltaTS;
TSdata[i*TTS + n] = w[TSind[i]];
}
}
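// Advance the simulation by one LBM step. Even and odd steps swap the roles of f1 and f2
// (ping-pong buffering) so no extra copy is needed; the pull kernel is timed with events and,
// every deltaTS steps, the surface elevation and the probe time series are refreshed.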
void LBMTimeStep(mainDStruct devi, cudaStruct devEx, int t, int deltaTS, cudaEvent_t ct1, cudaEvent_t ct2, prec *msecs) {
float dt;
if (t % 2 == 0){
cudaEventRecord(ct1);
#if IN == 1
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.f1, devEx.f2, devEx.h);
#elif IN == 2
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devi.node_types, devEx.f1, devEx.f2, devEx.h);
#elif IN == 3
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.Arr_tri, devEx.f1, devEx.f2, devEx.h);
#else
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.SC_bin, devEx.BB_bin, devEx.f1, devEx.f2, devEx.h);
#endif
}
else{
cudaEventRecord(ct1);
#if IN == 1
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.f2, devEx.f1, devEx.h);
#elif IN == 2
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devi.node_types, devEx.f2, devEx.f1, devEx.h);
#elif IN == 3
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.Arr_tri, devEx.f2, devEx.f1, devEx.h);
#else
LBMpull << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.tau,
devi.b, devEx.SC_bin, devEx.BB_bin, devEx.f2, devEx.f1, devEx.h);
#endif
}
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
*msecs += dt;
if (t%deltaTS == 0) {
wKernel << <devi.Ngrid, devi.Nblocks >> >(devi.Lx, devi.Ly, devEx.h, devi.b, devi.w);
TSkernel << <devi.NTS, 1 >> > (devi.TSdata, devi.w, devi.TSind, t, deltaTS, devi.NTS, devi.TTS);
}
}
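// Build the variant-dependent auxiliary arrays (IN == 3 or 4), the initial depth field h and the
// equilibrium populations f1, then record the first time-series sample at t = 0.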
void setup(mainDStruct devi, cudaStruct devEx, int deltaTS) {
#if IN == 3
auxArraysKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.ex, devEx.ey, devi.node_types,
devEx.Arr_tri);
#elif IN == 4
auxArraysKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.ex, devEx.ey, devi.node_types,
devEx.SC_bin, devEx.BB_bin);
#endif
hKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devi.w, devi.b, devEx.h);
feqKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.g, devEx.e, devEx.h, devEx.f1);
TSkernel << <devi.NTS, 1 >> > (devi.TSdata, devi.w, devi.TSind, 0, deltaTS, devi.NTS, devi.TTS);
}
void copyAndWriteResultData(mainHStruct host, mainDStruct devi, cudaStruct devEx, int t, std::string outputdir) {
wKernel << <devi.Ngrid, devi.Nblocks >> > (devi.Lx, devi.Ly, devEx.h, devi.b, devi.w);
cudaMemcpy(host.w, devi.w, devi.Lx*devi.Ly * sizeof(prec), cudaMemcpyDeviceToHost);
writeOutput(devi.Lx*devi.Ly, t, host.w, outputdir);
}
void copyAndWriteTSData(mainHStruct host, mainDStruct devi, int deltaTS, prec Dt, std::string outputdir) {
cudaMemcpy(host.TSdata, devi.TSdata, devi.TTS*devi.NTS * sizeof(prec), cudaMemcpyDeviceToHost);
writeTS(devi.TTS, devi.NTS, deltaTS, Dt, host.TSdata, outputdir);
}
void LBM(mainHStruct host, mainDStruct devi, cudaStruct devEx, int* time_array, prec Dt, std::string outputdir) {
cudaFuncSetCacheConfig(LBMpull, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(feqKernel, cudaFuncCachePreferL1);
int tMax = time_array[0];
int deltaOutput = time_array[1];
int deltaTS = time_array[2];
int t = 0;
cudaEvent_t ct1, ct2;
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
prec msecs = 0;
setup(devi, devEx, deltaTS);
std::cout << std::fixed << std::setprecision(1);
while (t <= tMax) {
LBMTimeStep(devi, devEx, t, deltaTS, ct1, ct2, &msecs);
t++;
if (deltaOutput != 0 && t%deltaOutput == 0) {
std::cout << "\rTime step: " << t << " (" << 100.0*t / tMax << "%)";
copyAndWriteResultData(host, devi, devEx, t, outputdir);
}
}
copyAndWriteResultData(host, devi, devEx, t, outputdir);
copyAndWriteTSData(host, devi, deltaTS, Dt, outputdir);
std::cout << std::endl << "Tiempo total: " << msecs << "[ms]" << std::endl;
std::cout << std::endl << "Tiempo promedio por iteracion: " << msecs / tMax << "[ms]" << std::endl;
}
|
c320999469dc9d9d27e14de691662845addcb916.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_128x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_128x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| c320999469dc9d9d27e14de691662845addcb916.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_128x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_128x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_aligned_64x64_k128_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, float, true, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_aligned_64x64_k128_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
767f06a2cf35a685ccc73d8faa002daa6327da1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include "particle_positions.cuh"
#include "utilities.cuh"
#include "VTK.cuh"
#include "hashing.cuh"
#include "particle_parameters.cuh"
#include <math.h>
#include <future>
#include <chrono>
#include <math.h>
//declaration of all global variables that are going to be used in this file
char main_path[1024];
char vtk_group_path[1024];
char vtu_fullpath[1024];
char vtu_path[1024];
std::string pointDataNames[] = { "density" , "pressure" };
std::string vectorDataNames[] = {"velocity","pressure force","viscosity force","st force" };
int size_pointData;
int size_vectorData;
vec3d* d_POSITION;
vec3d* d_PRED_POSITION;
vec3d* d_VELOCITY;
vec3d* d_PRED_VELOCITY;
vec3d* d_ST_FORCE;
vec3d* d_VISCOSITY_FORCE;
vec3d* d_PRESSURE_FORCE;
vec3d* d_NORMAL;
float* DENSITY;
float* d_DENSITY;
float* d_PRESSURE;
float* d_MASS;
int* d_TYPE;
int* d_hashtable;
vec3d gravity;
//physical constants
float rho_0; //rest density
float visc_const; //viscosity constant
float st_const; // surface tension constant
float epsilon; // damping coefficient for collision
//initial conditions
float PARTICLE_RADIUS;
float MASS_calc;// = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
float USER_MASS;
float PARTICLE_DIAMETER;
float F_INITIAL_POSITION[3]; // = { 0.f,0.f,0.f }; //Fluid particles initial position
float F_FINAL_POSITION[3]; // = { 0.5f,1.f,0.5f }; //Fluid particles final position
float B_INITIAL_POSITION[3]; // = { 0.f,0.f,0.f }; //Boundary particles initial position
float B_FINAL_POSITION[3]; // = { 1.f,1.f,1.f }; //Boundary particles final position
float V_INITIAL[3];
//controlling iteration number and simulation time
int iteration = 1;
float simulation_time; //in seconds
float final_time; //in seconds
//number of particles
int N; //fluid particles
int B; //boundary particles
int T; //total particles
//variables for hashtable
size_t pitch;
int particles_per_row = 200;
int hashtable_size;
//simulation parameters
float invh;
float h;
//CUDA variables
int block_size;
int grid_size;
//PCISPH variables
float vol_comp_perc;
float dens_fluc_perc;
float* d_max_force;
float* d_max_velocity;
float* d_max_rho_err;
float* d_sum_rho_err;
float delta_t;
float max_vol_comp;
float max_rho_fluc;
float BOUNDARY_DIAMETER;
float BOUNDARY_RADIUS;
float pressure_delta;
float max_rho_err_t_1 = 0.f;
float max_rho_err = 0.f;
bool write_pvd = true;
char* user_results_folder = new char[256];
float save_steps;
int fileReader() {
//allocating memory
char* row = new char[256];
int row_buff_index = 0;
char* num_buffer = new char[256];
int num_buffer_index = 0;
float num;
vec3d vec;
//names
char* phys_props_names[] = { "rho_0","visc_const","surface_tension_const","collision_dumping_coeff" };
char* init_cond_names[] = {"particle_radius","mass","fluid_initial_coord","fluid_final_coord","boundary_initial_coord","boundary_final_coord","fluid_initial_velocity","maximum_volume_compression","maximum_density_fluctuation"};
char* system_names[] = { "initial_delta_t","initial_time","final_time","neighbors_per_particle", "save_steps","results_folder"};
int phys_props_size = sizeof(phys_props_names) / 8;
int init_cond_size = sizeof(init_cond_names) / 8;
int system_size = sizeof(system_names) / 8;
//paths
char* phys_props_path = "./props/physical_props.txt";
char* initial_conditions_path = "./props/initial_conditions.txt";
char* system_path = "./props/system.txt";
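// The parsers below scan each accumulated line character by character using ASCII codes:
// '=' (61) starts a value, ';' (59) ends it, '[' (91), ',' (44) and ']' (93) delimit vector
// components, '"' (34) delimits string values and 10 (newline) terminates a line.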
if (fileExists(phys_props_path) != 0) {
std::cout << "\nERROR! Could not find physical properties file at " << phys_props_path << "\n";
return 1;
}
if (fileExists(initial_conditions_path) != 0) {
std::cout << "\nERROR! Could not find initial conditions file at " << initial_conditions_path << "\n";
return 1;
}
if (fileExists(system_path) != 0) {
std::cout << "\nERROR! Could not find system names file at " << system_path << "\n";
return 1;
}
//reading physical properties
std::ifstream phys_props (phys_props_path);
for (char write2line; phys_props.get(write2line);) {
if (phys_props.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < phys_props_size; i++) {
if (strstr(row, phys_props_names[i]) != nullptr) {
break;
}
}
if (i < phys_props_size) {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
if (i == 0) {
rho_0 = num;
}
else if (i == 1) {
visc_const = num;
}
else if (i == 2) {
st_const = num;
}
else if (i == 3) {
epsilon = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
phys_props.close();
//reading initial conditions
std::ifstream init_conds(initial_conditions_path);
for (char write2line; init_conds.get(write2line);) {
if (init_conds.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < init_cond_size; i++) {
if (strstr(row, init_cond_names[i]) != nullptr) {
break;
}
}
if (i < init_cond_size) {
if (strstr(row, "[") != nullptr) {
bool save_char = false;
int axis_count = 0;
for (int j = 0; j < strlen(row); j++) {
if (axis_count > 2) {
axis_count = 0;
break;
}
if (row[j] == 91) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 44 || row[j] == 93) {
num = (float)atof(num_buffer);
if (axis_count == 0) {
vec.x = num;
} else if (axis_count == 1) {
vec.y = num;
}
else if (axis_count == 2) {
vec.z = num;
}
axis_count++;
if (row[j] == 32) {
j++;
}
num_buffer_index = 0;
num_buffer = new char[256];
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
PARTICLE_RADIUS = num;
}
else if (i == 1) {
USER_MASS = num;
}
else if (i == 2) {
F_INITIAL_POSITION[0] = vec.x;
F_INITIAL_POSITION[1] = vec.y;
F_INITIAL_POSITION[2] = vec.z;
}
else if (i == 3) {
F_FINAL_POSITION[0] = vec.x;
F_FINAL_POSITION[1] = vec.y;
F_FINAL_POSITION[2] = vec.z;
}
else if (i == 4) {
B_INITIAL_POSITION[0] = vec.x;
B_INITIAL_POSITION[1] = vec.y;
B_INITIAL_POSITION[2] = vec.z;
}
else if (i == 5) {
B_FINAL_POSITION[0] = vec.x;
B_FINAL_POSITION[1] = vec.y;
B_FINAL_POSITION[2] = vec.z;
}
else if (i == 6) {
V_INITIAL[0] = vec.x;
V_INITIAL[1] = vec.y;
V_INITIAL[2] = vec.z;
}
else if (i == 7) {
vol_comp_perc = num;
}
else if (i == 8) {
dens_fluc_perc = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
init_conds.close();
std::ifstream system_vars(system_path);
for (char write2line; system_vars.get(write2line);) {
if (system_vars.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < system_size; i++) {
if (strstr(row, system_names[i]) != nullptr) {
break;
}
}
if (i < system_size) {
bool save_char = false;
if (strstr(row, "\"") != nullptr) {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 34 && !save_char) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (row[k+1] == 32) {
j++;
}
else { break; }
}
}
else if (row[j] == 34 && save_char) {
break;
}
else if (save_char){
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
delta_t = num;
}
else if (i == 1) {
simulation_time = num;
}
else if (i == 2) {
final_time = num;
}
else if (i == 3) {
particles_per_row = (int)num;
}
else if (i == 4) {
save_steps = num;
}
else if (i == 5) {
user_results_folder = num_buffer;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
return 0;
}
int initialize() {
hipDeviceProp_t* prop = new hipDeviceProp_t;
gpuErrchk(hipGetDeviceProperties(prop,0));
std::cout << "-----------------------------------------------\n";
std::cout << "DEVICE PROPERTIES:\n" << "Device name: " << prop->name << "\n" <<
"Max number of threads per block: " << prop->maxThreadsPerBlock << "\n" <<
"Total global memory: " << dround(prop->totalGlobalMem/1e9,2) << " gigabytes\n" <<
"Registers per block: " << prop->regsPerBlock << "\n" <<
"Shared Memory per block: " << prop->sharedMemPerBlock << " bytes\n" <<
"-----------------------------------------------\n";
block_size = prop->maxThreadsPerBlock;
max_vol_comp = rho_0 * vol_comp_perc / 100;
max_rho_fluc = rho_0 * dens_fluc_perc / 100;
if (USER_MASS == 0) {
MASS_calc = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
}
else {
MASS_calc = USER_MASS;
}
PARTICLE_DIAMETER = 2 * PARTICLE_RADIUS;
// get main path of simulation
getMainPath(main_path);
// write path for vtu files
strcpy(vtu_path, main_path);
strcat(vtu_path, "/vtu");
// write path for vtk group file
strcpy(vtk_group_path, main_path);
strcat(vtk_group_path, "/PCISPH.pvd");
// create directory for vtu files
CreateDir(vtu_path);
float VOLUME = 1;
const int SIMULATION_DIMENSION = 3;
// Get number per dimension (NPD) of FLUID particles for hexadecimal packing (assuming use of makeprism function)
int NPD[3];
for (int i = 0; i < 3; i++) {
if (i == 1) {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / (sqrt(3.f) / 2.f * PARTICLE_DIAMETER)));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
else {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / PARTICLE_DIAMETER));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
}
//Passing NPD to device
int* D_NPD;
gpuErrchk(hipMalloc((void**)&D_NPD, SIMULATION_DIMENSION * sizeof(int)));
gpuErrchk(hipMemcpy(D_NPD, NPD, SIMULATION_DIMENSION * sizeof(int), hipMemcpyHostToDevice));
N = NPD[0] * NPD[1] * NPD[2]; //number of fluid particles
int SIM_SIZE = N * SIMULATION_DIMENSION;
const int x = 40; // Number of particles inside the smoothing length
h = powf(3.f * VOLUME * x / (4.f * (float)M_PI * N), 1.f / 3.f);
//h = 0.02;
invh = 1 / h;
vec3d f_initial;
f_initial.x = F_INITIAL_POSITION[0] + PARTICLE_RADIUS;
f_initial.y = F_INITIAL_POSITION[1] + PARTICLE_RADIUS;
f_initial.z = F_INITIAL_POSITION[2] + PARTICLE_RADIUS;
size_t bytes_fluid_particles = SIM_SIZE * sizeof(float);
vec3d* FLUID_POSITIONS; //host pointer
FLUID_POSITIONS = (vec3d*)malloc(bytes_fluid_particles);
vec3d* D_FLUID_POSITIONS; //device pointer
gpuErrchk(hipMalloc((void**)&D_FLUID_POSITIONS, bytes_fluid_particles));
// grid -> number of blocks
// block -> number of threads
grid_size = N / block_size + 1;
//generate locations for each particle
makePrism << <grid_size, block_size >> > (D_FLUID_POSITIONS, PARTICLE_DIAMETER, f_initial, D_NPD, N);
BOUNDARY_DIAMETER = h/2;
BOUNDARY_RADIUS = h/4;
// Get number per dimension (NPD) of BOUNDARY particles without compact packing (assuming use of makebox function)
for (int i = 0; i < 3; i++) {
NPD[i] = static_cast<int>(ceil((B_FINAL_POSITION[i] - B_INITIAL_POSITION[i]) / BOUNDARY_DIAMETER)) + 2;
}
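// Boundary particles form only the hollow shell of the domain box: all lattice nodes minus the interior ones.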
B = NPD[0] * NPD[1] * NPD[2] - (NPD[0] - 2) * (NPD[1] - 2) * (NPD[2] - 2); //Number of boundary particles
SIM_SIZE = NPD[0] * NPD[1] * NPD[2] * SIMULATION_DIMENSION;
vec3d b_initial;
b_initial.x = B_INITIAL_POSITION[0] - BOUNDARY_RADIUS;
b_initial.y = B_INITIAL_POSITION[1] - BOUNDARY_RADIUS;
b_initial.z = B_INITIAL_POSITION[2] - BOUNDARY_RADIUS;
vec3d b_final;
b_final.x = b_initial.x + BOUNDARY_DIAMETER * (NPD[0] - 1);
b_final.y = b_initial.y + BOUNDARY_DIAMETER * (NPD[1] - 1);
b_final.z = b_initial.z + BOUNDARY_DIAMETER * (NPD[2] - 1);
//printf("[%g %g %g] [%g %g %g]\n", b_final.x, b_final.y, b_final.z, B_FINAL_POSITION[0] + BOUNDARY_RADIUS, B_FINAL_POSITION[1] + BOUNDARY_RADIUS, B_FINAL_POSITION[2] + BOUNDARY_RADIUS);
size_t bytes_boundary_particles = SIM_SIZE * sizeof(float);
vec3d* BOUNDARY_POSITIONS; //host pointer
BOUNDARY_POSITIONS = (vec3d*)malloc(bytes_boundary_particles); //allocate memory in the host
vec3d* D_BOUNDARY_POSITIONS; //device pointer
gpuErrchk(hipMalloc((void**)&D_BOUNDARY_POSITIONS, bytes_boundary_particles)); // allocate memory in the device
makeBox(D_BOUNDARY_POSITIONS, BOUNDARY_DIAMETER, b_initial, b_final, block_size, D_NPD,NPD, SIMULATION_DIMENSION);
T = N + B; //Total number of particles
gpuErrchk(hipMemcpy(FLUID_POSITIONS, D_FLUID_POSITIONS, bytes_fluid_particles, hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(BOUNDARY_POSITIONS, D_BOUNDARY_POSITIONS, bytes_boundary_particles, hipMemcpyDeviceToHost));
// Free GPU memory for fluid particles
hipFree(D_FLUID_POSITIONS);
// HASHING ONLY FOR BOUNDARY PARTICLES
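// Spatial hash grid with cell size ~h: 2^19 buckets, each row holding up to particles_per_row
// particle indices (-1 marks an empty slot), stored as a pitched 2D array on the device.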
hashtable_size = powf(2, 19);
Hash b_hash(hashtable_size);
const int particles_per_row = 200;
pitch = 0;
int* hashtable = new int[hashtable_size * particles_per_row];
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
gpuErrchk(hipMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
gpuErrchk(hipMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, hipMemcpyHostToDevice));
grid_size = B / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, D_BOUNDARY_POSITIONS, invh, b_hash, B, pitch, particles_per_row);
// Calculate mass (or psi) for each boundary particle
float* d_boundary_mass;
gpuErrchk(hipMalloc((void**)&d_boundary_mass, B * sizeof(float)));
boundaryPsi << <grid_size, block_size >> > (d_boundary_mass, d_hashtable, rho_0, D_BOUNDARY_POSITIONS, h, invh, particles_per_row, pitch, b_hash, B);
float* boundary_mass = (float*)malloc(B * sizeof(float));
gpuErrchk(hipMemcpy(boundary_mass, d_boundary_mass, (size_t)B * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_boundary_mass));
//Calculate normal for boundary particles
vec3d* d_boundary_normal;
gpuErrchk(hipMalloc((void**)&d_boundary_normal, B * 3 * sizeof(float)));
boundaryNormal << <grid_size, block_size >> > (d_boundary_normal, D_BOUNDARY_POSITIONS, b_initial, b_final, B);
vec3d* boundary_normal = (vec3d*)malloc(B * 3 * sizeof(float));
gpuErrchk(hipMemcpy(boundary_normal, d_boundary_normal, (size_t)B * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipFree(d_boundary_normal));
//Write boundary vtu file
float** boundary_point_data[] = { &boundary_mass };
size_pointData = sizeof(boundary_point_data) / sizeof(double);
vec3d** boundary_vectorData[] = { &boundary_normal };
size_vectorData = sizeof(boundary_vectorData) / sizeof(double);
std::string boundary_pointDataNames[] = { "psi" };
std::string boundary_vectorDataNames[] = { "normal" };
VTU_Writer(main_path, iteration, BOUNDARY_POSITIONS, B, boundary_point_data, boundary_vectorData, boundary_pointDataNames, boundary_vectorDataNames, size_pointData, size_vectorData, vtu_fullpath, 1);
//gpuErrchk(hipMemcpy2D(hashtable, particles_per_row * sizeof(int), d_hashtable, pitch, width, height, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
//END OF HASHING FOR BOUNDARIES
hipFree(d_hashtable);
hipFree(D_BOUNDARY_POSITIONS);
//Calculating pressure delta
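// PCISPH scaling term: fill a prototype neighbourhood on a regular lattice around a central
// particle and accumulate -(sum gradW)·(sum gradW) - sum(gradW·gradW); this value is later
// turned into the pressure correction coefficient (pressure_coeff in mainLoop()).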
int count = 0;
float min_r = std::numeric_limits<float>::infinity();
int selected_index;
int tmp_size = static_cast<int>(ceil((2 * (h + PARTICLE_DIAMETER)) / PARTICLE_DIAMETER));
vec3d* tmp_points = (vec3d*)malloc(tmp_size * tmp_size * tmp_size * 3 * sizeof(float));
for (float i = -h - PARTICLE_DIAMETER; i <= h + PARTICLE_DIAMETER; i += PARTICLE_DIAMETER) {
for (float j = -h - PARTICLE_DIAMETER; j <= h + PARTICLE_DIAMETER; j += PARTICLE_DIAMETER) {
for (float k = -h - PARTICLE_DIAMETER; k <= h + PARTICLE_DIAMETER; k += PARTICLE_DIAMETER) {
tmp_points[count].x = i;
tmp_points[count].y = j;
tmp_points[count].z = k;
count++;
float r = sqrt(i*i+j*j+k*k);
if (r < min_r) {
min_r = r;
selected_index = count - 1; // count was already incremented; the closest point is the one just stored
}
}
}
}
vec3d selected_point = tmp_points[selected_index];
vec3d r_vector;
float r;
vec3d Grad_W;
Grad_W.x = 0.f;
Grad_W.y = 0.f;
Grad_W.z = 0.f;
float dot_Grad_W = 0.f;
for (int i = 0; i < count; i++) {
r_vector.x = tmp_points[i].x - selected_point.x;
r_vector.y = tmp_points[i].y - selected_point.y;
r_vector.z = tmp_points[i].z - selected_point.z;
r = sqrt(r_vector.x* r_vector.x + r_vector.y* r_vector.y + r_vector.z* r_vector.z);
if (r <= h) {
vec3d inst_Grad_W = Poly6_Gradient(selected_index, i, tmp_points, r, h, invh);
Grad_W.x += inst_Grad_W.x;
Grad_W.y += inst_Grad_W.y;
Grad_W.z += inst_Grad_W.z;
dot_Grad_W += dot_product(inst_Grad_W, inst_Grad_W);
}
}
pressure_delta = -dot_product(Grad_W, Grad_W) - dot_Grad_W;
//Initializing main particle variables
//Defining and allocating main position variable
vec3d* POSITION = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
POSITION[i].x = FLUID_POSITIONS[i].x;
POSITION[i].y = FLUID_POSITIONS[i].y;
POSITION[i].z = FLUID_POSITIONS[i].z;
}
for (int i = N; i < T; i++) {
POSITION[i].x = BOUNDARY_POSITIONS[i - N].x;
POSITION[i].y = BOUNDARY_POSITIONS[i - N].y;
POSITION[i].z = BOUNDARY_POSITIONS[i - N].z;
}
free(BOUNDARY_POSITIONS);
free(FLUID_POSITIONS);
gpuErrchk(hipMalloc((void**)&d_POSITION, 3*T*sizeof(float)));
gpuErrchk(hipMemcpy(d_POSITION, POSITION, 3*T*sizeof(float), hipMemcpyHostToDevice));
//Allocating memory for predicted positions and copying previous position vectors
gpuErrchk(hipMalloc((void**)&d_PRED_POSITION, 3 * T * sizeof(float)));
gpuErrchk(hipMemcpy(d_PRED_POSITION, POSITION, 3 * T * sizeof(float), hipMemcpyHostToDevice));
//Allocating memory for predicted velocity
gpuErrchk(hipMalloc((void**)&d_PRED_VELOCITY, 3 * N * sizeof(float)));
//Defining and allocating main velocity variable
vec3d* VELOCITY = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VELOCITY[i].x = V_INITIAL[0];
VELOCITY[i].y = V_INITIAL[1];
VELOCITY[i].z = V_INITIAL[2];
}
gpuErrchk(hipMalloc((void**)&d_VELOCITY, 3*N*sizeof(float)));
gpuErrchk(hipMemcpy(d_VELOCITY, VELOCITY, 3*N*sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main st force variable
vec3d* ST_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
ST_FORCE[i].x = 0.f;
ST_FORCE[i].y = 0.f;
ST_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_ST_FORCE, 3*N*sizeof(float)));
//Defining and allocating main viscosity force variable
vec3d* VISCOSITY_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VISCOSITY_FORCE[i].x = 0.f;
VISCOSITY_FORCE[i].y = 0.f;
VISCOSITY_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_VISCOSITY_FORCE, 3*N*sizeof(float)));
//Defining and allocating main pressure force variable
vec3d* PRESSURE_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE_FORCE[i].x = 0.f;
PRESSURE_FORCE[i].y = 0.f;
PRESSURE_FORCE[i].z = 0.f;
}
gpuErrchk(hipMalloc((void**)&d_PRESSURE_FORCE, 3*N*sizeof(float)));
//Defining and allocating main normal variable
vec3d* NORMAL = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
NORMAL[i].x = 0.f;
NORMAL[i].y = 0.f;
NORMAL[i].z = 0.f;
}
for (int i = N; i < T; i++) {
NORMAL[i].x = boundary_normal[i - N].x;
NORMAL[i].y = boundary_normal[i - N].y;
NORMAL[i].z = boundary_normal[i - N].z;
}
free(boundary_normal);
gpuErrchk(hipMalloc((void**)&d_NORMAL, 3*T*sizeof(float)));
gpuErrchk(hipMemcpy(d_NORMAL, NORMAL, 3*T*sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main density array
DENSITY = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
DENSITY[i] = rho_0;
}
gpuErrchk(hipMalloc((void**)&d_DENSITY, N * sizeof(float)));
gpuErrchk(hipMemcpy(d_DENSITY, DENSITY, N * sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main pressure array
float* PRESSURE = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE[i] = 0;
}
gpuErrchk(hipMalloc((void**)&d_PRESSURE, N * sizeof(float)));
//Defining and allocating main mass array
float* MASS = (float*)malloc(T * sizeof(float));
for (int i = 0; i < N; i++) {
MASS[i] = MASS_calc;
}
for (int i = N; i < T; i++) {
MASS[i] = boundary_mass[i - N];
}
free(boundary_mass);
gpuErrchk(hipMalloc((void**)&d_MASS, T * sizeof(float)));
gpuErrchk(hipMemcpy(d_MASS, MASS, T * sizeof(float), hipMemcpyHostToDevice));
//Defining and allocating main type array (0 if fluid, 1 if boundary)
int* TYPE = (int*)malloc(T * sizeof(int));
for (int i = 0; i < N; i++) {
TYPE[i] = 0;
}
for (int i = N; i < T; i++) {
TYPE[i] = 1;
}
gpuErrchk(hipMalloc((void**)&d_TYPE, T * sizeof(int)));
gpuErrchk(hipMemcpy(d_TYPE, TYPE, T * sizeof(int), hipMemcpyHostToDevice));
//Defining and allocating memory to store max density error
gpuErrchk(hipMalloc((void**)&d_max_rho_err, sizeof(float)));
//Defining and allocating memory to store max force value
gpuErrchk(hipMalloc((void**)&d_max_force, sizeof(float)));
//Defining and allocating memory to store max velocity value
gpuErrchk(hipMalloc((void**)&d_max_velocity, sizeof(float)));
//Defining and allocating memory to store summation of density errors to calculate average error
gpuErrchk(hipMalloc((void**)&d_sum_rho_err, sizeof(float)));
//defining gravity vector
gravity.x = 0.f;
gravity.y = -9.81f;
gravity.z = 0.f;
//Defining variables to write VTU files
float** pointData[2];
vec3d** vectorData[4];
pointData[0] = &DENSITY;
pointData[1] = &PRESSURE;
size_pointData = sizeof(pointData) / sizeof(double);
vectorData[0] = &VELOCITY;
vectorData[1] = &PRESSURE_FORCE;
vectorData[2] = &VISCOSITY_FORCE;
vectorData[3] = &ST_FORCE;
size_vectorData = sizeof(vectorData) / sizeof(double);
VTU_Writer(vtu_path, iteration, POSITION, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath);
VTK_Group(vtk_group_path, vtu_fullpath, simulation_time);
// Initialize main hashtable
hashtable = new int[hashtable_size * particles_per_row];
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
gpuErrchk(hipMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
gpuErrchk(hipMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, hipMemcpyHostToDevice));
writeTimeKeeper(main_path);
std::cout << N << " Fluid particles\n"
<< B << " Boundary particles\n"
<< "Total of " << T << " particles.\n"
<< "Smoothing radius = " << h << " m.\n"
<< "hashtable size = " << hashtable_size << "\n";
return 0;
}
int mainLoop() {
Hash hash(hashtable_size);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_POSITION, invh, hash, T, pitch, particles_per_row);
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
fluidNormal << <grid_size, block_size >> > (d_NORMAL, d_POSITION, d_MASS, d_DENSITY,d_TYPE, rho_0, h,invh, hash,d_hashtable, particles_per_row,pitch, N);
nonPressureForces << <grid_size, block_size >> > (d_POSITION, d_VISCOSITY_FORCE, d_ST_FORCE, d_MASS, d_DENSITY, d_VELOCITY, d_NORMAL, gravity,d_TYPE, h, invh, rho_0, visc_const, st_const, particles_per_row, pitch,d_hashtable, hash, N);
gpuErrchk(hipPeekAtLastError());
//reseting values of pressure
resetPressure << <grid_size, block_size >> > (d_PRESSURE, N);
float pressure_coeff = -1 / (2 * powf(MASS_calc * delta_t / rho_0, 2) * pressure_delta);
/*std::cout << pressure_coeff << std::endl;*/
int _k_ = 0;
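// Prediction-correction loop (3 fixed iterations): predict positions/velocities, re-hash,
// resolve boundary collisions, recompute densities, update pressures from the density error
// and accumulate the corresponding pressure forces.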
while (_k_ < 3) {
positionAndVelocity << <grid_size, block_size >> > (d_PRED_POSITION,d_PRED_VELOCITY,d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> >(d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_PRED_POSITION, invh, hash, T, pitch, particles_per_row);
grid_size = N / block_size + 1;
collisionHandler << <grid_size, block_size >> > (d_PRED_POSITION, d_PRED_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
DensityCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
PressureCalc << <grid_size, block_size >> > (d_PRESSURE, d_DENSITY, rho_0, pressure_coeff, N);
PressureForceCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_PRESSURE_FORCE, d_PRESSURE, d_MASS, d_DENSITY,d_TYPE, h, invh, particles_per_row, pitch, d_hashtable, hash, N);
_k_++;
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
}
positionAndVelocity << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
collisionHandler << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
//criterias for changes in delta_t value
gpuErrchk(hipMemcpy(DENSITY, d_DENSITY, N * sizeof(float), hipMemcpyDeviceToHost));
max_rho_err_t_1 = max_rho_err;
float max_velocity = 0.f;
float max_force = 0.f;
float sum_rho_err = 0.f;
hipLaunchKernelGGL(( resetValues), dim3(1),dim3(1), 0, 0, d_max_velocity, d_max_force, d_sum_rho_err, d_max_rho_err);
grid_size = N / block_size + 1;
getMaxVandF << <grid_size, block_size >> > (d_max_velocity, d_max_force, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS,d_DENSITY,d_sum_rho_err,d_max_rho_err, rho_0, N);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipMemcpy(&max_velocity, d_max_velocity, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&max_force, d_max_force, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&sum_rho_err, d_sum_rho_err, sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(&max_rho_err, d_max_rho_err, sizeof(float), hipMemcpyDeviceToHost));
float avg_rho_err = sum_rho_err / N;
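// Adaptive time stepping: delta_t grows by 0.2% only when the force, density-error and
// velocity (CFL-like) criteria all allow it, and shrinks by 0.2% when any of them is violated.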
// delta_t increase
bool criteria1 = 0.19f * sqrt(h / max_force) > delta_t;
bool criteria2 = max_rho_err < 4.5f * max_vol_comp;
bool criteria3 = avg_rho_err < 0.9f * max_vol_comp;
bool criteria4 = 0.39f * (h/max_velocity) > delta_t;
if (criteria1 && criteria2 && criteria3 && criteria4) {
delta_t += delta_t * 0.2f / 100;
}
//delta_t decrease
criteria1 = 0.2f * sqrt(h / max_force) < delta_t;
criteria2 = max_rho_err > 5.5f * max_vol_comp;
criteria3 = avg_rho_err > max_vol_comp;
criteria4 = 0.4f * (h / max_velocity) <= delta_t;
if (criteria1 || criteria2 || criteria3 || criteria4) {
delta_t -= delta_t * 0.2f / 100;
}
//shock handling
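// If the density error jumps abruptly or the velocity criterion is strongly violated, reduce
// delta_t, delete the most recent saved VTU files and roll the simulation state back to an
// earlier saved iteration read from disk.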
criteria1 = max_rho_err - max_rho_err_t_1 > 8 * max_vol_comp;
criteria2 = max_rho_err > max_rho_fluc;
criteria3 = 0.45f * (h/max_velocity) < delta_t;
if (criteria1 || criteria2 || criteria3) {
int last_iter = getLastIter(main_path);
char* iter_path = new char[100];
char* num_buffer = new char[32];
while (iteration - last_iter < 2) {
itoa(last_iter, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
//printf("%s\n", iter_path);
remove(iter_path);
last_iter = getLastIter(main_path);
//printf("%d\n", last_iter);
num_buffer = new char[32];
iter_path = new char[100];
}
std::cout << "\n\nSHOCK DETECTED! RETURNING " << iteration - last_iter << " ITERATIONS!\n" << std::endl;
write_pvd = false;
//SHOCK DETECTED
//delta_t = fminf(0.2f * sqrt(h/max_force),0.08f*h/max_velocity);
delta_t = delta_t / 5;
//Return 2 iterations
iteration = last_iter;
if (iteration <= 0) {
std::cout << "\nIMPOSSIBLE TO RETURN 2 ITERATIONS! TERMINATING SIMULATION\n" << std::endl;
return 1;
}
vec3d* position = (vec3d*)malloc(N * sizeof(vec3d));
vec3d* velocity = (vec3d*)malloc(N * sizeof(vec3d));
itoa(iteration, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
readVTU(iter_path, position, velocity);
getNewSimTime(main_path);
rewritePVD(main_path);
gpuErrchk(hipMemcpy(d_POSITION, position, 3 * N * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_VELOCITY, velocity, 3 * N * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
return 0;
}
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
simulation_time += delta_t;
iteration++;
writeTimeKeeper(main_path);
return 0;
}
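// Copy the current device state to host buffers and write the VTU output on a separate thread
// (std::async) so the next simulation step does not wait for file I/O.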
void multiprocessor_writer() {
char buf[1024];
itoa(iteration, buf, 10);
strcpy(vtu_fullpath, vtu_path);
strcat(vtu_fullpath, "/iter");
strcat(vtu_fullpath, buf);
strcat(vtu_fullpath, ".vtu");
std::future<void> write_vtu;
vec3d* write_position = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_velocity = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_viscosity_force = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_st_force = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_presure_force = (vec3d*)malloc(3 * N * sizeof(float));
float* write_density = (float*)malloc(N * sizeof(float));
float* write_pressure = (float*)malloc(N * sizeof(float));
gpuErrchk(hipMemcpy(write_position, d_POSITION, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_velocity, d_VELOCITY, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_viscosity_force, d_VISCOSITY_FORCE, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_st_force, d_ST_FORCE, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_presure_force, d_PRESSURE_FORCE, N * 3 * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_density, d_DENSITY, N * sizeof(float), hipMemcpyDeviceToHost));
gpuErrchk(hipMemcpy(write_pressure, d_PRESSURE, N * sizeof(float), hipMemcpyDeviceToHost));
//auto started = std::chrono::high_resolution_clock::now();
float** pointData[2];
vec3d** vectorData[4];
pointData[0] = &write_density;
pointData[1] = &write_pressure;
size_pointData = sizeof(pointData) / sizeof(double);
vectorData[0] = &write_velocity;
vectorData[1] = &write_presure_force;
vectorData[2] = &write_viscosity_force;
vectorData[3] = &write_st_force;
//vectorData[4] = &NORMAL;
size_vectorData = sizeof(vectorData) / sizeof(double);
write_vtu = std::async(std::launch::async, VTU_Writer, vtu_path, iteration, write_position, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath,2);
//auto done = std::chrono::high_resolution_clock::now();
//std::cout << "Second VTU_Writer() -> " << std::chrono::duration_cast<std::chrono::milliseconds>(done - started).count() << " ms\n";
if (write_pvd == true) {
strcpy(buf, vtu_fullpath);
VTK_Group(vtk_group_path, buf, simulation_time);
}
write_pvd = true;
//write_vtu.get();
return;
} | 767f06a2cf35a685ccc73d8faa002daa6327da1c.cu | #define _USE_MATH_DEFINES
#include "particle_positions.cuh"
#include "utilities.cuh"
#include "VTK.cuh"
#include "hashing.cuh"
#include "particle_parameters.cuh"
#include <math.h>
#include <future>
#include <chrono>
#include <math.h>
//declaration of all global variables that are going to be used in this file
char main_path[1024];
char vtk_group_path[1024];
char vtu_fullpath[1024];
char vtu_path[1024];
std::string pointDataNames[] = { "density" , "pressure" };
std::string vectorDataNames[] = {"velocity","pressure force","viscosity force","st force" };
int size_pointData;
int size_vectorData;
vec3d* d_POSITION;
vec3d* d_PRED_POSITION;
vec3d* d_VELOCITY;
vec3d* d_PRED_VELOCITY;
vec3d* d_ST_FORCE;
vec3d* d_VISCOSITY_FORCE;
vec3d* d_PRESSURE_FORCE;
vec3d* d_NORMAL;
float* DENSITY;
float* d_DENSITY;
float* d_PRESSURE;
float* d_MASS;
int* d_TYPE;
int* d_hashtable;
vec3d gravity;
//physical constants
float rho_0; //rest density
float visc_const; //viscosity constant
float st_const; // surface tension constant
float epsilon; // damping coefficient for collision
//initial conditions
float PARTICLE_RADIUS;
float MASS_calc;// = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
float USER_MASS;
float PARTICLE_DIAMETER;
float F_INITIAL_POSITION[3]; // = { 0.f,0.f,0.f }; //Fluid particles initial position
float F_FINAL_POSITION[3]; // = { 0.5f,1.f,0.5f }; //Fluid particles final position
float B_INITIAL_POSITION[3]; // = { 0.f,0.f,0.f }; //Boundary particles initial position
float B_FINAL_POSITION[3]; // = { 1.f,1.f,1.f }; //Boundary particles final position
float V_INITIAL[3];
//controlling iteration number and simulation time
int iteration = 1;
float simulation_time; //in seconds
float final_time; //in seconds
//number of particles
int N; //fluid particles
int B; //boundary particles
int T; //total particles
//variables for hashtable
size_t pitch;
int particles_per_row = 200;
int hashtable_size;
//simulation parameters
float invh;
float h;
//CUDA variables
int block_size;
int grid_size;
//PCISPH variables
float vol_comp_perc;
float dens_fluc_perc;
float* d_max_force;
float* d_max_velocity;
float* d_max_rho_err;
float* d_sum_rho_err;
float delta_t;
float max_vol_comp;
float max_rho_fluc;
float BOUNDARY_DIAMETER;
float BOUNDARY_RADIUS;
float pressure_delta;
float max_rho_err_t_1 = 0.f;
float max_rho_err = 0.f;
bool write_pvd = true;
char* user_results_folder = new char[256];
float save_steps;
int fileReader() {
//allocating memory
char* row = new char[256];
int row_buff_index = 0;
char* num_buffer = new char[256];
int num_buffer_index = 0;
float num;
vec3d vec;
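//The hand-rolled parsers below scan each buffered line character by character and
//compare against raw ASCII codes: 10 = '\n', 32 = ' ', 34 = '"', 44 = ',', 45 = '-',
//46 = '.', 59 = ';', 61 = '=', 91 = '[' and 93 = ']'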
//names
char* phys_props_names[] = { "rho_0","visc_const","surface_tension_const","collision_dumping_coeff" };
char* init_cond_names[] = {"particle_radius","mass","fluid_initial_coord","fluid_final_coord","boundary_initial_coord","boundary_final_coord","fluid_initial_velocity","maximum_volume_compression","maximum_density_fluctuation"};
char* system_names[] = { "initial_delta_t","initial_time","final_time","neighbors_per_particle", "save_steps","results_folder"};
int phys_props_size = sizeof(phys_props_names) / 8;
int init_cond_size = sizeof(init_cond_names) / 8;
int system_size = sizeof(system_names) / 8;
//paths
char* phys_props_path = "./props/physical_props.txt";
char* initial_conditions_path = "./props/initial_conditions.txt";
char* system_path = "./props/system.txt";
if (fileExists(phys_props_path) != 0) {
std::cout << "\nERROR! Could not find physical properties file at " << phys_props_path << "\n";
return 1;
}
if (fileExists(initial_conditions_path) != 0) {
std::cout << "\nERROR! Could not find initial conditions file at " << initial_conditions_path << "\n";
return 1;
}
if (fileExists(system_path) != 0) {
std::cout << "\nERROR! Could not find system properties file at " << system_path << "\n";
return 1;
}
//reading physical properties
std::ifstream phys_props (phys_props_path);
for (char write2line; phys_props.get(write2line);) {
if (phys_props.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < phys_props_size; i++) {
if (strstr(row, phys_props_names[i]) != nullptr) {
break;
}
}
if (i < phys_props_size) {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
if (i == 0) {
rho_0 = num;
}
else if (i == 1) {
visc_const = num;
}
else if (i == 2) {
st_const = num;
}
else if (i == 3) {
epsilon = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
phys_props.close();
//reading initial conditions
std::ifstream init_conds(initial_conditions_path);
for (char write2line; init_conds.get(write2line);) {
if (init_conds.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < init_cond_size; i++) {
if (strstr(row, init_cond_names[i]) != nullptr) {
break;
}
}
if (i < init_cond_size) {
if (strstr(row, "[") != nullptr) {
bool save_char = false;
int axis_count = 0;
for (int j = 0; j < strlen(row); j++) {
if (axis_count > 2) {
axis_count = 0;
break;
}
if (row[j] == 91) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 44 || row[j] == 93) {
num = (float)atof(num_buffer);
if (axis_count == 0) {
vec.x = num;
} else if (axis_count == 1) {
vec.y = num;
}
else if (axis_count == 2) {
vec.z = num;
}
axis_count++;
if (row[j] == 32) {
j++;
}
num_buffer_index = 0;
num_buffer = new char[256];
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
bool save_char = false;
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
PARTICLE_RADIUS = num;
}
else if (i == 1) {
USER_MASS = num;
}
else if (i == 2) {
F_INITIAL_POSITION[0] = vec.x;
F_INITIAL_POSITION[1] = vec.y;
F_INITIAL_POSITION[2] = vec.z;
}
else if (i == 3) {
F_FINAL_POSITION[0] = vec.x;
F_FINAL_POSITION[1] = vec.y;
F_FINAL_POSITION[2] = vec.z;
}
else if (i == 4) {
B_INITIAL_POSITION[0] = vec.x;
B_INITIAL_POSITION[1] = vec.y;
B_INITIAL_POSITION[2] = vec.z;
}
else if (i == 5) {
B_FINAL_POSITION[0] = vec.x;
B_FINAL_POSITION[1] = vec.y;
B_FINAL_POSITION[2] = vec.z;
}
else if (i == 6) {
V_INITIAL[0] = vec.x;
V_INITIAL[1] = vec.y;
V_INITIAL[2] = vec.z;
}
else if (i == 7) {
vol_comp_perc = num;
}
else if (i == 8) {
dens_fluc_perc = num;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
row = new char[256];
row_buff_index = 0;
init_conds.close();
std::ifstream system_vars(system_path);
for (char write2line; system_vars.get(write2line);) {
if (system_vars.eof()) {
break;
}
if (write2line == 10) {
int i = 0;
for (i; i < system_size; i++) {
if (strstr(row, system_names[i]) != nullptr) {
break;
}
}
if (i < system_size) {
bool save_char = false;
if (strstr(row, "\"") != nullptr) {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 34 && !save_char) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (row[k+1] == 32) {
j++;
}
else { break; }
}
}
else if (row[j] == 34 && save_char) {
break;
}
else if (save_char){
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
else {
for (int j = 0; j < strlen(row); j++) {
if (row[j] == 61) {
save_char = true;
for (int k = j; k < strlen(row); k++) {
if (!isdigit(row[k + 1])) {
j++;
}
else { break; }
}
}
else if (row[j] == 59) {
num = (float)atof(num_buffer);
num_buffer_index = 0;
num_buffer = new char[256];
break;
}
else if ((isdigit(row[j]) || row[j] == 46 || row[j] == 45) && save_char) {
num_buffer[num_buffer_index] = row[j];
num_buffer_index++;
}
}
}
if (i == 0) {
delta_t = num;
}
else if (i == 1) {
simulation_time = num;
}
else if (i == 2) {
final_time = num;
}
else if (i == 3) {
particles_per_row = (int)num;
}
else if (i == 4) {
save_steps = num;
}
else if (i == 5) {
user_results_folder = num_buffer;
}
}
row = new char[256];
row_buff_index = 0;
}
else if (write2line != 10) {
row[row_buff_index] = write2line;
row_buff_index++;
}
}
return 0;
}
int initialize() {
cudaDeviceProp* prop = new cudaDeviceProp;
gpuErrchk(cudaGetDeviceProperties(prop,0));
std::cout << "-----------------------------------------------\n";
std::cout << "DEVICE PROPERTIES:\n" << "Device name: " << prop->name << "\n" <<
"Max number of threads per block: " << prop->maxThreadsPerBlock << "\n" <<
"Total global memory: " << dround(prop->totalGlobalMem/1e9,2) << " gigabytes\n" <<
"Registers per block: " << prop->regsPerBlock << "\n" <<
"Shared Memory per block: " << prop->sharedMemPerBlock << " bytes\n" <<
"-----------------------------------------------\n";
block_size = prop->maxThreadsPerBlock;
max_vol_comp = rho_0 * vol_comp_perc / 100;
max_rho_fluc = rho_0 * dens_fluc_perc / 100;
if (USER_MASS == 0) {
MASS_calc = rho_0 * (float)M_PI * pow(PARTICLE_RADIUS, 3.f) / 3.f * 4.f;
}
else {
MASS_calc = USER_MASS;
}
PARTICLE_DIAMETER = 2 * PARTICLE_RADIUS;
// get main path of simulation
getMainPath(main_path);
// write path for vtu files
strcpy(vtu_path, main_path);
strcat(vtu_path, "/vtu");
// write path for vtk group file
strcpy(vtk_group_path, main_path);
strcat(vtk_group_path, "/PCISPH.pvd");
// create directory for vtu files
CreateDir(vtu_path);
float VOLUME = 1;
const int SIMULATION_DIMENSION = 3;
// Get number per dimension (NPD) of FLUID particles for hexagonal packing (assuming use of makeprism function)
int NPD[3];
for (int i = 0; i < 3; i++) {
if (i == 1) {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / (sqrt(3.f) / 2.f * PARTICLE_DIAMETER)));
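// sqrt(3)/2 * diameter is the row-to-row spacing along y for hexagonally packed spheres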
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
else {
NPD[i] = static_cast<int>(floor((F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]) / PARTICLE_DIAMETER));
VOLUME = VOLUME * (F_FINAL_POSITION[i] - F_INITIAL_POSITION[i]);
}
}
//Passing NPD to device
int* D_NPD;
gpuErrchk(cudaMalloc((void**)&D_NPD, SIMULATION_DIMENSION * sizeof(int)));
gpuErrchk(cudaMemcpy(D_NPD, NPD, SIMULATION_DIMENSION * sizeof(int), cudaMemcpyHostToDevice));
N = NPD[0] * NPD[1] * NPD[2]; //number of fluid particles
int SIM_SIZE = N * SIMULATION_DIMENSION;
const int x = 40; // Number of particles inside the smoothing length
h = powf(3.f * VOLUME * x / (4.f * (float)M_PI * N), 1.f / 3.f);
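// h is chosen so that, on average, a sphere of radius h contains about x fluid particles:
// (4/3) * pi * h^3 * (N / VOLUME) = x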
//h = 0.02;
invh = 1 / h;
vec3d f_initial;
f_initial.x = F_INITIAL_POSITION[0] + PARTICLE_RADIUS;
f_initial.y = F_INITIAL_POSITION[1] + PARTICLE_RADIUS;
f_initial.z = F_INITIAL_POSITION[2] + PARTICLE_RADIUS;
size_t bytes_fluid_particles = SIM_SIZE * sizeof(float);
vec3d* FLUID_POSITIONS; //host pointer
FLUID_POSITIONS = (vec3d*)malloc(bytes_fluid_particles);
vec3d* D_FLUID_POSITIONS; //device pointer
gpuErrchk(cudaMalloc((void**)&D_FLUID_POSITIONS, bytes_fluid_particles));
// grid -> number of blocks
// block -> number of threads
grid_size = N / block_size + 1;
//generate locations for each particle
makePrism << <grid_size, block_size >> > (D_FLUID_POSITIONS, PARTICLE_DIAMETER, f_initial, D_NPD, N);
BOUNDARY_DIAMETER = h/2;
BOUNDARY_RADIUS = h/4;
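// Boundary particles are spaced at h/2, i.e. their spacing is tied to the smoothing length
// rather than to the fluid particle radius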
// Get number per dimension (NPD) of BOUNDARY particles without compact packing (assuming use of makebox function)
for (int i = 0; i < 3; i++) {
NPD[i] = static_cast<int>(ceil((B_FINAL_POSITION[i] - B_INITIAL_POSITION[i]) / BOUNDARY_DIAMETER)) + 2;
}
B = NPD[0] * NPD[1] * NPD[2] - (NPD[0] - 2) * (NPD[1] - 2) * (NPD[2] - 2); //Number of boundary particles
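// The boundary is a hollow box: every cell of the grid minus the interior block, one particle thick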
SIM_SIZE = NPD[0] * NPD[1] * NPD[2] * SIMULATION_DIMENSION;
vec3d b_initial;
b_initial.x = B_INITIAL_POSITION[0] - BOUNDARY_RADIUS;
b_initial.y = B_INITIAL_POSITION[1] - BOUNDARY_RADIUS;
b_initial.z = B_INITIAL_POSITION[2] - BOUNDARY_RADIUS;
vec3d b_final;
b_final.x = b_initial.x + BOUNDARY_DIAMETER * (NPD[0] - 1);
b_final.y = b_initial.y + BOUNDARY_DIAMETER * (NPD[1] - 1);
b_final.z = b_initial.z + BOUNDARY_DIAMETER * (NPD[2] - 1);
//printf("[%g %g %g] [%g %g %g]\n", b_final.x, b_final.y, b_final.z, B_FINAL_POSITION[0] + BOUNDARY_RADIUS, B_FINAL_POSITION[1] + BOUNDARY_RADIUS, B_FINAL_POSITION[2] + BOUNDARY_RADIUS);
size_t bytes_boundary_particles = SIM_SIZE * sizeof(float);
vec3d* BOUNDARY_POSITIONS; //host pointer
BOUNDARY_POSITIONS = (vec3d*)malloc(bytes_boundary_particles); //allocate memory in the host
vec3d* D_BOUNDARY_POSITIONS; //device pointer
gpuErrchk(cudaMalloc((void**)&D_BOUNDARY_POSITIONS, bytes_boundary_particles)); // allocate memory in the device
makeBox(D_BOUNDARY_POSITIONS, BOUNDARY_DIAMETER, b_initial, b_final, block_size, D_NPD,NPD, SIMULATION_DIMENSION);
T = N + B; //Total number of particles
gpuErrchk(cudaMemcpy(FLUID_POSITIONS, D_FLUID_POSITIONS, bytes_fluid_particles, cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(BOUNDARY_POSITIONS, D_BOUNDARY_POSITIONS, bytes_boundary_particles, cudaMemcpyDeviceToHost));
// Free GPU memory for fluid particles
cudaFree(D_FLUID_POSITIONS);
// HASHING ONLY FOR BOUNDARY PARTICLES
hashtable_size = powf(2, 19);
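// Spatial hash table: 2^19 buckets, each holding up to particles_per_row particle indices (-1 marks an empty slot)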
Hash b_hash(hashtable_size);
const int particles_per_row = 200;
pitch = 0;
int* hashtable = new int[hashtable_size * particles_per_row];
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
gpuErrchk(cudaMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
gpuErrchk(cudaMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, cudaMemcpyHostToDevice));
grid_size = B / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, D_BOUNDARY_POSITIONS, invh, b_hash, B, pitch, particles_per_row);
// Calculate mass (or psi) for each boundary particle
float* d_boundary_mass;
gpuErrchk(cudaMalloc((void**)&d_boundary_mass, B * sizeof(float)));
boundaryPsi << <grid_size, block_size >> > (d_boundary_mass, d_hashtable, rho_0, D_BOUNDARY_POSITIONS, h, invh, particles_per_row, pitch, b_hash, B);
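// psi acts as a pseudo-mass for the static boundary particles; since the boundary never moves,
// it is computed once here and later stored in MASS for indices N..T-1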
float* boundary_mass = (float*)malloc(B * sizeof(float));
gpuErrchk(cudaMemcpy(boundary_mass, d_boundary_mass, (size_t)B * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_boundary_mass));
//Calculate normal for boundary particles
vec3d* d_boundary_normal;
gpuErrchk(cudaMalloc((void**)&d_boundary_normal, B * 3 * sizeof(float)));
boundaryNormal << <grid_size, block_size >> > (d_boundary_normal, D_BOUNDARY_POSITIONS, b_initial, b_final, B);
vec3d* boundary_normal = (vec3d*)malloc(B * 3 * sizeof(float));
gpuErrchk(cudaMemcpy(boundary_normal, d_boundary_normal, (size_t)B * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaFree(d_boundary_normal));
//Write boundary vtu file
float** boundary_point_data[] = { &boundary_mass };
size_pointData = sizeof(boundary_point_data) / sizeof(double);
vec3d** boundary_vectorData[] = { &boundary_normal };
size_vectorData = sizeof(boundary_vectorData) / sizeof(double);
std::string boundary_pointDataNames[] = { "psi" };
std::string boundary_vectorDataNames[] = { "normal" };
VTU_Writer(main_path, iteration, BOUNDARY_POSITIONS, B, boundary_point_data, boundary_vectorData, boundary_pointDataNames, boundary_vectorDataNames, size_pointData, size_vectorData, vtu_fullpath, 1);
//gpuErrchk(cudaMemcpy2D(hashtable, particles_per_row * sizeof(int), d_hashtable, pitch, width, height, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
//END OF HASHING FOR BOUNDARIES
cudaFree(d_hashtable);
cudaFree(D_BOUNDARY_POSITIONS);
//Calculating pressure delta
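// A prototype particle with a completely filled neighborhood (a regular grid of spacing
// PARTICLE_DIAMETER) is used to precompute the PCISPH scaling term
// -(sum(gradW) . sum(gradW)) - sum(gradW . gradW) with the Poly6 kernel gradient;
// the per-step pressure coefficient in mainLoop() divides by this value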
int count = 0;
float min_r = std::numeric_limits<float>::infinity();
int selected_index;
int tmp_size = static_cast<int>(ceil((2 * (h + PARTICLE_DIAMETER)) / PARTICLE_DIAMETER));
vec3d* tmp_points = (vec3d*)malloc(tmp_size * tmp_size * tmp_size * 3 * sizeof(float));
for (float i = -h - PARTICLE_DIAMETER; i <= h + PARTICLE_DIAMETER; i += PARTICLE_DIAMETER) {
for (float j = -h - PARTICLE_DIAMETER; j <= h + PARTICLE_DIAMETER; j += PARTICLE_DIAMETER) {
for (float k = -h - PARTICLE_DIAMETER; k <= h + PARTICLE_DIAMETER; k += PARTICLE_DIAMETER) {
tmp_points[count].x = i;
tmp_points[count].y = j;
tmp_points[count].z = k;
float r = sqrt(i*i+j*j+k*k);
if (r < min_r) {
min_r = r;
selected_index = count; // index of the point just stored (closest to the origin so far)
}
count++;
}
}
}
vec3d selected_point = tmp_points[selected_index];
vec3d r_vector;
float r;
vec3d Grad_W;
Grad_W.x = 0.f;
Grad_W.y = 0.f;
Grad_W.z = 0.f;
float dot_Grad_W = 0.f;
for (int i = 0; i < count; i++) {
r_vector.x = tmp_points[i].x - selected_point.x;
r_vector.y = tmp_points[i].y - selected_point.y;
r_vector.z = tmp_points[i].z - selected_point.z;
r = sqrt(r_vector.x* r_vector.x + r_vector.y* r_vector.y + r_vector.z* r_vector.z);
if (r <= h) {
vec3d inst_Grad_W = Poly6_Gradient(selected_index, i, tmp_points, r, h, invh);
Grad_W.x += inst_Grad_W.x;
Grad_W.y += inst_Grad_W.y;
Grad_W.z += inst_Grad_W.z;
dot_Grad_W += dot_product(inst_Grad_W, inst_Grad_W);
}
}
pressure_delta = -dot_product(Grad_W, Grad_W) - dot_Grad_W;
//Initializing main particle variables
//Defining and allocating main position variable
vec3d* POSITION = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
POSITION[i].x = FLUID_POSITIONS[i].x;
POSITION[i].y = FLUID_POSITIONS[i].y;
POSITION[i].z = FLUID_POSITIONS[i].z;
}
for (int i = N; i < T; i++) {
POSITION[i].x = BOUNDARY_POSITIONS[i - N].x;
POSITION[i].y = BOUNDARY_POSITIONS[i - N].y;
POSITION[i].z = BOUNDARY_POSITIONS[i - N].z;
}
free(BOUNDARY_POSITIONS);
free(FLUID_POSITIONS);
gpuErrchk(cudaMalloc((void**)&d_POSITION, 3*T*sizeof(float)));
gpuErrchk(cudaMemcpy(d_POSITION, POSITION, 3*T*sizeof(float), cudaMemcpyHostToDevice));
//Allocating memory for predicted positions and copying previous position vectors
gpuErrchk(cudaMalloc((void**)&d_PRED_POSITION, 3 * T * sizeof(float)));
gpuErrchk(cudaMemcpy(d_PRED_POSITION, POSITION, 3 * T * sizeof(float), cudaMemcpyHostToDevice));
//Allocating memory for predicted velocity
gpuErrchk(cudaMalloc((void**)&d_PRED_VELOCITY, 3 * N * sizeof(float)));
//Defining and allocating main velocity variable
vec3d* VELOCITY = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VELOCITY[i].x = V_INITIAL[0];
VELOCITY[i].y = V_INITIAL[1];
VELOCITY[i].z = V_INITIAL[2];
}
gpuErrchk(cudaMalloc((void**)&d_VELOCITY, 3*N*sizeof(float)));
gpuErrchk(cudaMemcpy(d_VELOCITY, VELOCITY, 3*N*sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main st force variable
vec3d* ST_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
ST_FORCE[i].x = 0.f;
ST_FORCE[i].y = 0.f;
ST_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_ST_FORCE, 3*N*sizeof(float)));
//Defining and allocating main viscosity force variable
vec3d* VISCOSITY_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
VISCOSITY_FORCE[i].x = 0.f;
VISCOSITY_FORCE[i].y = 0.f;
VISCOSITY_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_VISCOSITY_FORCE, 3*N*sizeof(float)));
//Defining and allocating main pressure force variable
vec3d* PRESSURE_FORCE = (vec3d*)malloc(3*N*sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE_FORCE[i].x = 0.f;
PRESSURE_FORCE[i].y = 0.f;
PRESSURE_FORCE[i].z = 0.f;
}
gpuErrchk(cudaMalloc((void**)&d_PRESSURE_FORCE, 3*N*sizeof(float)));
//Defining and allocating main normal variable
vec3d* NORMAL = (vec3d*)malloc(3*T*sizeof(float));
for (int i = 0; i < N; i++) {
NORMAL[i].x = 0.f;
NORMAL[i].y = 0.f;
NORMAL[i].z = 0.f;
}
for (int i = N; i < T; i++) {
NORMAL[i].x = boundary_normal[i - N].x;
NORMAL[i].y = boundary_normal[i - N].y;
NORMAL[i].z = boundary_normal[i - N].z;
}
free(boundary_normal);
gpuErrchk(cudaMalloc((void**)&d_NORMAL, 3*T*sizeof(float)));
gpuErrchk(cudaMemcpy(d_NORMAL, NORMAL, 3*T*sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main density array
DENSITY = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
DENSITY[i] = rho_0;
}
gpuErrchk(cudaMalloc((void**)&d_DENSITY, N * sizeof(float)));
gpuErrchk(cudaMemcpy(d_DENSITY, DENSITY, N * sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main pressure array
float* PRESSURE = (float*)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
PRESSURE[i] = 0;
}
gpuErrchk(cudaMalloc((void**)&d_PRESSURE, N * sizeof(float)));
//Defining and allocating main mass array
float* MASS = (float*)malloc(T * sizeof(float));
for (int i = 0; i < N; i++) {
MASS[i] = MASS_calc;
}
for (int i = N; i < T; i++) {
MASS[i] = boundary_mass[i - N];
}
free(boundary_mass);
gpuErrchk(cudaMalloc((void**)&d_MASS, T * sizeof(float)));
gpuErrchk(cudaMemcpy(d_MASS, MASS, T * sizeof(float), cudaMemcpyHostToDevice));
//Defining and allocating main type array (0 if fluid, 1 if boundary)
int* TYPE = (int*)malloc(T * sizeof(int));
for (int i = 0; i < N; i++) {
TYPE[i] = 0;
}
for (int i = N; i < T; i++) {
TYPE[i] = 1;
}
gpuErrchk(cudaMalloc((void**)&d_TYPE, T * sizeof(int)));
gpuErrchk(cudaMemcpy(d_TYPE, TYPE, T * sizeof(int), cudaMemcpyHostToDevice));
//Defining and allocating memory to store max density error
gpuErrchk(cudaMalloc((void**)&d_max_rho_err, sizeof(float)));
//Defining and allocating memory to store max force value
gpuErrchk(cudaMalloc((void**)&d_max_force, sizeof(float)));
//Defining and allocating memory to store max velocity value
gpuErrchk(cudaMalloc((void**)&d_max_velocity, sizeof(float)));
//Defining and allocating memory to store summation of density errors to calculate average error
gpuErrchk(cudaMalloc((void**)&d_sum_rho_err, sizeof(float)));
//defining gravity vector
gravity.x = 0.f;
gravity.y = -9.81f;
gravity.z = 0.f;
//Defining variables to write VTU files
float** pointData[2];
vec3d** vectorData[4];
pointData[0] = &DENSITY;
pointData[1] = &PRESSURE;
size_pointData = sizeof(pointData) / sizeof(double);
vectorData[0] = &VELOCITY;
vectorData[1] = &PRESSURE_FORCE;
vectorData[2] = &VISCOSITY_FORCE;
vectorData[3] = &ST_FORCE;
size_vectorData = sizeof(vectorData) / sizeof(double);
VTU_Writer(vtu_path, iteration, POSITION, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath);
VTK_Group(vtk_group_path, vtu_fullpath, simulation_time);
// Initialize main hashtable
hashtable = new int[hashtable_size * particles_per_row];
for (int i = 0; i < hashtable_size; ++i) {
for (int j = 0; j < particles_per_row; j++) {
hashtable[i * particles_per_row + j] = -1;
}
}
gpuErrchk(cudaMallocPitch(&d_hashtable, &pitch, particles_per_row * sizeof(int), hashtable_size));
gpuErrchk(cudaMemcpy2D(d_hashtable, pitch, hashtable, particles_per_row * sizeof(int), particles_per_row * sizeof(int), hashtable_size, cudaMemcpyHostToDevice));
writeTimeKeeper(main_path);
std::cout << N << " Fluid particles\n"
<< B << " Boundary particles\n"
<< "Total of " << T << " particles.\n"
<< "Smoothing radius = " << h << " m.\n"
<< "hashtable size = " << hashtable_size << "\n";
return 0;
}
int mainLoop() {
Hash hash(hashtable_size);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> > (d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_POSITION, invh, hash, T, pitch, particles_per_row);
grid_size = N / block_size + 1;
DensityCalc << <grid_size, block_size >> > (d_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
fluidNormal << <grid_size, block_size >> > (d_NORMAL, d_POSITION, d_MASS, d_DENSITY,d_TYPE, rho_0, h,invh, hash,d_hashtable, particles_per_row,pitch, N);
nonPressureForces << <grid_size, block_size >> > (d_POSITION, d_VISCOSITY_FORCE, d_ST_FORCE, d_MASS, d_DENSITY, d_VELOCITY, d_NORMAL, gravity,d_TYPE, h, invh, rho_0, visc_const, st_const, particles_per_row, pitch,d_hashtable, hash, N);
gpuErrchk(cudaPeekAtLastError());
//reseting values of pressure
resetPressure << <grid_size, block_size >> > (d_PRESSURE, N);
float pressure_coeff = -1 / (2 * powf(MASS_calc * delta_t / rho_0, 2) * pressure_delta);
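// PCISPH stiffness: -1 / (2 * (m * dt / rho_0)^2 * pressure_delta), recomputed every step because delta_t is adaptive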
/*std::cout << pressure_coeff << std::endl;*/
int _k_ = 0;
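// Prediction-correction loop (fixed at 3 iterations here): predict positions/velocities,
// re-hash the predicted positions, resolve collisions, estimate densities, then update
// pressures and pressure forces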
while (_k_ < 3) {
positionAndVelocity << <grid_size, block_size >> > (d_PRED_POSITION,d_PRED_VELOCITY,d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
grid_size = hashtable_size / block_size + 1;
hashtableReset << <grid_size, block_size >> >(d_hashtable, particles_per_row, pitch, hashtable_size);
grid_size = T / block_size + 1;
hashParticlePositions << <grid_size, block_size >> > (d_hashtable, d_PRED_POSITION, invh, hash, T, pitch, particles_per_row);
grid_size = N / block_size + 1;
collisionHandler << <grid_size, block_size >> > (d_PRED_POSITION, d_PRED_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
DensityCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_MASS, d_DENSITY, h, invh, rho_0, particles_per_row, pitch, d_hashtable, hash, N);
PressureCalc << <grid_size, block_size >> > (d_PRESSURE, d_DENSITY, rho_0, pressure_coeff, N);
PressureForceCalc << <grid_size, block_size >> > (d_PRED_POSITION, d_PRESSURE_FORCE, d_PRESSURE, d_MASS, d_DENSITY,d_TYPE, h, invh, particles_per_row, pitch, d_hashtable, hash, N);
_k_++;
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
positionAndVelocity << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_POSITION, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS, delta_t, N);
collisionHandler << <grid_size, block_size >> > (d_POSITION, d_VELOCITY, d_NORMAL, d_TYPE, d_hashtable, h, invh, pitch, hash, particles_per_row, BOUNDARY_DIAMETER, epsilon, N);
//criterias for changes in delta_t value
gpuErrchk(cudaMemcpy(DENSITY, d_DENSITY, N * sizeof(float), cudaMemcpyDeviceToHost));
max_rho_err_t_1 = max_rho_err;
float max_velocity = 0.f;
float max_force = 0.f;
float sum_rho_err = 0.f;
resetValues<<<1,1>>>(d_max_velocity, d_max_force, d_sum_rho_err, d_max_rho_err);
grid_size = N / block_size + 1;
getMaxVandF << <grid_size, block_size >> > (d_max_velocity, d_max_force, d_VELOCITY, d_PRESSURE_FORCE, d_VISCOSITY_FORCE, d_ST_FORCE, gravity, d_MASS,d_DENSITY,d_sum_rho_err,d_max_rho_err, rho_0, N);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaMemcpy(&max_velocity, d_max_velocity, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&max_force, d_max_force, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&sum_rho_err, d_sum_rho_err, sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&max_rho_err, d_max_rho_err, sizeof(float), cudaMemcpyDeviceToHost));
float avg_rho_err = sum_rho_err / N;
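// Adaptive time stepping: delta_t grows or shrinks based on CFL-like limits on the maximum
// force and velocity and on the maximum/average density error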
// delta_t increase
bool criteria1 = 0.19f * sqrt(h / max_force) > delta_t;
bool criteria2 = max_rho_err < 4.5f * max_vol_comp;
bool criteria3 = avg_rho_err < 0.9f * max_vol_comp;
bool criteria4 = 0.39f * (h/max_velocity) > delta_t;
if (criteria1 && criteria2 && criteria3 && criteria4) {
delta_t += delta_t * 0.2f / 100;
}
//delta_t decrease
criteria1 = 0.2f * sqrt(h / max_force) < delta_t;
criteria2 = max_rho_err > 5.5f * max_vol_comp;
criteria3 = avg_rho_err > max_vol_comp;
criteria4 = 0.4f * (h / max_velocity) <= delta_t;
if (criteria1 || criteria2 || criteria3 || criteria4) {
delta_t -= delta_t * 0.2f / 100;
}
//shock handling
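// If the density error jumps sharply (or the velocity limit is violated), the step is treated
// as a shock: delta_t is cut and the simulation state is rolled back to the last saved VTU file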
criteria1 = max_rho_err - max_rho_err_t_1 > 8 * max_vol_comp;
criteria2 = max_rho_err > max_rho_fluc;
criteria3 = 0.45f * (h/max_velocity) < delta_t;
if (criteria1 || criteria2 || criteria3) {
int last_iter = getLastIter(main_path);
char* iter_path = new char[100];
char* num_buffer = new char[32];
while (iteration - last_iter < 2) {
itoa(last_iter, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
//printf("%s\n", iter_path);
remove(iter_path);
last_iter = getLastIter(main_path);
//printf("%d\n", last_iter);
num_buffer = new char[32];
iter_path = new char[100];
}
std::cout << "\n\nSHOCK DETECTED! RETURNING " << iteration - last_iter << " ITERATIONS!\n" << std::endl;
write_pvd = false;
//SHOCK DETECTED
//delta_t = fminf(0.2f * sqrt(h/max_force),0.08f*h/max_velocity);
delta_t = delta_t / 5;
//Return 2 iterations
iteration = last_iter;
if (iteration <= 0) {
std::cout << "\nIMPOSSIBLE TO RETURN 2 ITERATIONS! TERMINATING SIMULATION\n" << std::endl;
return 1;
}
vec3d* position = (vec3d*)malloc(N * sizeof(vec3d));
vec3d* velocity = (vec3d*)malloc(N * sizeof(vec3d));
itoa(iteration, num_buffer, 10);
strcpy(iter_path, vtu_path);
strcat(iter_path, "/iter");
strcat(iter_path, num_buffer);
strcat(iter_path, ".vtu");
readVTU(iter_path, position, velocity);
getNewSimTime(main_path);
rewritePVD(main_path);
gpuErrchk(cudaMemcpy(d_POSITION, position, 3 * N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_VELOCITY, velocity, 3 * N * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
return 0;
}
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
simulation_time += delta_t;
iteration++;
writeTimeKeeper(main_path);
return 0;
}
void multiprocessor_writer() {
char buf[1024];
itoa(iteration, buf, 10);
strcpy(vtu_fullpath, vtu_path);
strcat(vtu_fullpath, "/iter");
strcat(vtu_fullpath, buf);
strcat(vtu_fullpath, ".vtu");
std::future<void> write_vtu;
vec3d* write_position = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_velocity = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_viscosity_force = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_st_force = (vec3d*)malloc(3 * N * sizeof(float));
vec3d* write_presure_force = (vec3d*)malloc(3 * N * sizeof(float));
float* write_density = (float*)malloc(N * sizeof(float));
float* write_pressure = (float*)malloc(N * sizeof(float));
gpuErrchk(cudaMemcpy(write_position, d_POSITION, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_velocity, d_VELOCITY, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_viscosity_force, d_VISCOSITY_FORCE, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_st_force, d_ST_FORCE, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_presure_force, d_PRESSURE_FORCE, N * 3 * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_density, d_DENSITY, N * sizeof(float), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(write_pressure, d_PRESSURE, N * sizeof(float), cudaMemcpyDeviceToHost));
//auto started = std::chrono::high_resolution_clock::now();
float** pointData[2];
vec3d** vectorData[4];
pointData[0] = &write_density;
pointData[1] = &write_pressure;
size_pointData = sizeof(pointData) / sizeof(double);
vectorData[0] = &write_velocity;
vectorData[1] = &write_presure_force;
vectorData[2] = &write_viscosity_force;
vectorData[3] = &write_st_force;
//vectorData[4] = &NORMAL;
size_vectorData = sizeof(vectorData) / sizeof(double);
write_vtu = std::async(std::launch::async, VTU_Writer, vtu_path, iteration, write_position, N, pointData, vectorData, pointDataNames, vectorDataNames, size_pointData, size_vectorData, vtu_fullpath,2);
//auto done = std::chrono::high_resolution_clock::now();
//std::cout << "Second VTU_Writer() -> " << std::chrono::duration_cast<std::chrono::milliseconds>(done - started).count() << " ms\n";
if (write_pvd == true) {
strcpy(buf, vtu_fullpath);
VTK_Group(vtk_group_path, buf, simulation_time);
}
write_pvd = true;
//write_vtu.get();
return;
} |
e07a65007b0b3e3204141de0481ba5b174248cd3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "dUpdateSignif.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *gpuData = NULL;
hipMalloc(&gpuData, XSIZE*YSIZE*sizeof(float));
size_t n = XSIZE*YSIZE;
float *gpuResults = NULL;
hipMalloc(&gpuResults, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
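// Round the problem size up to a multiple of the block dimensions so the launch grid covers the whole matrix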
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
dUpdateSignif), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuData,n,gpuResults);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
dUpdateSignif), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuData,n,gpuResults);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
dUpdateSignif), dim3(gridBlock),dim3(threadBlock), 0, 0, gpuData,n,gpuResults);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e07a65007b0b3e3204141de0481ba5b174248cd3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "dUpdateSignif.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *gpuData = NULL;
cudaMalloc(&gpuData, XSIZE*YSIZE*sizeof(float));
size_t n = XSIZE*YSIZE;
float *gpuResults = NULL;
cudaMalloc(&gpuResults, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
dUpdateSignif<<<gridBlock,threadBlock>>>(gpuData,n,gpuResults);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
dUpdateSignif<<<gridBlock,threadBlock>>>(gpuData,n,gpuResults);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
dUpdateSignif<<<gridBlock,threadBlock>>>(gpuData,n,gpuResults);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
420c0e7a3ecdbc341b04081e0d40d54999c99dc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}
__global__ void uplo_swap_no_transp (const int sd, const int unit, const int bottom, REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const int ia = offset_a + gid_0 + gid_1 * ld_a;
const int ib = offset_b + gid_0 + gid_1 * ld_b;
const REAL c = b[ib];
b[ib] = a[ia];
a[ia] = c;
}
} | 420c0e7a3ecdbc341b04081e0d40d54999c99dc7.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}
__global__ void uplo_swap_no_transp (const int sd, const int unit, const int bottom, REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
const int ia = offset_a + gid_0 + gid_1 * ld_a;
const int ib = offset_b + gid_0 + gid_1 * ld_b;
const REAL c = b[ib];
b[ib] = a[ia];
a[ia] = c;
}
} |
55cc3718778fbc79dca1f5ba0791072269008931.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockRadixSort utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <cub/block/block_radix_sort.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/// Specialized descending, blocked -> blocked
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<true> is_descending,
Int2Type<true> is_blocked_output)
{
BlockRadixSort(temp_storage).SortDescending(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectBlocked(threadIdx.x, d_keys, keys);
StoreDirectBlocked(threadIdx.x, d_values, values);
}
/// Specialized descending, blocked -> striped
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<true> is_descending,
Int2Type<false> is_blocked_output)
{
BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys);
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values);
}
/// Specialized ascending, blocked -> blocked
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<false> is_descending,
Int2Type<true> is_blocked_output)
{
BlockRadixSort(temp_storage).Sort(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectBlocked(threadIdx.x, d_keys, keys);
StoreDirectBlocked(threadIdx.x, d_values, values);
}
/// Specialized ascending, blocked -> striped
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<false> is_descending,
Int2Type<false> is_blocked_output)
{
BlockRadixSort(temp_storage).SortBlockedToStriped(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys);
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values);
}
/**
* BlockRadixSort kernel
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
hipSharedMemConfig SMEM_CONFIG,
int DESCENDING,
int BLOCKED_OUTPUT,
typename Key,
typename Value>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t *d_elapsed)
{
// Threadblock load/store abstraction types
typedef BlockRadixSort<
Key,
BLOCK_THREADS,
ITEMS_PER_THREAD,
Value,
RADIX_BITS,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG>
BlockRadixSortT;
// Allocate temp storage in shared memory
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
// Items per thread
Key keys[ITEMS_PER_THREAD];
Value values[ITEMS_PER_THREAD];
LoadDirectBlocked(threadIdx.x, d_keys, keys);
LoadDirectBlocked(threadIdx.x, d_values, values);
// Start cycle timer
clock_t stop;
clock_t start = clock();
TestBlockSort<BLOCK_THREADS, BlockRadixSortT>(
temp_storage, keys, values, d_keys, d_values, begin_bit, end_bit, stop, Int2Type<DESCENDING>(), Int2Type<BLOCKED_OUTPUT>());
// Store time
if (threadIdx.x == 0)
*d_elapsed = (start > stop) ? start - stop : stop - start;
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Simple key-value pairing
*/
template <
typename Key,
typename Value,
bool IS_FLOAT = (Traits<Key>::CATEGORY == FLOATING_POINT)>
struct Pair
{
Key key;
Value value;
bool operator<(const Pair &b) const
{
return (key < b.key);
}
};
/**
* Simple key-value pairing (specialized for floating point types)
*/
template <typename Key, typename Value>
struct Pair<Key, Value, true>
{
Key key;
Value value;
bool operator<(const Pair &b) const
{
if (key < b.key)
return true;
if (key > b.key)
return false;
// Key in unsigned bits
typedef typename Traits<Key>::UnsignedBits UnsignedBits;
// Return true if key is negative zero and b.key is positive zero
UnsignedBits key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&key));
UnsignedBits b_key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&b.key));
UnsignedBits HIGH_BIT = Traits<Key>::HIGH_BIT;
return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0);
}
};
/**
* Initialize key-value sorting problem.
*/
template <bool DESCENDING, typename Key, typename Value>
void Initialize(
GenMode gen_mode,
Key *h_keys,
Value *h_values,
Key *h_reference_keys,
Value *h_reference_values,
int num_items,
int entropy_reduction,
int begin_bit,
int end_bit)
{
Pair<Key, Value> *h_pairs = new Pair<Key, Value>[num_items];
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_keys[i], i);
RandomBits(h_values[i]);
// Mask off unwanted portions
int num_bits = end_bit - begin_bit;
if ((begin_bit > 0) || (end_bit < sizeof(Key) * 8))
{
unsigned long long base = 0;
memcpy(&base, &h_keys[i], sizeof(Key));
base &= ((1ull << num_bits) - 1) << begin_bit;
memcpy(&h_keys[i], &base, sizeof(Key));
}
h_pairs[i].key = h_keys[i];
h_pairs[i].value = h_values[i];
}
if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items);
std::stable_sort(h_pairs, h_pairs + num_items);
if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items);
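// Reversing before and after a stable ascending sort yields a stable descending reference ordering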
for (int i = 0; i < num_items; ++i)
{
h_reference_keys[i] = h_pairs[i].key;
h_reference_values[i] = h_pairs[i].value;
}
delete[] h_pairs;
}
/**
* Test BlockRadixSort kernel
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
hipSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestDriver(
GenMode gen_mode,
int entropy_reduction,
int begin_bit,
int end_bit)
{
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD,
KEYS_ONLY = Equals<Value, NullType>::VALUE,
};
// Allocate host arrays
Key *h_keys = new Key[TILE_SIZE];
Key *h_reference_keys = new Key[TILE_SIZE];
Value *h_values = new Value[TILE_SIZE];
Value *h_reference_values = new Value[TILE_SIZE];
// Allocate device arrays
Key *d_keys = NULL;
Value *d_values = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys, sizeof(Key) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
// Initialize problem and solution on host
Initialize<DESCENDING>(gen_mode, h_keys, h_values, h_reference_keys, h_reference_values,
TILE_SIZE, entropy_reduction, begin_bit, end_bit);
// Copy problem to device
CubDebugExit(hipMemcpy(d_keys, h_keys, sizeof(Key) * TILE_SIZE, hipMemcpyHostToDevice));
CubDebugExit(hipMemcpy(d_values, h_values, sizeof(Value) * TILE_SIZE, hipMemcpyHostToDevice));
printf("%s "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"RADIX_BITS(%d) "
"MEMOIZE_OUTER_SCAN(%d) "
"INNER_SCAN_ALGORITHM(%d) "
"SMEM_CONFIG(%d) "
"DESCENDING(%d) "
"BLOCKED_OUTPUT(%d) "
"sizeof(Key)(%d) "
"sizeof(Value)(%d) "
"gen_mode(%d), "
"entropy_reduction(%d) "
"begin_bit(%d) "
"end_bit(%d), "
"samples(%d)\n",
((KEYS_ONLY) ? "Keys-only" : "Key-value"),
BLOCK_THREADS,
ITEMS_PER_THREAD,
RADIX_BITS,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG,
DESCENDING,
BLOCKED_OUTPUT,
(int) sizeof(Key),
(int) sizeof(Value),
gen_mode,
entropy_reduction,
begin_bit,
end_bit,
g_num_rand_samples);
// Set shared memory config
hipDeviceSetSharedMemConfig(SMEM_CONFIG);
// Run kernel
hipLaunchKernelGGL(( Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT>), dim3(1), dim3(BLOCK_THREADS), 0, 0,
d_keys, d_values, begin_bit, end_bit, d_elapsed);
// Flush kernel output / errors
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Check keys results
printf("\tKeys: ");
int compare = CompareDeviceResults(h_reference_keys, d_keys, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check value results
if (!KEYS_ONLY)
{
printf("\tValues: ");
int compare = CompareDeviceResults(h_reference_values, d_values, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
printf("\n");
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
printf("\n");
// Cleanup
if (h_keys) delete[] h_keys;
if (h_reference_keys) delete[] h_reference_keys;
if (h_values) delete[] h_values;
if (h_reference_values) delete[] h_reference_values;
if (d_keys) CubDebugExit(g_allocator.DeviceFree(d_keys));
if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test driver (valid tile size <= MAX_SMEM_BYTES)
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
hipSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestValid(Int2Type<true> fits_smem_capacity)
{
// Iterate begin_bit
for (int begin_bit = 0; begin_bit <= 1; begin_bit++)
{
// Iterate end bit
for (int end_bit = begin_bit + 1; end_bit <= sizeof(Key) * 8; end_bit = end_bit * 2 + begin_bit)
{
// Uniform key distribution
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
UNIFORM, 0, begin_bit, end_bit);
// Sequential key distribution
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
INTEGER_SEED, 0, begin_bit, end_bit);
// Iterate random with entropy_reduction
for (int entropy_reduction = 0; entropy_reduction <= 9; entropy_reduction += 3)
{
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
RANDOM, entropy_reduction, begin_bit, end_bit);
}
}
}
}
/**
* Test driver (invalid tile size)
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
hipSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestValid(Int2Type<false> fits_smem_capacity)
{}
/**
* Test ascending/descending and to-blocked/to-striped
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
hipSharedMemConfig SMEM_CONFIG,
typename Key,
typename Value>
void Test()
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockRadixSort<Key, BLOCK_THREADS, ITEMS_PER_THREAD, Value, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG> BlockRadixSortT;
#if defined(SM100) || defined(SM110) || defined(SM130)
Int2Type<sizeof(typename BlockRadixSortT::TempStorage) <= 16 * 1024> fits_smem_capacity;
#else
Int2Type<(sizeof(typename BlockRadixSortT::TempStorage) <= 48 * 1024)> fits_smem_capacity;
#endif
// Sort-ascending, to-striped
TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, false, Key, Value>(fits_smem_capacity);
// Sort-descending, to-blocked
TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, true, Key, Value>(fits_smem_capacity);
// Not necessary
// TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, false, Key, Value>(fits_smem_capacity);
// TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, true, Key, Value>(fits_smem_capacity);
}
/**
* Test value type and smem config
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
typename Key>
void TestKeys()
{
// Test keys-only sorting with both smem configs
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, hipSharedMemBankSizeFourByte, Key, NullType>(); // Keys-only (4-byte smem bank config)
#if !defined(SM100) && !defined(SM110) && !defined(SM130) && !defined(SM200)
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, hipSharedMemBankSizeEightByte, Key, NullType>(); // Keys-only (8-byte smem bank config)
#endif
}
/**
* Test value type and smem config
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
typename Key>
void TestKeysAndPairs()
{
// Test pairs sorting with only 4-byte configs
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, hipSharedMemBankSizeFourByte, Key, char>(); // With small-values
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, hipSharedMemBankSizeFourByte, Key, Key>(); // With same-values
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, hipSharedMemBankSizeFourByte, Key, TestFoo>(); // With large values
}
/**
* Test key type
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM>
void Test()
{
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef TEST_KEYS_ONLY
// Test unsigned types with keys-only
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned char>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned short>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned int>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long long>();
#else
// Test signed and fp types with paired values
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, char>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, short>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, int>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long long>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, float>();
if (ptx_version > 120)
{
// Don't check doubles on PTX120 or below because they're down-converted
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, double>();
}
#endif
}
/**
* Test inner scan algorithm
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_RAKING>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_WARP_SCANS>();
}
/**
* Test outer scan algorithm
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, true>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, false>();
}
/**
* Test radix bits
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 1>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 2>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 5>();
}
/**
* Test items per thread
*/
template <int BLOCK_THREADS>
void Test()
{
Test<BLOCK_THREADS, 1>();
#if defined(SM100) || defined(SM110) || defined(SM130)
// Open64 compiler can't handle the number of test cases
#else
Test<BLOCK_THREADS, 4>();
#endif
Test<BLOCK_THREADS, 11>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
{
typedef float T;
TestDriver<32, 4, 4, true, BLOCK_SCAN_WARP_SCANS, hipSharedMemBankSizeFourByte, false, false, T, NullType>(INTEGER_SEED, 0, 0, sizeof(T) * 8);
}
/*
// Compile/run quick tests
typedef unsigned int T;
TestDriver<64, 17, 4, true, BLOCK_SCAN_WARP_SCANS, hipSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
TestDriver<96, 8, 4, true, BLOCK_SCAN_WARP_SCANS, hipSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
TestDriver<128, 2, 4, true, BLOCK_SCAN_WARP_SCANS, hipSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
*/
#else
// Compile/run thorough tests
Test<32>();
Test<64>();
Test<160>();
#endif // QUICK_TEST
return 0;
}
| 55cc3718778fbc79dca1f5ba0791072269008931.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockRadixSort utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <cub/block/block_radix_sort.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/// Specialized descending, blocked -> blocked
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<true> is_descending,
Int2Type<true> is_blocked_output)
{
BlockRadixSort(temp_storage).SortDescending(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectBlocked(threadIdx.x, d_keys, keys);
StoreDirectBlocked(threadIdx.x, d_values, values);
}
/// Specialized descending, blocked -> striped
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<true> is_descending,
Int2Type<false> is_blocked_output)
{
BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys);
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values);
}
/// Specialized ascending, blocked -> blocked
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<false> is_descending,
Int2Type<true> is_blocked_output)
{
BlockRadixSort(temp_storage).Sort(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectBlocked(threadIdx.x, d_keys, keys);
StoreDirectBlocked(threadIdx.x, d_values, values);
}
/// Specialized ascending, blocked -> striped
template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value>
__device__ __forceinline__ void TestBlockSort(
typename BlockRadixSort::TempStorage &temp_storage,
Key (&keys)[ITEMS_PER_THREAD],
Value (&values)[ITEMS_PER_THREAD],
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t &stop,
Int2Type<false> is_descending,
Int2Type<false> is_blocked_output)
{
BlockRadixSort(temp_storage).SortBlockedToStriped(keys, values, begin_bit, end_bit);
stop = clock();
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys);
StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values);
}
/**
* BlockRadixSort kernel
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
cudaSharedMemConfig SMEM_CONFIG,
int DESCENDING,
int BLOCKED_OUTPUT,
typename Key,
typename Value>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
Key *d_keys,
Value *d_values,
int begin_bit,
int end_bit,
clock_t *d_elapsed)
{
// Threadblock load/store abstraction types
typedef BlockRadixSort<
Key,
BLOCK_THREADS,
ITEMS_PER_THREAD,
Value,
RADIX_BITS,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG>
BlockRadixSortT;
// Allocate temp storage in shared memory
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
// Items per thread
Key keys[ITEMS_PER_THREAD];
Value values[ITEMS_PER_THREAD];
LoadDirectBlocked(threadIdx.x, d_keys, keys);
LoadDirectBlocked(threadIdx.x, d_values, values);
// Start cycle timer
clock_t stop;
clock_t start = clock();
TestBlockSort<BLOCK_THREADS, BlockRadixSortT>(
temp_storage, keys, values, d_keys, d_values, begin_bit, end_bit, stop, Int2Type<DESCENDING>(), Int2Type<BLOCKED_OUTPUT>());
// Store time
if (threadIdx.x == 0)
*d_elapsed = (start > stop) ? start - stop : stop - start;
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Simple key-value pairing
*/
template <
typename Key,
typename Value,
bool IS_FLOAT = (Traits<Key>::CATEGORY == FLOATING_POINT)>
struct Pair
{
Key key;
Value value;
bool operator<(const Pair &b) const
{
return (key < b.key);
}
};
/**
* Simple key-value pairing (specialized for floating point types)
*/
template <typename Key, typename Value>
struct Pair<Key, Value, true>
{
Key key;
Value value;
bool operator<(const Pair &b) const
{
if (key < b.key)
return true;
if (key > b.key)
return false;
// Key in unsigned bits
typedef typename Traits<Key>::UnsignedBits UnsignedBits;
// Return true if key is negative zero and b.key is positive zero
UnsignedBits key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&key));
UnsignedBits b_key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&b.key));
UnsignedBits HIGH_BIT = Traits<Key>::HIGH_BIT;
return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0);
}
};
/**
* Initialize key-value sorting problem.
*/
template <bool DESCENDING, typename Key, typename Value>
void Initialize(
GenMode gen_mode,
Key *h_keys,
Value *h_values,
Key *h_reference_keys,
Value *h_reference_values,
int num_items,
int entropy_reduction,
int begin_bit,
int end_bit)
{
Pair<Key, Value> *h_pairs = new Pair<Key, Value>[num_items];
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_keys[i], i);
RandomBits(h_values[i]);
// Mask off unwanted portions
int num_bits = end_bit - begin_bit;
if ((begin_bit > 0) || (end_bit < sizeof(Key) * 8))
{
unsigned long long base = 0;
memcpy(&base, &h_keys[i], sizeof(Key));
base &= ((1ull << num_bits) - 1) << begin_bit;
memcpy(&h_keys[i], &base, sizeof(Key));
}
h_pairs[i].key = h_keys[i];
h_pairs[i].value = h_values[i];
}
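// For a descending reference, reverse the pairs before and after the ascending
// stable_sort: items with equal keys keep their original relative order, so the
// descending reference is still produced by a stable sort.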
if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items);
std::stable_sort(h_pairs, h_pairs + num_items);
if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items);
for (int i = 0; i < num_items; ++i)
{
h_reference_keys[i] = h_pairs[i].key;
h_reference_values[i] = h_pairs[i].value;
}
delete[] h_pairs;
}
/**
* Test BlockRadixSort kernel
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
cudaSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestDriver(
GenMode gen_mode,
int entropy_reduction,
int begin_bit,
int end_bit)
{
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD,
KEYS_ONLY = Equals<Value, NullType>::VALUE,
};
// Allocate host arrays
Key *h_keys = new Key[TILE_SIZE];
Key *h_reference_keys = new Key[TILE_SIZE];
Value *h_values = new Value[TILE_SIZE];
Value *h_reference_values = new Value[TILE_SIZE];
// Allocate device arrays
Key *d_keys = NULL;
Value *d_values = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys, sizeof(Key) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t)));
// Initialize problem and solution on host
Initialize<DESCENDING>(gen_mode, h_keys, h_values, h_reference_keys, h_reference_values,
TILE_SIZE, entropy_reduction, begin_bit, end_bit);
// Copy problem to device
CubDebugExit(cudaMemcpy(d_keys, h_keys, sizeof(Key) * TILE_SIZE, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * TILE_SIZE, cudaMemcpyHostToDevice));
printf("%s "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"RADIX_BITS(%d) "
"MEMOIZE_OUTER_SCAN(%d) "
"INNER_SCAN_ALGORITHM(%d) "
"SMEM_CONFIG(%d) "
"DESCENDING(%d) "
"BLOCKED_OUTPUT(%d) "
"sizeof(Key)(%d) "
"sizeof(Value)(%d) "
"gen_mode(%d), "
"entropy_reduction(%d) "
"begin_bit(%d) "
"end_bit(%d), "
"samples(%d)\n",
((KEYS_ONLY) ? "Keys-only" : "Key-value"),
BLOCK_THREADS,
ITEMS_PER_THREAD,
RADIX_BITS,
MEMOIZE_OUTER_SCAN,
INNER_SCAN_ALGORITHM,
SMEM_CONFIG,
DESCENDING,
BLOCKED_OUTPUT,
(int) sizeof(Key),
(int) sizeof(Value),
gen_mode,
entropy_reduction,
begin_bit,
end_bit,
g_num_rand_samples);
// Set shared memory config
cudaDeviceSetSharedMemConfig(SMEM_CONFIG);
// Run kernel
Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT><<<1, BLOCK_THREADS>>>(
d_keys, d_values, begin_bit, end_bit, d_elapsed);
// Flush kernel output / errors
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Check keys results
printf("\tKeys: ");
int compare = CompareDeviceResults(h_reference_keys, d_keys, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check value results
if (!KEYS_ONLY)
{
printf("\tValues: ");
int compare = CompareDeviceResults(h_reference_values, d_values, TILE_SIZE, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
printf("\n");
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
printf("\n");
// Cleanup
if (h_keys) delete[] h_keys;
if (h_reference_keys) delete[] h_reference_keys;
if (h_values) delete[] h_values;
if (h_reference_values) delete[] h_reference_values;
if (d_keys) CubDebugExit(g_allocator.DeviceFree(d_keys));
if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test driver (valid tile size <= MAX_SMEM_BYTES)
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
cudaSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestValid(Int2Type<true> fits_smem_capacity)
{
// Iterate begin_bit
for (int begin_bit = 0; begin_bit <= 1; begin_bit++)
{
// Iterate end bit
for (int end_bit = begin_bit + 1; end_bit <= sizeof(Key) * 8; end_bit = end_bit * 2 + begin_bit)
{
// Uniform key distribution
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
UNIFORM, 0, begin_bit, end_bit);
// Sequential key distribution
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
INTEGER_SEED, 0, begin_bit, end_bit);
// Iterate random with entropy_reduction
for (int entropy_reduction = 0; entropy_reduction <= 9; entropy_reduction += 3)
{
TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>(
RANDOM, entropy_reduction, begin_bit, end_bit);
}
}
}
}
/**
* Test driver (invalid tile size)
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
cudaSharedMemConfig SMEM_CONFIG,
bool DESCENDING,
bool BLOCKED_OUTPUT,
typename Key,
typename Value>
void TestValid(Int2Type<false> fits_smem_capacity)
{}
/**
* Test ascending/descending and to-blocked/to-striped
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
cudaSharedMemConfig SMEM_CONFIG,
typename Key,
typename Value>
void Test()
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockRadixSort<Key, BLOCK_THREADS, ITEMS_PER_THREAD, Value, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG> BlockRadixSortT;
#if defined(SM100) || defined(SM110) || defined(SM130)
Int2Type<sizeof(typename BlockRadixSortT::TempStorage) <= 16 * 1024> fits_smem_capacity;
#else
Int2Type<(sizeof(typename BlockRadixSortT::TempStorage) <= 48 * 1024)> fits_smem_capacity;
#endif
// Sort-ascending, to-striped
TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, false, Key, Value>(fits_smem_capacity);
// Sort-descending, to-blocked
TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, true, Key, Value>(fits_smem_capacity);
// Not necessary
// TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, false, Key, Value>(fits_smem_capacity);
// TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, true, Key, Value>(fits_smem_capacity);
}
/**
* Test value type and smem config
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
typename Key>
void TestKeys()
{
// Test keys-only sorting with both smem configs
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, NullType>(); // Keys-only (4-byte smem bank config)
#if !defined(SM100) && !defined(SM110) && !defined(SM130) && !defined(SM200)
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeEightByte, Key, NullType>(); // Keys-only (8-byte smem bank config)
#endif
}
/**
* Test value type and smem config
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM,
typename Key>
void TestKeysAndPairs()
{
// Test pairs sorting with only 4-byte configs
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, char>(); // With small-values
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, Key>(); // With same-values
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, TestFoo>(); // With large values
}
/**
* Test key type
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN,
BlockScanAlgorithm INNER_SCAN_ALGORITHM>
void Test()
{
// Get ptx version
int ptx_version;
CubDebugExit(PtxVersion(ptx_version));
#ifdef TEST_KEYS_ONLY
// Test unsigned types with keys-only
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned char>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned short>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned int>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long>();
TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long long>();
#else
// Test signed and fp types with paired values
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, char>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, short>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, int>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long long>();
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, float>();
if (ptx_version > 120)
{
// Don't check doubles on PTX120 or below because they're down-converted
TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, double>();
}
#endif
}
/**
* Test inner scan algorithm
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS,
bool MEMOIZE_OUTER_SCAN>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_RAKING>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_WARP_SCANS>();
}
/**
* Test outer scan algorithm
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
int RADIX_BITS>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, true>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, false>();
}
/**
* Test radix bits
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void Test()
{
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 1>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 2>();
Test<BLOCK_THREADS, ITEMS_PER_THREAD, 5>();
}
/**
* Test items per thread
*/
template <int BLOCK_THREADS>
void Test()
{
Test<BLOCK_THREADS, 1>();
#if defined(SM100) || defined(SM110) || defined(SM130)
// Open64 compiler can't handle the number of test cases
#else
Test<BLOCK_THREADS, 4>();
#endif
Test<BLOCK_THREADS, 11>();
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
#ifdef QUICK_TEST
{
typedef float T;
TestDriver<32, 4, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(INTEGER_SEED, 0, 0, sizeof(T) * 8);
}
/*
// Compile/run quick tests
typedef unsigned int T;
TestDriver<64, 17, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
TestDriver<96, 8, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
TestDriver<128, 2, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8);
*/
#else
// Compile/run thorough tests
Test<32>();
Test<64>();
Test<160>();
#endif // QUICK_TEST
return 0;
}
|
ddfaf232782120b44bf402c423639f0fa9e9a831.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaDadd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int size = 1;
double value = 2;
const double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cudaDadd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, size, value, x, y);
hipDeviceSynchronize();
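// Warm-up launches (not timed) so that one-time initialization costs do not
// skew the measurement below.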
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(cudaDadd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, size, value, x, y);
}
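// Timed region: 1000 asynchronous launches; there is no device synchronize before
// 'end' is taken, so this primarily measures kernel launch/enqueue overhead.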
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(cudaDadd_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, size, value, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ddfaf232782120b44bf402c423639f0fa9e9a831.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaDadd_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int size = 1;
double value = 2;
const double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaDadd_kernel<<<gridBlock,threadBlock>>>(size,value,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaDadd_kernel<<<gridBlock,threadBlock>>>(size,value,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaDadd_kernel<<<gridBlock,threadBlock>>>(size,value,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d315b8a509c09b9bdb01010d8db81c85093b7f12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
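// (Hint: a typical refactor is to allocate `a` with hipMallocManaged so the same
// pointer is valid on both host and device, and to release it with hipFree below.)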
a = (int *)malloc(size);
init(a, N);
size_t threads_per_block = 10;
size_t number_of_blocks = 10;
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
free(a);
}
| d315b8a509c09b9bdb01010d8db81c85093b7f12.cu | #include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
a = (int *)malloc(size);
init(a, N);
size_t threads_per_block = 10;
size_t number_of_blocks = 10;
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
free(a);
}
|
3055bfd783b3d4b5504b37d67945d86d4fd6eb2a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define THREADNUM 512
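// One hiprandState_t per thread: setupKernel seeds each state once (same seed,
// per-thread subsequence), and genRandom draws one uniform float per thread,
// writing the advanced state back to global memory.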
__device__ float randGpu(hiprandState_t *global_state, int ind){
//int ind = threadIdx.x;
hiprandState_t local_state = global_state[ind];
float rand_num = hiprand_uniform(&local_state);
global_state[ind] = local_state;
return rand_num;
}
__global__ void setupKernel(hiprandState_t *states, unsigned long seed){
int ind = threadIdx.x;
hiprand_init(seed, ind, 0, &states[ind]);
}
__global__ void genRandom(float *data, hiprandState_t *global_state){
int ind = threadIdx.x;
data[ind] = randGpu(global_state, ind);
}
int main(){
float *data, *G_data;
data = (float*)malloc(sizeof(float) * THREADNUM);
hipMalloc((void**) &G_data, sizeof(float) * THREADNUM);
hiprandState_t *dev_states;
hipMalloc((void**) &dev_states, sizeof(hiprandState_t) * THREADNUM);
hipLaunchKernelGGL(( setupKernel), dim3(1), dim3(THREADNUM), 0, 0, dev_states, unsigned(time(NULL)));
hipLaunchKernelGGL(( genRandom), dim3(1), dim3(THREADNUM), 0, 0, G_data, dev_states);
hipMemcpy(data, G_data, sizeof(float) * THREADNUM, hipMemcpyDeviceToHost);
int i;
for(i=0; i<THREADNUM; i++){
printf("%f\n", data[i]);
}
return 0;
}
| 3055bfd783b3d4b5504b37d67945d86d4fd6eb2a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define THREADNUM 512
__device__ float randGpu(curandState *global_state, int ind){
//int ind = threadIdx.x;
curandState local_state = global_state[ind];
float rand_num = curand_uniform(&local_state);
global_state[ind] = local_state;
return rand_num;
}
__global__ void setupKernel(curandState *states, unsigned long seed){
int ind = threadIdx.x;
curand_init(seed, ind, 0, &states[ind]);
}
__global__ void genRandom(float *data, curandState *global_state){
int ind = threadIdx.x;
data[ind] = randGpu(global_state, ind);
}
int main(){
float *data, *G_data;
data = (float*)malloc(sizeof(float) * THREADNUM);
cudaMalloc((void**) &G_data, sizeof(float) * THREADNUM);
curandState *dev_states;
cudaMalloc((void**) &dev_states, sizeof(curandState) * THREADNUM);
setupKernel<<<1, THREADNUM>>>(dev_states, unsigned(time(NULL)));
genRandom<<<1, THREADNUM>>>(G_data, dev_states);
cudaMemcpy(data, G_data, sizeof(float) * THREADNUM, cudaMemcpyDeviceToHost);
int i;
for(i=0; i<THREADNUM; i++){
printf("%f\n", data[i]);
}
return 0;
}
|
d27f48971f68f895a8b98378a2c9e10dc9976a2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (16)
#define MATRIX_N (16)
#define MATRIX_K (16)
const int WMMA_M =16;
const int WMMA_N =16;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef float ctype;
typedef float dtype;
typedef float host_type;
#define A_LAYOUT COL_MAJOR
#define B_LAYOUT COL_MAJOR
#define C_LAYOUT COL_MAJOR
#define D_LAYOUT COL_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
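// The strides above are the leading dimensions passed to load/store_matrix_sync:
// for a row-major operand it is the number of columns, for a column-major operand
// it is the number of rows.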
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);// identity is only defined for square matrices
matrix[row*row_size+col]=static_cast<T>((row==col) ? 1 : 0);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//reseting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
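// Host reference GEMM: accumulates result = A*B + C element by element, honoring
// each operand's layout; used later to validate the WMMA output.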
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,blayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
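// A single warp computes one 16x16x16 tile, D = A*B + C. The printf loops below
// are debug output showing how each fragment's elements are distributed across
// the 32 threads of the warp.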
float t;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Load the A, B and C input tiles
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
for(int i=0; i < a_frag.num_elements; i++) {
t=static_cast<float>(a_frag.x[i]);
printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < b_frag.num_elements; i++) {
t=static_cast<float>(b_frag.x[i]);
printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < c_frag.num_elements; i++) {
t=static_cast<float>(c_frag.x[i]);
printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
}
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
hipEvent_t startWMMA;
hipEvent_t stopWMMA;
cudaErrCheck(hipEventCreate(&startWMMA));
cudaErrCheck(hipEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N);
hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(hipEventRecord(startWMMA));
hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(hipEventRecord(stopWMMA));
hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(hipEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(hipEventDestroy(startWMMA));
cudaErrCheck(hipEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(hipFree(a_htype));
cudaErrCheck(hipFree(b_htype));
cudaErrCheck(hipFree(c_htype));
cudaErrCheck(hipFree(d_htype));
cudaErrCheck(hipFree(a_atype));
cudaErrCheck(hipFree(b_btype));
cudaErrCheck(hipFree(c_ctype));
cudaErrCheck(hipFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(hipDeviceReset());
return 0;
}
| d27f48971f68f895a8b98378a2c9e10dc9976a2b.cu | #include <stdio.h>
#include <curand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (16)
#define MATRIX_N (16)
#define MATRIX_K (16)
const int WMMA_M =16;
const int WMMA_N =16;
const int WMMA_K =16;
typedef half atype;
typedef half btype;
typedef float ctype;
typedef float dtype;
typedef float host_type;
#define A_LAYOUT COL_MAJOR
#define B_LAYOUT COL_MAJOR
#define C_LAYOUT COL_MAJOR
#define D_LAYOUT COL_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=3,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);// identity is only defined for square matrices
matrix[row*row_size+col]=static_cast<T>((row==col) ? 1 : 0);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//reseting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,blayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
float t;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Load the A, B and C input tiles
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
for(int i=0; i < a_frag.num_elements; i++) {
t=static_cast<float>(a_frag.x[i]);
printf("A_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < b_frag.num_elements; i++) {
t=static_cast<float>(b_frag.x[i]);
printf("B_THREAD%d: %.2f \n",threadIdx.x,t);
}
for(int i=0; i < c_frag.num_elements; i++) {
t=static_cast<float>(c_frag.x[i]);
printf("C_THREAD%d: %.2f \n",threadIdx.x,t);
}
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
}
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
cudaEvent_t startWMMA;
cudaEvent_t stopWMMA;
cudaErrCheck(cudaEventCreate(&startWMMA));
cudaErrCheck(cudaEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,LINEAR);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,LINEAR);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,LINEAR);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K);
convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N);
convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(cudaEventRecord(startWMMA));
wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(cudaEventRecord(stopWMMA));
convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(cudaEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost));
printf("Results verified: cublas and WMMA agree.\n\n");
float wmmaTime;
cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(cudaEventDestroy(startWMMA));
cudaErrCheck(cudaEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(cudaFree(a_htype));
cudaErrCheck(cudaFree(b_htype));
cudaErrCheck(cudaFree(c_htype));
cudaErrCheck(cudaFree(d_htype));
cudaErrCheck(cudaFree(a_atype));
cudaErrCheck(cudaFree(b_btype));
cudaErrCheck(cudaFree(c_ctype));
cudaErrCheck(cudaFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
2d939db8111a939ad14b3b60f7624c3875dbe22c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* calculate pi
*/
#include <stdio.h>
#include <math.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
//#define BLOCKS 512
#define NUMTHREADS 8192
#define ITERATIONS 2e09
/**
* CUDA Kernel Device code
*
*/
/*****************************************************************************/
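// Each thread sums its own contiguous chunk of the Gregory-Leibniz series
// pi = 4*(1 - 1/3 + 1/5 - 1/7 + ...). Global thread 0 then adds the per-thread
// partials serially; note that __syncthreads() only synchronizes threads within
// one block, so this final reduction relies on the other blocks having already
// written their piTotal entries.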
__global__ void calculatePi(double *piTotal, long int iterations, int totalThreads)
{ long int initIteration, endIteration;
long int i = 0;
double piPartial;
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
initIteration = (iterations/totalThreads) * index;
endIteration = initIteration + (iterations/totalThreads) - 1;
i = initIteration;
piPartial = 0;
do{
piPartial = piPartial + (double)(4.0 / ((i*2)+1));
i++;
piPartial = piPartial - (double)(4.0 / ((i*2)+1));
i++;
}while(i < endIteration);
piTotal[index] = piPartial;
__syncthreads();
if(index == 0){
for(i = 1; i < totalThreads; i++)
piTotal[0] = piTotal[0] + piTotal[i];
}
}
/******************************************************************************
* Host main routine
*/
int main(int argc, char *argv[])
{
int blocksPerGrid, threadsPerBlock, i, size;
long int iterations;
int totalThreads;
double *h_pitotal, *d_pitotal;
std::cout << "OK1\n";
//sscanf(argv[1], "%i", &blocksPerGrid);
blocksPerGrid = 10;
hipError_t err = hipSuccess;
std::cout << "OK2\n";
size = sizeof(double)*NUMTHREADS;
h_pitotal = (double *)malloc(size);
if ( h_pitotal == NULL){
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
std::cout << "OK3\n";
for(i = 0; i < NUMTHREADS; i++)
h_pitotal[i] = 0.0;
std::cout << "OK4\n";
err = hipMalloc((void **)&d_pitotal, size);
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK5\n";
err = hipMemcpy(d_pitotal, h_pitotal, sizeof(double)*NUMTHREADS, hipMemcpyHostToDevice);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK6\n";
// Launch KERNEL
threadsPerBlock = NUMTHREADS/blocksPerGrid;
totalThreads = blocksPerGrid * threadsPerBlock;
iterations = ITERATIONS;
printf("CUDA kernel launch with %d blocks of %d threads Total: %i\n", blocksPerGrid, threadsPerBlock, totalThreads );
hipLaunchKernelGGL(( calculatePi), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pitotal, iterations, totalThreads);
err = hipGetLastError();
std::cout << "OK7\n";
if (err != hipSuccess){
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK8\n";
err = hipMemcpy(h_pitotal, d_pitotal, size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK9\n";
err = hipFree(d_pitotal);
if (err != hipSuccess){
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK10\n";
printf("\n%.12f", *h_pitotal);
// Free host memory
free(h_pitotal);
err = hipDeviceReset();
std::cout << "OK11\n";
if (err != hipSuccess){
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK12\n";
return 0;
}
| 2d939db8111a939ad14b3b60f7624c3875dbe22c.cu |
/**
* calculate pi
*/
#include <stdio.h>
#include <math.h>
#include <iostream>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#define BLOCKS 512
#define NUMTHREADS 8192
#define ITERATIONS 2e09
/**
* CUDA Kernel Device code
*
*/
/*****************************************************************************/
__global__ void calculatePi(double *piTotal, long int iterations, int totalThreads)
{ long int initIteration, endIteration;
long int i = 0;
double piPartial;
int index = (blockDim.x * blockIdx.x) + threadIdx.x;
initIteration = (iterations/totalThreads) * index;
endIteration = initIteration + (iterations/totalThreads) - 1;
i = initIteration;
piPartial = 0;
do{
piPartial = piPartial + (double)(4.0 / ((i*2)+1));
i++;
piPartial = piPartial - (double)(4.0 / ((i*2)+1));
i++;
}while(i < endIteration);
piTotal[index] = piPartial;
__syncthreads();
if(index == 0){
for(i = 1; i < totalThreads; i++)
piTotal[0] = piTotal[0] + piTotal[i];
}
}
/******************************************************************************
* Host main routine
*/
int main(int argc, char *argv[])
{
int blocksPerGrid, threadsPerBlock, i, size;
long int iterations;
int totalThreads;
double *h_pitotal, *d_pitotal;
std::cout << "OK1\n";
//sscanf(argv[1], "%i", &blocksPerGrid);
blocksPerGrid = 10;
cudaError_t err = cudaSuccess;
std::cout << "OK2\n";
size = sizeof(double)*NUMTHREADS;
h_pitotal = (double *)malloc(size);
if ( h_pitotal == NULL){
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
std::cout << "OK3\n";
for(i = 0; i < NUMTHREADS; i++)
h_pitotal[i] = 0.0;
std::cout << "OK4\n";
err = cudaMalloc((void **)&d_pitotal, size);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK5\n";
err = cudaMemcpy(d_pitotal, h_pitotal, sizeof(double)*NUMTHREADS, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK6\n";
// Launch KERNEL
threadsPerBlock = NUMTHREADS/blocksPerGrid;
totalThreads = blocksPerGrid * threadsPerBlock;
iterations = ITERATIONS;
printf("CUDA kernel launch with %d blocks of %d threads Total: %i\n", blocksPerGrid, threadsPerBlock, totalThreads );
calculatePi<<<blocksPerGrid, threadsPerBlock>>>(d_pitotal, iterations, totalThreads);
err = cudaGetLastError();
std::cout << "OK7\n";
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK8\n";
err = cudaMemcpy(h_pitotal, d_pitotal, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK9\n";
err = cudaFree(d_pitotal);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK10\n";
printf("\n%.12f", *h_pitotal);
// Free host memory
free(h_pitotal);
err = cudaDeviceReset();
std::cout << "OK11\n";
if (err != cudaSuccess){
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
std::cout << "OK12\n";
return 0;
}
|
32ca540d8d722189b71f40a554272c0a318e7375.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaCircularize.cuh"
#include "Common/CudaErrorCheck.cuh"
#include <Havana2/Configuration.h>
#ifdef CUDA_ENABLED
#include <math_functions.h>
#include <math_constants.h>
// GPU kernels
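// getInterpolationMap precomputes, for every pixel of the 2*radius x 2*radius
// output image, the polar coordinates used for the rect-to-circle mapping:
// rho is the radial distance and theta maps the angle onto a column index in
// [0, width-1]. interpolation() then samples the rectangular image at
// (theta, circ_center + rho) with bilinear interpolation to build the
// circular view.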
__global__ void getInterpolationMap(float* rho, float* theta, int radius, int width)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * 2 * radius;
if ((x < 2 * radius) && (y < 2 * radius))
{
// Set meshgrid
int xr = radius - x;
int yr = y - radius;
// Rho : Interpolation Map
rho[offset] = ((float)radius - 1.0f) / radius * hypotf(xr, yr);
// Theta : Interpolation Map
theta[offset] = ((float)width - 1.0f) / (2.0f * CUDART_PI_F) * (CUDART_PI_F + atan2f(yr, xr));
}
}
__global__ void interpolation(uint8_t* pRect, uint8_t* pCirc, float* rho, float* theta,
int radius, int circ_center, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * 2 * radius;
if ((x < 2 * radius) && (y < 2 * radius))
{
int i = theta[offset];
int j = circ_center + rho[offset];
if (j < circ_center + radius)
{
// Bilinear
int i0 = floorf(i); int j0 = floorf(j);
int i1 = i0 + 1; int j1 = j0 + 1;
for (int c = 0; c < channels; c++)
{
float f00 = pRect[(i0 + j0 * width) * channels + c];
float f10 = pRect[(i1 + j0 * width) * channels + c];
float f01 = pRect[(i0 + j1 * width) * channels + c];
float f11 = pRect[(i1 + j1 * width) * channels + c];
float f0 = (f01 - f00) / (j1 - j0) * (j - j0) + f00;
float f1 = (f11 - f10) / (j1 - j0) * (j - j0) + f10;
pCirc[offset * channels + c] = (f1 - f0) / (i1 - i0) * (i - i0) + f0;
}
// Nearest
///for (int c = 0; c < channels; c++)
/// pCirc[offset * channels + c] = pRect[(int)(roundf(i) + roundf(j) * width) * channels + c];
}
else
for (int c = 0; c < channels; c++)
pCirc[offset * channels + c] = 0;
}
}
CudaCircularize::CudaCircularize(int _radius, int _width, int _height) :
radius(_radius), diameter(2 * _radius), width(_width), height(_height)
{
// Memory Allocation
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceRho, sizeof(float) * diameter * diameter));
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceTheta, sizeof(float) * diameter * diameter));
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceRect, sizeof(uint8_t) * width * height));
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceCirc, sizeof(uint8_t) * diameter * diameter));
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceRectRGB, sizeof(uint8_t) * 3 * width * height));
CUDA_CHECK_ERROR(hipMalloc((void**)&deviceCircRGB, sizeof(uint8_t) * 3 * diameter * diameter));
// Grid and Block Dimensions
blocksPerGrid = dim3((diameter + N_THREADS - 1) / N_THREADS, (diameter + N_THREADS - 1) / N_THREADS);
threadsPerBlock = dim3(N_THREADS, N_THREADS);
// Interpolation Map
getInterpolationMap << < blocksPerGrid, threadsPerBlock >> > (deviceRho, deviceTheta, radius, width);
}
CudaCircularize::~CudaCircularize()
{
// Memory Deallocation
CUDA_CHECK_ERROR(hipFree(deviceRho));
CUDA_CHECK_ERROR(hipFree(deviceTheta));
CUDA_CHECK_ERROR(hipFree(deviceRect));
CUDA_CHECK_ERROR(hipFree(deviceCirc));
CUDA_CHECK_ERROR(hipFree(deviceRectRGB));
CUDA_CHECK_ERROR(hipFree(deviceCircRGB));
}
void CudaCircularize::operator()(uint8_t* pRect, uint8_t* pCirc, int circ_center)
{
// Transfer to Device
CUDA_CHECK_ERROR(hipMemcpy(deviceRect, pRect, sizeof(uint8_t) * width * height, hipMemcpyHostToDevice));
// Circularizing
	interpolation << < blocksPerGrid, threadsPerBlock >> > (deviceRect, deviceCirc, deviceRho, deviceTheta, radius, circ_center, width, height, 1); // single channel
// Transfer to Host
CUDA_CHECK_ERROR(hipMemcpy(pCirc, deviceCirc, sizeof(uint8_t) * diameter * diameter, hipMemcpyDeviceToHost));
}
void CudaCircularize::operator()(uint8_t* pRectRGB, uint8_t* pCircRGB, const char* rgb, int circ_center)
{
// Transfer to Device
CUDA_CHECK_ERROR(hipMemcpy(deviceRectRGB, pRectRGB, sizeof(uint8_t) * 3 * width * height, hipMemcpyHostToDevice));
// Circularizing
interpolation << < blocksPerGrid, threadsPerBlock >> > (deviceRectRGB, deviceCircRGB, deviceRho, deviceTheta, radius, circ_center, width, height, 3);
// Transfer to Host
CUDA_CHECK_ERROR(hipMemcpy(pCircRGB, deviceCircRGB, sizeof(uint8_t) * 3 * diameter * diameter, hipMemcpyDeviceToHost));
(void)rgb;
}
#endif | 32ca540d8d722189b71f40a554272c0a318e7375.cu |
#include "CudaCircularize.cuh"
#include "Common/CudaErrorCheck.cuh"
#include <Havana2/Configuration.h>
#ifdef CUDA_ENABLED
#include <math_functions.h>
#include <math_constants.h>
// GPU kernels
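// getInterpolationMap precomputes, for every pixel of the 2*radius x 2*radius
// output image, the polar coordinates used for the rect-to-circle mapping:
// rho is the radial distance and theta maps the angle onto a column index in
// [0, width-1]. interpolation() then samples the rectangular image at
// (theta, circ_center + rho) with bilinear interpolation to build the
// circular view.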
__global__ void getInterpolationMap(float* rho, float* theta, int radius, int width)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * 2 * radius;
if ((x < 2 * radius) && (y < 2 * radius))
{
// Set meshgrid
int xr = radius - x;
int yr = y - radius;
// Rho : Interpolation Map
rho[offset] = ((float)radius - 1.0f) / radius * hypotf(xr, yr);
// Theta : Interpolation Map
theta[offset] = ((float)width - 1.0f) / (2.0f * CUDART_PI_F) * (CUDART_PI_F + atan2f(yr, xr));
}
}
__global__ void interpolation(uint8_t* pRect, uint8_t* pCirc, float* rho, float* theta,
int radius, int circ_center, int width, int height, int channels)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * 2 * radius;
if ((x < 2 * radius) && (y < 2 * radius))
{
int i = theta[offset];
int j = circ_center + rho[offset];
if (j < circ_center + radius)
{
// Bilinear
int i0 = floorf(i); int j0 = floorf(j);
int i1 = i0 + 1; int j1 = j0 + 1;
for (int c = 0; c < channels; c++)
{
float f00 = pRect[(i0 + j0 * width) * channels + c];
float f10 = pRect[(i1 + j0 * width) * channels + c];
float f01 = pRect[(i0 + j1 * width) * channels + c];
float f11 = pRect[(i1 + j1 * width) * channels + c];
float f0 = (f01 - f00) / (j1 - j0) * (j - j0) + f00;
float f1 = (f11 - f10) / (j1 - j0) * (j - j0) + f10;
pCirc[offset * channels + c] = (f1 - f0) / (i1 - i0) * (i - i0) + f0;
}
// Nearest
///for (int c = 0; c < channels; c++)
/// pCirc[offset * channels + c] = pRect[(int)(roundf(i) + roundf(j) * width) * channels + c];
}
else
for (int c = 0; c < channels; c++)
pCirc[offset * channels + c] = 0;
}
}
CudaCircularize::CudaCircularize(int _radius, int _width, int _height) :
radius(_radius), diameter(2 * _radius), width(_width), height(_height)
{
// Memory Allocation
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceRho, sizeof(float) * diameter * diameter));
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceTheta, sizeof(float) * diameter * diameter));
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceRect, sizeof(uint8_t) * width * height));
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceCirc, sizeof(uint8_t) * diameter * diameter));
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceRectRGB, sizeof(uint8_t) * 3 * width * height));
CUDA_CHECK_ERROR(cudaMalloc((void**)&deviceCircRGB, sizeof(uint8_t) * 3 * diameter * diameter));
// Grid and Block Dimensions
blocksPerGrid = dim3((diameter + N_THREADS - 1) / N_THREADS, (diameter + N_THREADS - 1) / N_THREADS);
threadsPerBlock = dim3(N_THREADS, N_THREADS);
// Interpolation Map
getInterpolationMap << < blocksPerGrid, threadsPerBlock >> > (deviceRho, deviceTheta, radius, width);
}
CudaCircularize::~CudaCircularize()
{
// Memory Deallocation
CUDA_CHECK_ERROR(cudaFree(deviceRho));
CUDA_CHECK_ERROR(cudaFree(deviceTheta));
CUDA_CHECK_ERROR(cudaFree(deviceRect));
CUDA_CHECK_ERROR(cudaFree(deviceCirc));
CUDA_CHECK_ERROR(cudaFree(deviceRectRGB));
CUDA_CHECK_ERROR(cudaFree(deviceCircRGB));
}
void CudaCircularize::operator()(uint8_t* pRect, uint8_t* pCirc, int circ_center)
{
// Transfer to Device
CUDA_CHECK_ERROR(cudaMemcpy(deviceRect, pRect, sizeof(uint8_t) * width * height, cudaMemcpyHostToDevice));
// Circularizing
	interpolation << < blocksPerGrid, threadsPerBlock >> > (deviceRect, deviceCirc, deviceRho, deviceTheta, radius, circ_center, width, height, 1); // single channel
// Transfer to Host
CUDA_CHECK_ERROR(cudaMemcpy(pCirc, deviceCirc, sizeof(uint8_t) * diameter * diameter, cudaMemcpyDeviceToHost));
}
void CudaCircularize::operator()(uint8_t* pRectRGB, uint8_t* pCircRGB, const char* rgb, int circ_center)
{
// Transfer to Device
CUDA_CHECK_ERROR(cudaMemcpy(deviceRectRGB, pRectRGB, sizeof(uint8_t) * 3 * width * height, cudaMemcpyHostToDevice));
// Circularizing
interpolation << < blocksPerGrid, threadsPerBlock >> > (deviceRectRGB, deviceCircRGB, deviceRho, deviceTheta, radius, circ_center, width, height, 3);
// Transfer to Host
CUDA_CHECK_ERROR(cudaMemcpy(pCircRGB, deviceCircRGB, sizeof(uint8_t) * 3 * diameter * diameter, cudaMemcpyDeviceToHost));
(void)rgb;
}
#endif |
d611c799f54929ebbd75884b7e714c2777a3b747.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "bitmap_image.hpp"
#define MazesizeX 500
#define MazesizeY 300
#define vert MazesizeX*MazesizeY
#define WHITE 1
#define BLACK 2
#define GRAY 3
#define UP 1
#define DOWN 2
#define LEFT 3
#define RIGHT 4
#define blockNum 20
#define threadNum 50
#define cores blockNum*threadNum
using namespace std;
__device__ volatile int cuda_restnum = vert;
__device__ volatile bool cuda_relocate_list[cores] = {false};
__device__ volatile int signal[cores] = {0};
__device__ volatile bool signal_valid[cores] = {false};
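// Parallel maze generation: each thread grows its own spanning tree (its
// "color"). findnext() picks a random GRAY frontier cell of the thread's tree
// and a random neighbouring BLACK cell to carve a passage from. When a thread
// reaches a cell owned by another tree, the two trees are merged into the
// lower-indexed one and the losing thread is relocated to an unclaimed cell.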
__device__ int findnext(int *cuda_colormap, int *cuda_status,
int thread_index, int* next_direction) {
int candidate[vert], cand_num = 0;
int pot_direction[4], dir_num = 0;
int next_node;
for(int i = 0; i < vert; i++) {
if(cuda_status[thread_index * vert + i] == GRAY) {
candidate[cand_num++] = i;
}
}
// randomly select a possible node as candidate
hiprandState_t candidate_state;
hiprand_init(thread_index, 0, cuda_restnum, &candidate_state);
// printf("thread %d cand_num = %d\n", thread_index, cand_num);
if(!cand_num) return -1;
next_node = candidate[hiprand(&candidate_state)%cand_num];
// printf("next node: %d, dir_num = %d\n", next_node, dir_num);
// randomly select a direction
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == BLACK){
pot_direction[dir_num++] = UP;
// printf("thread %d, nextnode %d, UP\n", thread_index, next_node);
}
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert + next_node - MazesizeX] == BLACK){
pot_direction[dir_num++] = DOWN;
// printf("thread %d, nextnode %d, DOWN\n", thread_index, next_node);
}
if(next_node % MazesizeX != 0)
if(cuda_status[thread_index * vert + next_node - 1] == BLACK){
pot_direction[dir_num++] = LEFT;
// printf("thread %d, nextnode %d, LEFT\n", thread_index, next_node);
}
if(next_node % MazesizeX != MazesizeX - 1)
if(cuda_status[thread_index * vert + next_node + 1] == BLACK){
pot_direction[dir_num++] = RIGHT;
// printf("thread %d, nextnode %d, RIGHT\n", thread_index, next_node);
}
hiprandState_t direction_state;
hiprand_init(thread_index, 0, cuda_restnum, &direction_state);
if(dir_num)
*next_direction = pot_direction[hiprand(&direction_state)%dir_num];
else {
// printf("no direction, next_node %d\n", next_node);
*next_direction = -1;
}
return next_node;
}
__device__
void mergetree(int treei, int treej, int *cuda_colormap, int *cuda_status) { // merge treej into treei
// merge tree node
// printf("MERGE: tree:%d and tree:%d \n", treei, treej);
// if(treej < 0 || treei < 0) return;
for(int i = 0; i < vert; i++) {
if(cuda_status[treej*vert + i] == BLACK) {
printf("tree %d, %d is black\n",treej, i);
cuda_status[treei* vert + i] = BLACK;
}
if(cuda_status[treej* vert + i] == GRAY)
if(cuda_status[treei * vert + i] == WHITE)
cuda_status[treei * vert + i] = GRAY;
if(cuda_colormap[i] == treej) cuda_colormap[i] = treei;
cuda_status[treej * vert + i] = WHITE;
}
}
__device__
int recv_signal(int local_index, volatile int signal_map[cores], volatile bool signal_valid[cores]) {
if(signal_valid[local_index]) {
signal_valid[local_index] = false;
return signal_map[local_index];
}
else return -1;
}
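// thread_relocate: restart a thread whose tree was absorbed by another tree.
// The thread's rank among all currently flagged threads selects which
// unclaimed (colormap == -1) cell it claims, so concurrently relocating
// threads do not pick the same cell.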
__device__
int thread_relocate(int thread_index, int* cuda_colormap, int* cuda_status) {
int order = -1, temp = 0;
int next_node = -1;
for(int i = 0; i < cores; i++) {
if(cuda_relocate_list[i]) {
order++;
if(i == thread_index) {
break;
}
}
}
if(order == -1 || !cuda_relocate_list[thread_index]) return -1;
for(int i = 0; i < vert; i++) {
if(cuda_colormap[i] == -1) {
if(temp == order) {
cuda_status[vert * thread_index + i] = BLACK;
if(i >= MazesizeX)
cuda_status[thread_index * vert + i - MazesizeX] = GRAY;
if(i + MazesizeX < vert)
cuda_status[thread_index * vert + i + MazesizeX] = GRAY;
if(i % MazesizeX > 0)
cuda_status[thread_index * vert + i - 1] = GRAY;
if(i % MazesizeX < (MazesizeX - 1))
cuda_status[thread_index * vert + i + 1] = GRAY;
cuda_colormap[i] = thread_index;
next_node = i;
break;
}
else temp++;
}
}
if(next_node >= 0){
printf("thread %d restarted by signal at %d, color:%d\n", thread_index, next_node, cuda_colormap[next_node]);
cuda_relocate_list[order] = false;
}
return next_node;
}
__global__
void MSTthread(int *cuda_map, int *cuda_colormap, int *cuda_status) {
int thread_index = threadIdx.x + blockDim.x * blockIdx.x;
int next_node, next_direction;
volatile int signal_recved;
int relocate_node;
bool flag;
// printf("thread %d starts\n", thread_index);
signal[thread_index] = 0;
signal_valid[thread_index] = false;
while(1) {
//if(share * thread_index >= vert) return;
flag = true;
while(flag) {
signal_recved = recv_signal(thread_index, signal, signal_valid);
if( signal_recved != -1 && signal_recved < thread_index) {
// return;
// here I'll implement a new way to "restart" the thread
relocate_node = thread_relocate(thread_index, cuda_colormap, cuda_status);
if(relocate_node == -1) {
printf("thread %d finally no pos\n", thread_index);
return;
}
else {
// continue;
}
}
next_node = -1;
next_node = findnext(cuda_colormap, cuda_status, thread_index, &next_direction);
if(next_node == -1) {
printf("thread %d return for no node avaliable\n", thread_index);
return;
}
else {
if(thread_index == 0) {printf("thread 0 finds %d, color: %d\n", next_node, cuda_colormap[next_node]);}
if(cuda_colormap[next_node] == -1) {
cuda_colormap[next_node] = thread_index;
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// update status and keyarray and cuda_restnum
cuda_status[thread_index * vert + next_node] = BLACK;
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert +next_node - MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node - MazesizeX] = GRAY;
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node + MazesizeX] = GRAY;
if(next_node % MazesizeX > 0)
if(cuda_status[thread_index * vert + next_node - 1] == WHITE)
cuda_status[thread_index * vert + next_node - 1] = GRAY;
if(next_node % MazesizeX < (MazesizeX - 1))
if(cuda_status[thread_index * vert + next_node + 1] == WHITE)
cuda_status[thread_index * vert + next_node + 1] = GRAY;
//cuda_restnum--;
//printf("----%d left----\n", cuda_restnum);
}
else if(cuda_colormap[next_node] != thread_index) {
int target_color = cuda_colormap[next_node];
if(target_color < thread_index) {
signal[thread_index] = target_color;
signal_valid[thread_index] = true;
signal[target_color] = thread_index;
signal_valid[target_color] = true;
//while(!signal_valid[target_color] ){};
mergetree(target_color, thread_index, cuda_colormap, cuda_status);
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// unlock minnode and kill j
printf("thread %d get merged at %d\n", thread_index, next_node);
// add the
cuda_relocate_list[thread_index] = true;
//thread_relocate(thread_index, cuda_colormap, cuda_status);
// return;
}
else if(target_color > thread_index) {
signal[thread_index] = target_color;
signal_valid[thread_index] = true;
signal[target_color] = thread_index;
signal_valid[target_color] = true;
//while(!signal_valid[target_color]) {};
mergetree(thread_index, target_color, cuda_colormap, cuda_status);
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// unlock minnode and kill j
}
else {
// unlock minnode
continue;
}
}
else if(cuda_colormap[next_node] == thread_index) {
cuda_status[thread_index * vert + next_node] = BLACK;
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert +next_node - MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node - MazesizeX] = GRAY;
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node + MazesizeX] = GRAY;
if(next_node % MazesizeX > 0)
if(cuda_status[thread_index * vert + next_node - 1] == WHITE)
cuda_status[thread_index * vert + next_node - 1] = GRAY;
if(next_node % MazesizeX < (MazesizeX - 1))
if(cuda_status[thread_index * vert + next_node + 1] == WHITE)
cuda_status[thread_index * vert + next_node + 1] = GRAY;
continue;
}
}
}
}
}
void print(int map[vert * 4]) {
int dimx = MazesizeX*10;
int dimy = MazesizeY*10;
bitmap_image image(dimx+1, dimy+1);
for(int x = 0; x < dimx; x++)
for(int y = 1; y < dimy; y++) {
if(x%10 == 0 || y %10 == 0) {
if(x%10 == 0) {
if(map[((y/10)*MazesizeX+(x/10))* 4 + 2]) image.set_pixel(x,dimy+1-y,255,255,255);
else image.set_pixel(x,dimy+1-y,0,0,0);
}
if(y%10 == 0) {
if(map[((y/10)*MazesizeX+(x/10))* 4 + 1]) image.set_pixel(x,dimy+1-y,255,255,255);
else image.set_pixel(x,dimy+1-y,0,0,0);
}
}
else image.set_pixel(x,dimy+1-y,255,255,255);
}
image.save_image("maze.bmp");
}
void print_ascii(int map[vert * 4]) {
for(int j = 0; j < MazesizeX; j++) {
cout << "--";
}
cout << "-" << endl;
for(int i = MazesizeY-1; i >= 0; i--) {
for(int j = 0; j < MazesizeX; j++) {
if(!map[(i*MazesizeX+j)* 4 + 2]) cout << '|';
else cout << ' ';
cout << ' ';
}
cout << '|' << endl;
for(int j = 0; j < MazesizeX; j++) {
if(!map[(i*MazesizeX+j)* 4 + 1]) cout << "--";
else cout << "- ";
}
cout << '-' << endl;
}
return;
}
int main(int argc, char** argv) {
// declare data structure
int *map, *cuda_map;
int *colormap, *cuda_colormap;
int *status, *cuda_status;
// initialization
map = new int[vert * 4];
if(hipSuccess != hipMalloc(&cuda_map, vert * 4 * sizeof(int))) {
printf("map create failed\n");
return -1;
}
colormap = new int[vert];
if(hipSuccess != hipMalloc(&cuda_colormap, vert * sizeof(int))) {
printf("color map create failed\n");
return -1;
}
status = new int[vert * cores];
if(hipSuccess != hipMalloc(&cuda_status, vert * cores * sizeof(int))) {
printf("status map create failed\n");
return -1;
}
for(int i = 0; i < vert * 4; i++) {
map[i] = 0;
}
for(int i = 0; i < vert; i++) {
colormap[i] = -1;
}
int share = vert / (cores) * (cores) == vert?
vert/(cores) : vert/(cores)+1;
cout << "share = " << share << endl;
for(int i = 0; i < cores; i++) {
for(int j = 0; j < vert; j++) {
if(j == share * i) status[i*vert+j] = GRAY;
else status[i*vert+j] = WHITE;
}
}
hipMemcpy(cuda_map, map, vert*4*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_colormap, colormap, vert*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuda_status, status, vert*cores*sizeof(int), hipMemcpyHostToDevice);
// start timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// start
hipLaunchKernelGGL(( MSTthread) , dim3(blockNum), dim3(threadNum) , 0, 0, cuda_map, cuda_colormap, cuda_status);
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float time = 0;
hipEventElapsedTime(&time, start, stop);
cout << "Total time = " << time << "ms" << endl;
// print
hipMemcpy(map, cuda_map, vert*4*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(colormap, cuda_colormap, vert*sizeof(int), hipMemcpyDeviceToHost);
print(map);
//print_ascii(map);
hipFree(cuda_map);
hipFree(cuda_colormap);
hipFree(cuda_status);
/*
for(int i = 0; i < vert; i++) {
cout << colormap[i] << ' ';
if((i +1)% 20 == 0)
cout << endl;
}
*/
return 0;
}
| d611c799f54929ebbd75884b7e714c2777a3b747.cu | #include <iostream>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include "bitmap_image.hpp"
#define MazesizeX 500
#define MazesizeY 300
#define vert MazesizeX*MazesizeY
#define WHITE 1
#define BLACK 2
#define GRAY 3
#define UP 1
#define DOWN 2
#define LEFT 3
#define RIGHT 4
#define blockNum 20
#define threadNum 50
#define cores blockNum*threadNum
using namespace std;
__device__ volatile int cuda_restnum = vert;
__device__ volatile bool cuda_relocate_list[cores] = {false};
__device__ volatile int signal[cores] = {0};
__device__ volatile bool signal_valid[cores] = {false};
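// Parallel maze generation: each thread grows its own spanning tree (its
// "color"). findnext() picks a random GRAY frontier cell of the thread's tree
// and a random neighbouring BLACK cell to carve a passage from. When a thread
// reaches a cell owned by another tree, the two trees are merged into the
// lower-indexed one and the losing thread is relocated to an unclaimed cell.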
__device__ int findnext(int *cuda_colormap, int *cuda_status,
int thread_index, int* next_direction) {
int candidate[vert], cand_num = 0;
int pot_direction[4], dir_num = 0;
int next_node;
for(int i = 0; i < vert; i++) {
if(cuda_status[thread_index * vert + i] == GRAY) {
candidate[cand_num++] = i;
}
}
// randomly select a possible node as candidate
curandState_t candidate_state;
curand_init(thread_index, 0, cuda_restnum, &candidate_state);
// printf("thread %d cand_num = %d\n", thread_index, cand_num);
if(!cand_num) return -1;
next_node = candidate[curand(&candidate_state)%cand_num];
// printf("next node: %d, dir_num = %d\n", next_node, dir_num);
// randomly select a direction
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == BLACK){
pot_direction[dir_num++] = UP;
// printf("thread %d, nextnode %d, UP\n", thread_index, next_node);
}
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert + next_node - MazesizeX] == BLACK){
pot_direction[dir_num++] = DOWN;
// printf("thread %d, nextnode %d, DOWN\n", thread_index, next_node);
}
if(next_node % MazesizeX != 0)
if(cuda_status[thread_index * vert + next_node - 1] == BLACK){
pot_direction[dir_num++] = LEFT;
// printf("thread %d, nextnode %d, LEFT\n", thread_index, next_node);
}
if(next_node % MazesizeX != MazesizeX - 1)
if(cuda_status[thread_index * vert + next_node + 1] == BLACK){
pot_direction[dir_num++] = RIGHT;
// printf("thread %d, nextnode %d, RIGHT\n", thread_index, next_node);
}
curandState_t direction_state;
curand_init(thread_index, 0, cuda_restnum, &direction_state);
if(dir_num)
*next_direction = pot_direction[curand(&direction_state)%dir_num];
else {
// printf("no direction, next_node %d\n", next_node);
*next_direction = -1;
}
return next_node;
}
__device__
void mergetree(int treei, int treej, int *cuda_colormap, int *cuda_status) { // merge treej into treei
// merge tree node
// printf("MERGE: tree:%d and tree:%d \n", treei, treej);
// if(treej < 0 || treei < 0) return;
for(int i = 0; i < vert; i++) {
if(cuda_status[treej*vert + i] == BLACK) {
printf("tree %d, %d is black\n",treej, i);
cuda_status[treei* vert + i] = BLACK;
}
if(cuda_status[treej* vert + i] == GRAY)
if(cuda_status[treei * vert + i] == WHITE)
cuda_status[treei * vert + i] = GRAY;
if(cuda_colormap[i] == treej) cuda_colormap[i] = treei;
cuda_status[treej * vert + i] = WHITE;
}
}
__device__
int recv_signal(int local_index, volatile int signal_map[cores], volatile bool signal_valid[cores]) {
if(signal_valid[local_index]) {
signal_valid[local_index] = false;
return signal_map[local_index];
}
else return -1;
}
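// thread_relocate: restart a thread whose tree was absorbed by another tree.
// The thread's rank among all currently flagged threads selects which
// unclaimed (colormap == -1) cell it claims, so concurrently relocating
// threads do not pick the same cell.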
__device__
int thread_relocate(int thread_index, int* cuda_colormap, int* cuda_status) {
int order = -1, temp = 0;
int next_node = -1;
for(int i = 0; i < cores; i++) {
if(cuda_relocate_list[i]) {
order++;
if(i == thread_index) {
break;
}
}
}
if(order == -1 || !cuda_relocate_list[thread_index]) return -1;
for(int i = 0; i < vert; i++) {
if(cuda_colormap[i] == -1) {
if(temp == order) {
cuda_status[vert * thread_index + i] = BLACK;
if(i >= MazesizeX)
cuda_status[thread_index * vert + i - MazesizeX] = GRAY;
if(i + MazesizeX < vert)
cuda_status[thread_index * vert + i + MazesizeX] = GRAY;
if(i % MazesizeX > 0)
cuda_status[thread_index * vert + i - 1] = GRAY;
if(i % MazesizeX < (MazesizeX - 1))
cuda_status[thread_index * vert + i + 1] = GRAY;
cuda_colormap[i] = thread_index;
next_node = i;
break;
}
else temp++;
}
}
if(next_node >= 0){
printf("thread %d restarted by signal at %d, color:%d\n", thread_index, next_node, cuda_colormap[next_node]);
cuda_relocate_list[order] = false;
}
return next_node;
}
__global__
void MSTthread(int *cuda_map, int *cuda_colormap, int *cuda_status) {
int thread_index = threadIdx.x + blockDim.x * blockIdx.x;
int next_node, next_direction;
volatile int signal_recved;
int relocate_node;
bool flag;
// printf("thread %d starts\n", thread_index);
signal[thread_index] = 0;
signal_valid[thread_index] = false;
while(1) {
//if(share * thread_index >= vert) return;
flag = true;
while(flag) {
signal_recved = recv_signal(thread_index, signal, signal_valid);
if( signal_recved != -1 && signal_recved < thread_index) {
// return;
// here I'll implement a new way to "restart" the thread
relocate_node = thread_relocate(thread_index, cuda_colormap, cuda_status);
if(relocate_node == -1) {
printf("thread %d finally no pos\n", thread_index);
return;
}
else {
// continue;
}
}
next_node = -1;
next_node = findnext(cuda_colormap, cuda_status, thread_index, &next_direction);
if(next_node == -1) {
printf("thread %d return for no node avaliable\n", thread_index);
return;
}
else {
if(thread_index == 0) {printf("thread 0 finds %d, color: %d\n", next_node, cuda_colormap[next_node]);}
if(cuda_colormap[next_node] == -1) {
cuda_colormap[next_node] = thread_index;
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// update status and keyarray and cuda_restnum
cuda_status[thread_index * vert + next_node] = BLACK;
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert +next_node - MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node - MazesizeX] = GRAY;
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node + MazesizeX] = GRAY;
if(next_node % MazesizeX > 0)
if(cuda_status[thread_index * vert + next_node - 1] == WHITE)
cuda_status[thread_index * vert + next_node - 1] = GRAY;
if(next_node % MazesizeX < (MazesizeX - 1))
if(cuda_status[thread_index * vert + next_node + 1] == WHITE)
cuda_status[thread_index * vert + next_node + 1] = GRAY;
//cuda_restnum--;
//printf("----%d left----\n", cuda_restnum);
}
else if(cuda_colormap[next_node] != thread_index) {
int target_color = cuda_colormap[next_node];
if(target_color < thread_index) {
signal[thread_index] = target_color;
signal_valid[thread_index] = true;
signal[target_color] = thread_index;
signal_valid[target_color] = true;
//while(!signal_valid[target_color] ){};
mergetree(target_color, thread_index, cuda_colormap, cuda_status);
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// unlock minnode and kill j
printf("thread %d get merged at %d\n", thread_index, next_node);
// add the
cuda_relocate_list[thread_index] = true;
//thread_relocate(thread_index, cuda_colormap, cuda_status);
// return;
}
else if(target_color > thread_index) {
signal[thread_index] = target_color;
signal_valid[thread_index] = true;
signal[target_color] = thread_index;
signal_valid[target_color] = true;
//while(!signal_valid[target_color]) {};
mergetree(thread_index, target_color, cuda_colormap, cuda_status);
switch(next_direction) {
case UP:
cuda_map[next_node * 4] = 1;
cuda_map[(next_node + MazesizeX) * 4 + 1] = 1;
break;
case DOWN:
cuda_map[next_node * 4 + 1] = 1;
cuda_map[(next_node-MazesizeX) * 4] = 1;
break;
case LEFT:
cuda_map[next_node * 4 + 2] = 1;
cuda_map[(next_node - 1) * 4 + 3] = 1;
break;
case RIGHT:
cuda_map[next_node * 4 + 3] = 1;
cuda_map[(next_node + 1) * 4 + 2] = 1;
break;
default: break;
}
// unlock minnode and kill j
}
else {
// unlock minnode
continue;
}
}
else if(cuda_colormap[next_node] == thread_index) {
cuda_status[thread_index * vert + next_node] = BLACK;
if(next_node >= MazesizeX)
if(cuda_status[thread_index * vert +next_node - MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node - MazesizeX] = GRAY;
if(next_node + MazesizeX < vert)
if(cuda_status[thread_index * vert + next_node + MazesizeX] == WHITE)
cuda_status[thread_index * vert + next_node + MazesizeX] = GRAY;
if(next_node % MazesizeX > 0)
if(cuda_status[thread_index * vert + next_node - 1] == WHITE)
cuda_status[thread_index * vert + next_node - 1] = GRAY;
if(next_node % MazesizeX < (MazesizeX - 1))
if(cuda_status[thread_index * vert + next_node + 1] == WHITE)
cuda_status[thread_index * vert + next_node + 1] = GRAY;
continue;
}
}
}
}
}
void print(int map[vert * 4]) {
int dimx = MazesizeX*10;
int dimy = MazesizeY*10;
bitmap_image image(dimx+1, dimy+1);
for(int x = 0; x < dimx; x++)
for(int y = 1; y < dimy; y++) {
if(x%10 == 0 || y %10 == 0) {
if(x%10 == 0) {
if(map[((y/10)*MazesizeX+(x/10))* 4 + 2]) image.set_pixel(x,dimy+1-y,255,255,255);
else image.set_pixel(x,dimy+1-y,0,0,0);
}
if(y%10 == 0) {
if(map[((y/10)*MazesizeX+(x/10))* 4 + 1]) image.set_pixel(x,dimy+1-y,255,255,255);
else image.set_pixel(x,dimy+1-y,0,0,0);
}
}
else image.set_pixel(x,dimy+1-y,255,255,255);
}
image.save_image("maze.bmp");
}
void print_ascii(int map[vert * 4]) {
for(int j = 0; j < MazesizeX; j++) {
cout << "--";
}
cout << "-" << endl;
for(int i = MazesizeY-1; i >= 0; i--) {
for(int j = 0; j < MazesizeX; j++) {
if(!map[(i*MazesizeX+j)* 4 + 2]) cout << '|';
else cout << ' ';
cout << ' ';
}
cout << '|' << endl;
for(int j = 0; j < MazesizeX; j++) {
if(!map[(i*MazesizeX+j)* 4 + 1]) cout << "--";
else cout << "- ";
}
cout << '-' << endl;
}
return;
}
int main(int argc, char** argv) {
// declare data structure
int *map, *cuda_map;
int *colormap, *cuda_colormap;
int *status, *cuda_status;
// initialization
map = new int[vert * 4];
if(cudaSuccess != cudaMalloc(&cuda_map, vert * 4 * sizeof(int))) {
printf("map create failed\n");
return -1;
}
colormap = new int[vert];
if(cudaSuccess != cudaMalloc(&cuda_colormap, vert * sizeof(int))) {
printf("color map create failed\n");
return -1;
}
status = new int[vert * cores];
if(cudaSuccess != cudaMalloc(&cuda_status, vert * cores * sizeof(int))) {
printf("status map create failed\n");
return -1;
}
for(int i = 0; i < vert * 4; i++) {
map[i] = 0;
}
for(int i = 0; i < vert; i++) {
colormap[i] = -1;
}
int share = vert / (cores) * (cores) == vert?
vert/(cores) : vert/(cores)+1;
cout << "share = " << share << endl;
for(int i = 0; i < cores; i++) {
for(int j = 0; j < vert; j++) {
if(j == share * i) status[i*vert+j] = GRAY;
else status[i*vert+j] = WHITE;
}
}
cudaMemcpy(cuda_map, map, vert*4*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_colormap, colormap, vert*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_status, status, vert*cores*sizeof(int), cudaMemcpyHostToDevice);
// start timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// start
MSTthread <<< blockNum, threadNum >>>(cuda_map, cuda_colormap, cuda_status);
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
cout << "Total time = " << time << "ms" << endl;
// print
cudaMemcpy(map, cuda_map, vert*4*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(colormap, cuda_colormap, vert*sizeof(int), cudaMemcpyDeviceToHost);
print(map);
//print_ascii(map);
cudaFree(cuda_map);
cudaFree(cuda_colormap);
cudaFree(cuda_status);
/*
for(int i = 0; i < vert; i++) {
cout << colormap[i] << ' ';
if((i +1)% 20 == 0)
cout << endl;
}
*/
return 0;
}
|
5f1638371a369e87d5560662c233a3b59826569a.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// helper functions and utilities to work with CUDA
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <fcntl.h>
#include <float.h>
#include <unistd.h>
#include "texton.h"
#include "convert.h"
#include "intervening.h"
#include "lanczos.h"
#include "stencilMVM.h"
#include "localcues.h"
#include "combine.h"
#include "nonmax.h"
#include "spectralPb.h"
#include "globalPb.h"
#include "skeleton.h"
#include "common_func.h"
#include "CircleTemplateTrace.h"
#include "LineSegTrace.h"
#include "pgm.cuh"
int main(int argc, char **argv)
{
for (int i =0; i<= 74; i++)
{
int fileIdx = i;
char filename[MAX_PATH];
sprintf(filename, "%s%d", argv[1], fileIdx);
char inputfile[MAX_PATH];
char outputColorfile[MAX_PATH];
char* period = strrchr(filename, '.');
if (period == 0) {
period = strrchr(filename, 0);
}
strncpy(inputfile, filename, period - filename);
sprintf(&inputfile[0] + (period - filename) , "bin.pgm");
strncpy(outputColorfile, filename, period - filename);
sprintf(&outputColorfile[0] + (period - filename) , "_traced.pgm");
/** null: the buffer is allocated internally by cutil */
//float* data = NULL;
int width, height;
//cutLoadPGMf(inputfile, (float**)&data, &width, &height);
float* data = loadPGM(inputfile, &width, &height);
assert(width > 0 && height > 0);
CLineSegTrace trace;
int nAmount = trace.initTracePoints((float*)data, width, height);
printf("Outputting %s \n", inputfile);
printf("Valid Pt number:%d \n", nAmount);
trace.traceLineSegs();
trace.debugPrintOutput(outputColorfile, 70, 100000);
/* cutFree(data);*/
}
//system("pause");
return 1;
} | 5f1638371a369e87d5560662c233a3b59826569a.cu | // System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// helper functions and utilities to work with CUDA
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cutil.h>
#include <fcntl.h>
#include <float.h>
#include <unistd.h>
#include "texton.h"
#include "convert.h"
#include "intervening.h"
#include "lanczos.h"
#include "stencilMVM.h"
#include "localcues.h"
#include "combine.h"
#include "nonmax.h"
#include "spectralPb.h"
#include "globalPb.h"
#include "skeleton.h"
#include "common_func.h"
#include "CircleTemplateTrace.h"
#include "LineSegTrace.h"
#include "pgm.cuh"
int main(int argc, char **argv)
{
for (int i =0; i<= 74; i++)
{
int fileIdx = i;
char filename[MAX_PATH];
sprintf(filename, "%s%d", argv[1], fileIdx);
char inputfile[MAX_PATH];
char outputColorfile[MAX_PATH];
char* period = strrchr(filename, '.');
if (period == 0) {
period = strrchr(filename, 0);
}
strncpy(inputfile, filename, period - filename);
sprintf(&inputfile[0] + (period - filename) , "bin.pgm");
strncpy(outputColorfile, filename, period - filename);
sprintf(&outputColorfile[0] + (period - filename) , "_traced.pgm");
/** null: the buffer is allocated internally by cutil */
//float* data = NULL;
int width, height;
//cutLoadPGMf(inputfile, (float**)&data, &width, &height);
float* data = loadPGM(inputfile, &width, &height);
assert(width > 0 && height > 0);
CLineSegTrace trace;
int nAmount = trace.initTracePoints((float*)data, width, height);
printf("Outputting %s \n", inputfile);
printf("Valid Pt number:%d \n", nAmount);
trace.traceLineSegs();
trace.debugPrintOutput(outputColorfile, 70, 100000);
/* cutFree(data);*/
}
//system("pause");
return 1;
} |
f55de4ff62de4d5ac9d285e7eae7cba37f6b4fd4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
int log2(int i)
{
int r = 0;
while (i >>= 1) r++;
return r;
}
int bit_reverse(int w, int bits)
{
int r = 0;
for (int i = 0; i < bits; i++)
{
int bit = (w & (1 << i)) >> i;
r |= bit << (bits - i - 1);
}
return r;
}
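// naive_histo updates its bin with a plain read-modify-write, so threads that
// hit the same bin race with each other and the final counts come out too low.
// simple_histo performs the same update with atomicAdd, which serializes the
// conflicting increments and yields correct counts.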
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
d_bins[myBin]++;
}
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
atomicAdd(&(d_bins[myBin]), 1);
}
int main(int argc, char **argv)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
const int ARRAY_SIZE = 65536;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int BIN_COUNT = 16;
const int BIN_BYTES = BIN_COUNT * sizeof(int);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = bit_reverse(i, log2(ARRAY_SIZE));
}
int h_bins[BIN_COUNT];
for(int i = 0; i < BIN_COUNT; i++) {
h_bins[i] = 0;
}
// declare GPU memory pointers
int * d_in;
int * d_bins;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_bins, BIN_BYTES);
// transfer the arrays to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice);
int whichKernel = 0;
if (argc == 2) {
whichKernel = atoi(argv[1]);
}
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running naive histo\n");
hipLaunchKernelGGL(( naive_histo), dim3(ARRAY_SIZE / 64), dim3(64), 0, 0, d_bins, d_in, BIN_COUNT);
break;
case 1:
printf("Running simple histo\n");
hipLaunchKernelGGL(( simple_histo), dim3(ARRAY_SIZE / 64), dim3(64), 0, 0, d_bins, d_in, BIN_COUNT);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
// copy back the sum from GPU
hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost);
for(int i = 0; i < BIN_COUNT; i++) {
printf("bin %d: count %d\n", i, h_bins[i]);
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_bins);
return 0;
}
| f55de4ff62de4d5ac9d285e7eae7cba37f6b4fd4.cu | #include <stdio.h>
#include <cuda_runtime.h>
int log2(int i)
{
int r = 0;
while (i >>= 1) r++;
return r;
}
int bit_reverse(int w, int bits)
{
int r = 0;
for (int i = 0; i < bits; i++)
{
int bit = (w & (1 << i)) >> i;
r |= bit << (bits - i - 1);
}
return r;
}
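// naive_histo updates its bin with a plain read-modify-write, so threads that
// hit the same bin race with each other and the final counts come out too low.
// simple_histo performs the same update with atomicAdd, which serializes the
// conflicting increments and yields correct counts.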
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
d_bins[myBin]++;
}
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int myItem = d_in[myId];
int myBin = myItem % BIN_COUNT;
atomicAdd(&(d_bins[myBin]), 1);
}
int main(int argc, char **argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
const int ARRAY_SIZE = 65536;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int BIN_COUNT = 16;
const int BIN_BYTES = BIN_COUNT * sizeof(int);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = bit_reverse(i, log2(ARRAY_SIZE));
}
int h_bins[BIN_COUNT];
for(int i = 0; i < BIN_COUNT; i++) {
h_bins[i] = 0;
}
// declare GPU memory pointers
int * d_in;
int * d_bins;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_bins, BIN_BYTES);
// transfer the arrays to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);
int whichKernel = 0;
if (argc == 2) {
whichKernel = atoi(argv[1]);
}
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running naive histo\n");
naive_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
break;
case 1:
printf("Running simple histo\n");
simple_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
// copy back the sum from GPU
cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
for(int i = 0; i < BIN_COUNT; i++) {
printf("bin %d: count %d\n", i, h_bins[i]);
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_bins);
return 0;
}
|
1bb30ba6ec23fb75eb077540be565c5441114dfc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <vector>
#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
#include <raft/linalg/cublas_wrappers.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
using namespace std;
// Note: false negatives are theoretically possible, given that border
// points are ambiguous.
// If test failures are observed, these tests might need to be re-written
// (cf how the Python tests work).
template <typename T, typename IdxT>
struct DbscanInputs {
IdxT n_row;
IdxT n_col;
IdxT n_centers;
T cluster_std;
T eps;
int min_pts;
size_t max_bytes_per_batch;
unsigned long long int seed;
};
template <typename T, typename IdxT>
::std::ostream &operator<<(::std::ostream &os,
const DbscanInputs<T, IdxT> &dims) {
return os;
}
template <typename T, typename IdxT>
class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> {
protected:
void basicTest() {
raft::handle_t handle;
params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam();
device_buffer<T> out(handle.get_device_allocator(), handle.get_stream(),
params.n_row * params.n_col);
device_buffer<IdxT> l(handle.get_device_allocator(), handle.get_stream(),
params.n_row);
make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col,
params.n_centers, true, nullptr, nullptr, params.cluster_std,
true, -10.0f, 10.0f, params.seed);
raft::allocate(labels, params.n_row);
raft::allocate(labels_ref, params.n_row);
raft::copy(labels_ref, l.data(), params.n_row, handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
Dbscan::fit(handle, out.data(), params.n_row, params.n_col, params.eps,
params.min_pts, labels, nullptr, params.max_bytes_per_batch);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
score = adjusted_rand_index(handle, labels_ref, labels, params.n_row);
if (score < 1.0) {
auto str = raft::arr2Str(labels_ref, params.n_row, "labels_ref",
handle.get_stream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = raft::arr2Str(labels, params.n_row, "labels", handle.get_stream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(labels_ref));
}
protected:
DbscanInputs<T, IdxT> params;
IdxT *labels, *labels_ref;
double score;
};
const std::vector<DbscanInputs<float, int>> inputsf2 = {
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<float, int64_t>> inputsf3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
const std::vector<DbscanInputs<double, int>> inputsd2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<double, int64_t>> inputsd3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
typedef DbscanTest<float, int> DbscanTestF_Int;
TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<float, int64_t> DbscanTestF_Int64;
TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int> DbscanTestD_Int;
TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int64_t> DbscanTestD_Int64;
TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64,
::testing::ValuesIn(inputsf3));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64,
::testing::ValuesIn(inputsd3));
template <typename T>
struct DBScan2DArrayInputs {
const T *points;
const int *out;
size_t n_row;
// n_out allows comparing fewer labels than we have inputs
// (some output labels can be ambiguous)
size_t n_out;
T eps;
int min_pts;
const int *core_indices; //Expected core_indices
};
template <typename T>
class Dbscan2DSimple : public ::testing::TestWithParam<DBScan2DArrayInputs<T>> {
protected:
void basicTest() {
raft::handle_t handle;
params = ::testing::TestWithParam<DBScan2DArrayInputs<T>>::GetParam();
raft::allocate(inputs, params.n_row * 2);
raft::allocate(labels, params.n_row);
raft::allocate(labels_ref, params.n_out);
raft::allocate(core_sample_indices_d, params.n_row);
raft::copy(inputs, params.points, params.n_row * 2, handle.get_stream());
raft::copy(labels_ref, params.out, params.n_out, handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
Dbscan::fit(handle, inputs, (int)params.n_row, 2, params.eps,
params.min_pts, labels, core_sample_indices_d);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
score = adjusted_rand_index(handle, labels_ref, labels, (int)params.n_out);
if (score < 1.0) {
auto str = raft::arr2Str(labels_ref, params.n_out, "labels_ref",
handle.get_stream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = raft::arr2Str(labels, params.n_row, "labels", handle.get_stream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
EXPECT_TRUE(raft::devArrMatchHost(
params.core_indices, core_sample_indices_d, params.n_row,
raft::Compare<int>(), handle.get_stream()));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(hipFree(labels_ref));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(inputs));
CUDA_CHECK(hipFree(core_sample_indices_d));
}
protected:
DBScan2DArrayInputs<T> params;
int *labels, *labels_ref;
int *core_sample_indices_d;
T *inputs;
double score;
};
// The input looks like a latin cross or a star with a chain:
// .
// . . . . .
// .
// There is 1 core-point (intersection of the bars)
// and the two points to the very right are not reachable from it
// So there should be one cluster (the plus/star on the left)
// and two noise points
const std::vector<float> test2d1_f = {0, 0, 1, 0, 1, 1, 1,
-1, 2, 0, 3, 0, 4, 0};
const std::vector<double> test2d1_d(test2d1_f.begin(), test2d1_f.end());
const std::vector<int> test2d1_l = {0, 0, 0, 0, 0, -1, -1};
const std::vector<int> test2d1c_l = {1, -1, -1, -1, -1, -1, -1};
// The input looks like a long two-barred (orthodox) cross or
// two stars next to each other:
// . .
// . . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters, both in the form of a plus/star
const std::vector<float> test2d2_f = {0, 0, 1, 0, 1, 1, 1, -1, 2, 0,
3, 0, 4, 0, 4, 1, 4, -1, 5, 0};
const std::vector<double> test2d2_d(test2d2_f.begin(), test2d2_f.end());
const std::vector<int> test2d2_l = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1};
const std::vector<int> test2d2c_l = {1, 6, -1, -1, -1, -1, -1, -1, -1, -1};
// The input looks like a two-barred (orthodox) cross or
// two stars sharing a link:
// . .
// . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters.
// However, the link that is shared between the stars
// actually has an ambiguous label (to the best of my knowledge)
// as it will depend on the order in which we process the core-points.
// Note that there are 9 input points, but only 8 labels for this reason
const std::vector<float> test2d3_f = {
0, 0, 1, 0, 1, 1, 1, -1, 3, 0, 3, 1, 3, -1, 4, 0, 2, 0,
};
const std::vector<double> test2d3_d(test2d3_f.begin(), test2d3_f.end());
const std::vector<int> test2d3_l = {0, 0, 0, 0, 1, 1, 1, 1};
const std::vector<int> test2d3c_l = {1, 4, -1, -1, -1, -1, -1, -1, -1};
const std::vector<DBScan2DArrayInputs<float>> inputs2d_f = {
{test2d1_f.data(), test2d1_l.data(), test2d1_f.size() / 2, test2d1_l.size(),
1.1f, 4, test2d1c_l.data()},
{test2d2_f.data(), test2d2_l.data(), test2d2_f.size() / 2, test2d2_l.size(),
1.1f, 4, test2d2c_l.data()},
{test2d3_f.data(), test2d3_l.data(), test2d3_f.size() / 2, test2d3_l.size(),
1.1f, 4, test2d3c_l.data()},
};
const std::vector<DBScan2DArrayInputs<double>> inputs2d_d = {
{test2d1_d.data(), test2d1_l.data(), test2d1_d.size() / 2, test2d1_l.size(),
1.1, 4, test2d1c_l.data()},
{test2d2_d.data(), test2d2_l.data(), test2d2_d.size() / 2, test2d2_l.size(),
1.1, 4, test2d2c_l.data()},
{test2d3_d.data(), test2d3_l.data(), test2d3_d.size() / 2, test2d3_l.size(),
1.1, 4, test2d3c_l.data()},
};
typedef Dbscan2DSimple<float> Dbscan2DSimple_F;
TEST_P(Dbscan2DSimple_F, Result) { ASSERT_TRUE(score == 1.0); }
typedef Dbscan2DSimple<double> Dbscan2DSimple_D;
TEST_P(Dbscan2DSimple_D, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_F,
::testing::ValuesIn(inputs2d_f));
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_D,
::testing::ValuesIn(inputs2d_d));
} // end namespace ML
| 1bb30ba6ec23fb75eb077540be565c5441114dfc.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/cuda_utils.cuh>
#include <vector>
#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
#include <raft/linalg/cublas_wrappers.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
namespace ML {
using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
using namespace std;
// Note: false negatives are theoretically possible, given that border
// points are ambiguous.
// If test failures are observed, these tests might need to be re-written
// (cf how the Python tests work).
template <typename T, typename IdxT>
struct DbscanInputs {
IdxT n_row;
IdxT n_col;
IdxT n_centers;
T cluster_std;
T eps;
int min_pts;
size_t max_bytes_per_batch;
unsigned long long int seed;
};
template <typename T, typename IdxT>
::std::ostream &operator<<(::std::ostream &os,
const DbscanInputs<T, IdxT> &dims) {
return os;
}
template <typename T, typename IdxT>
class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> {
protected:
void basicTest() {
raft::handle_t handle;
params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam();
device_buffer<T> out(handle.get_device_allocator(), handle.get_stream(),
params.n_row * params.n_col);
device_buffer<IdxT> l(handle.get_device_allocator(), handle.get_stream(),
params.n_row);
make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col,
params.n_centers, true, nullptr, nullptr, params.cluster_std,
true, -10.0f, 10.0f, params.seed);
raft::allocate(labels, params.n_row);
raft::allocate(labels_ref, params.n_row);
raft::copy(labels_ref, l.data(), params.n_row, handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
Dbscan::fit(handle, out.data(), params.n_row, params.n_col, params.eps,
params.min_pts, labels, nullptr, params.max_bytes_per_batch);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
score = adjusted_rand_index(handle, labels_ref, labels, params.n_row);
if (score < 1.0) {
auto str = raft::arr2Str(labels_ref, params.n_row, "labels_ref",
handle.get_stream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = raft::arr2Str(labels, params.n_row, "labels", handle.get_stream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(labels_ref));
}
protected:
DbscanInputs<T, IdxT> params;
IdxT *labels, *labels_ref;
double score;
};
const std::vector<DbscanInputs<float, int>> inputsf2 = {
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<float, int64_t>> inputsf3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{50000, 16, 5l, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
const std::vector<DbscanInputs<double, int>> inputsd2 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};
const std::vector<DbscanInputs<double, int64_t>> inputsd3 = {
{50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
{1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
{20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};
typedef DbscanTest<float, int> DbscanTestF_Int;
TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<float, int64_t> DbscanTestF_Int64;
TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int> DbscanTestD_Int;
TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); }
typedef DbscanTest<double, int64_t> DbscanTestD_Int64;
TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64,
::testing::ValuesIn(inputsf3));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int,
::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64,
::testing::ValuesIn(inputsd3));
template <typename T>
struct DBScan2DArrayInputs {
const T *points;
const int *out;
size_t n_row;
// n_out allows comparing fewer labels than we have inputs
// (some output labels can be ambiguous)
size_t n_out;
T eps;
int min_pts;
const int *core_indices; //Expected core_indices
};
template <typename T>
class Dbscan2DSimple : public ::testing::TestWithParam<DBScan2DArrayInputs<T>> {
protected:
void basicTest() {
raft::handle_t handle;
params = ::testing::TestWithParam<DBScan2DArrayInputs<T>>::GetParam();
raft::allocate(inputs, params.n_row * 2);
raft::allocate(labels, params.n_row);
raft::allocate(labels_ref, params.n_out);
raft::allocate(core_sample_indices_d, params.n_row);
raft::copy(inputs, params.points, params.n_row * 2, handle.get_stream());
raft::copy(labels_ref, params.out, params.n_out, handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
Dbscan::fit(handle, inputs, (int)params.n_row, 2, params.eps,
params.min_pts, labels, core_sample_indices_d);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
score = adjusted_rand_index(handle, labels_ref, labels, (int)params.n_out);
if (score < 1.0) {
auto str = raft::arr2Str(labels_ref, params.n_out, "labels_ref",
handle.get_stream());
CUML_LOG_DEBUG("y: %s", str.c_str());
str = raft::arr2Str(labels, params.n_row, "labels", handle.get_stream());
CUML_LOG_DEBUG("y_hat: %s", str.c_str());
CUML_LOG_DEBUG("Score = %lf", score);
}
EXPECT_TRUE(raft::devArrMatchHost(
params.core_indices, core_sample_indices_d, params.n_row,
raft::Compare<int>(), handle.get_stream()));
}
void SetUp() override { basicTest(); }
void TearDown() override {
CUDA_CHECK(cudaFree(labels_ref));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(inputs));
CUDA_CHECK(cudaFree(core_sample_indices_d));
}
protected:
DBScan2DArrayInputs<T> params;
int *labels, *labels_ref;
int *core_sample_indices_d;
T *inputs;
double score;
};
// The input looks like a latin cross or a star with a chain:
// .
// . . . . .
// .
// There is 1 core-point (intersection of the bars)
// and the two points to the very right are not reachable from it
// So there should be one cluster (the plus/star on the left)
// and two noise points
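// (With eps=1.1 and min_pts=4, only (1, 0) has 4 neighbors within eps:
// (0, 0), (1, 1), (1, -1) and (2, 0); every other point has at most 2.)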
const std::vector<float> test2d1_f = {0, 0, 1, 0, 1, 1, 1,
-1, 2, 0, 3, 0, 4, 0};
const std::vector<double> test2d1_d(test2d1_f.begin(), test2d1_f.end());
const std::vector<int> test2d1_l = {0, 0, 0, 0, 0, -1, -1};
const std::vector<int> test2d1c_l = {1, -1, -1, -1, -1, -1, -1};
// The input looks like a long two-barred (orthodox) cross or
// two stars next to each other:
// . .
// . . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters, both in the form of a plus/star
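// (The bar intersections (1, 0) and (4, 0) are the core points, indices 1 and 6
// in test2d2c_l below; they are 3 apart, well outside eps=1.1.)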
const std::vector<float> test2d2_f = {0, 0, 1, 0, 1, 1, 1, -1, 2, 0,
3, 0, 4, 0, 4, 1, 4, -1, 5, 0};
const std::vector<double> test2d2_d(test2d2_f.begin(), test2d2_f.end());
const std::vector<int> test2d2_l = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1};
const std::vector<int> test2d2c_l = {1, 6, -1, -1, -1, -1, -1, -1, -1, -1};
// The input looks like a two-barred (orthodox) cross or
// two stars sharing a link:
// . .
// . . . . .
// . .
// There are 2 core-points but they are not reachable from each other
// So there should be two clusters.
// However, the link that is shared between the stars
// actually has an ambiguous label (to the best of my knowledge)
// as it will depend on the order in which we process the core-points.
// Note that there are 9 input points, but only 8 labels for this reason
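// (The shared link is (2, 0), stored last at index 8, so only the first 8 labels
// are compared; the expected core points are indices 1 and 4, i.e. (1, 0) and (3, 0).)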
const std::vector<float> test2d3_f = {
0, 0, 1, 0, 1, 1, 1, -1, 3, 0, 3, 1, 3, -1, 4, 0, 2, 0,
};
const std::vector<double> test2d3_d(test2d3_f.begin(), test2d3_f.end());
const std::vector<int> test2d3_l = {0, 0, 0, 0, 1, 1, 1, 1};
const std::vector<int> test2d3c_l = {1, 4, -1, -1, -1, -1, -1, -1, -1};
const std::vector<DBScan2DArrayInputs<float>> inputs2d_f = {
{test2d1_f.data(), test2d1_l.data(), test2d1_f.size() / 2, test2d1_l.size(),
1.1f, 4, test2d1c_l.data()},
{test2d2_f.data(), test2d2_l.data(), test2d2_f.size() / 2, test2d2_l.size(),
1.1f, 4, test2d2c_l.data()},
{test2d3_f.data(), test2d3_l.data(), test2d3_f.size() / 2, test2d3_l.size(),
1.1f, 4, test2d3c_l.data()},
};
const std::vector<DBScan2DArrayInputs<double>> inputs2d_d = {
{test2d1_d.data(), test2d1_l.data(), test2d1_d.size() / 2, test2d1_l.size(),
1.1, 4, test2d1c_l.data()},
{test2d2_d.data(), test2d2_l.data(), test2d2_d.size() / 2, test2d2_l.size(),
1.1, 4, test2d2c_l.data()},
{test2d3_d.data(), test2d3_l.data(), test2d3_d.size() / 2, test2d3_l.size(),
1.1, 4, test2d3c_l.data()},
};
typedef Dbscan2DSimple<float> Dbscan2DSimple_F;
TEST_P(Dbscan2DSimple_F, Result) { ASSERT_TRUE(score == 1.0); }
typedef Dbscan2DSimple<double> Dbscan2DSimple_D;
TEST_P(Dbscan2DSimple_D, Result) { ASSERT_TRUE(score == 1.0); }
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_F,
::testing::ValuesIn(inputs2d_f));
INSTANTIATE_TEST_CASE_P(DbscanTests, Dbscan2DSimple_D,
::testing::ValuesIn(inputs2d_d));
} // end namespace ML
|
a75f2aa799c9ace3564690811c31108f947d2361.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <cmath>
#include <memory>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "../common/transform.h"
#include "../common/common.h"
#include "../common/threading_utils.h"
#include "./regression_loss.h"
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< " " << "labels are not correctly provided"
<< "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", "
<< "Loss: " << Loss::Name();
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = tparam_->Threads();
bool on_device = device >= 0;
// On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
const size_t n_data_blocks =
::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
common::Transform<>::Init(
[block_size, ndata] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = ::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, device)
.Eval(&additional_input_, out_gpair, &preds, &info.labels_,
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name())
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new RegLossObj<PseudoHuberError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> >&) override {}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += ::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels_.HostVector();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
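// The loop below accumulates the Breslow risk-set sums over events seen so far,
// r_k = sum(1 / exp_p_sum) and s_k = sum(1 / exp_p_sum^2), and uses them as
// grad = exp(p) * r_k - 1{y > 0}, hess = exp(p) * r_k - exp(p)^2 * s_k.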
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = ::exp(p);
const double w = info.GetWeight(ind);
const double y = labels[ind];
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, [&](long j) { // NOLINT(*)
preds[j] = ::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> >&) override {}
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = tparam_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, device)
.Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
} // namespace obj
} // namespace xgboost
| a75f2aa799c9ace3564690811c31108f947d2361.cu | /*!
* Copyright 2015-2019 by Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <cmath>
#include <memory>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "../common/transform.h"
#include "../common/common.h"
#include "../common/threading_utils.h"
#include "./regression_loss.h"
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CHECK_EQ(preds.Size(), info.labels_.Size())
<< " " << "labels are not correctly provided"
<< "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size() << ", "
<< "Loss: " << Loss::Name();
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = tparam_->Threads();
bool on_device = device >= 0;
// On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
const size_t n_data_blocks =
std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
common::Transform<>::Init(
[block_size, ndata] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = std::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, device)
.Eval(&additional_input_, out_gpair, &preds, &info.labels_,
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
}, common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name())
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new RegLossObj<PseudoHuberError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> >&) override {}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += std::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels_.HostVector();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
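// The loop below accumulates the Breslow risk-set sums over events seen so far,
// r_k = sum(1 / exp_p_sum) and s_k = sum(1 / exp_p_sum^2), and uses them as
// grad = exp(p) * r_k - 1{y > 0}, hess = exp(p) * r_k - exp(p)^2 * s_k.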
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = std::exp(p);
const double w = info.GetWeight(ind);
const double y = labels[ind];
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, [&](long j) { // NOLINT(*)
preds[j] = std::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(
const std::vector<std::pair<std::string, std::string> >&) override {}
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = tparam_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, device).Eval(
&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = tparam_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, device)
.Eval(&label_correct_, out_gpair, &preds, &info.labels_, &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())},
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
} // namespace obj
} // namespace xgboost
|
a10948baf85a445768a76d64495ba92c73f03e0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "dbscan.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <stack>
#include <time.h>
#include <ctime>
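// Constant-memory copy of the input points; cudaDbscanNR fills it below via
// hipMemcpyToSymbol (fixed capacity of 2000 points).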
__constant__ Point sp[2000];
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//####################################################################################
float __device__ cudaManhattanDistance(const Point &source, const Point &destination) //the Manhattan distance for 128D points
{
float dist = 0.0;
for (unsigned i=0; i < 128; i++)
dist += fabs(source.a[i] - destination.a[i]);
return dist;
}
float __device__ cudaEuclideanDistance(const Point &source, const Point &destination) //the Euclidean distance for 128D points
{
float dist = 0.0;
for (unsigned i=0; i < 128; i++)
dist += pow(source.a[i] - destination.a[i], 2);
return sqrt(dist);
}
float __device__ cudaManhattanDistance2D(const Point2D &source, const Point2D &destination)
{
return (fabs(source.x - destination.x) + fabs(source.y - destination.y) );
}
float __device__ cudaEuclideanDistance2D(const Point2D &source, const Point2D &destination)
{
return sqrt( pow(source.x - destination.x, 2) + pow(source.y - destination.y, 2) );
}
void __global__ cudaCreateNeighborsTableShared(Point * inputPoints, int pointNumber,
float eps, int * cudaNeighborhoodMatrix, int minPoint) // variant that reads the points from the __constant__ memory copy sp
{
unsigned int id;
unsigned int source;
unsigned int destination;
unsigned int symValue;
float dist;
int sum;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber * pointNumber) // neighborhood matrix
{
source = id / pointNumber; // row index: first point of the pair
destination = id % pointNumber; // column index: second point of the pair
if(source < destination)
{
symValue = destination * pointNumber + source;
dist = 0.0;
for (unsigned i = 0; i < 128; i++)
dist += pow(sp[source].a[i] - sp[destination].a[i], 2); // squared Euclidean distance inlined; eps is compared against the squared distance below
//dist = sqrt( pow(sp[source].x - sp[destination].x, 2) + pow(sp[source].y - sp[destination].y, 2) );
if(dist < eps) //&& cudaDistTable[id] != 0)
{
cudaNeighborhoodMatrix[id] = 1;
cudaNeighborhoodMatrix[symValue] = 1;
}
else
{
cudaNeighborhoodMatrix[id] = 0;
cudaNeighborhoodMatrix[symValue] = 0;
}
}
id += blockDim.x * gridDim.x;
}
__syncthreads();
id = tid;
while(id < pointNumber) // tag core
{
sum = 0;
source = id * pointNumber;
for (int i = 0; i < pointNumber;i++)
{
if(id != i) // not itself
{
if(cudaNeighborhoodMatrix[source+i])
{
cudaNeighborhoodMatrix[sum] = i;
sum++;
}
}
}
if(sum >= minPoint)
{
inputPoints[id].hint = 1; //hint = 1 - core;
}
id += blockDim.x * gridDim.x;
}
}
void __global__ cudaCreateNeighborsTable(Point * inputPoints, int pointNumber,
float eps, int * cudaNeighborhoodMatrix, int minPoint)
{
unsigned int id;
unsigned int source;
unsigned int destination;
unsigned int symValue;
int sum;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber * pointNumber) // neighborhood matrix
{
source = id / pointNumber; // row index: first point of the pair
destination = id % pointNumber; // column index: second point of the pair
if(source < destination)
{
symValue = destination * pointNumber + source;
if(cudaEuclideanDistance(inputPoints[source], inputPoints[destination]) < eps) //&& cudaDistTable[id] != 0)
{
cudaNeighborhoodMatrix[id] = 1;
cudaNeighborhoodMatrix[symValue] = 1;
}
else
{
cudaNeighborhoodMatrix[id] = 0;
cudaNeighborhoodMatrix[symValue] = 0;
}
}
id += blockDim.x * gridDim.x;
}
__syncthreads();
id = tid;
while(id < pointNumber) // tag core
{
sum = 0;
source = id * pointNumber;
for(int i = 0; i < pointNumber; i++)
{
if(id != i) // not itself
{
if(cudaNeighborhoodMatrix[source+i])
{
cudaNeighborhoodMatrix[sum] = i;
sum++;
}
}
}
if(sum>=minPoint)
{
inputPoints[id].hint = 1; //hint = 1 - core;
}
id += blockDim.x * gridDim.x;
}
}
void __global__ cudaLabeling(Point * inputPoints, int pointNumber, float * cudaPowerMatrix)
{
unsigned int id;
unsigned int source;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber)
{
source = id * pointNumber;
for(int i = 0; i < pointNumber; i++)
{
if(cudaPowerMatrix[source + i] > 0)
{
inputPoints[id].label = i;
cudaPowerMatrix[source + i] = 0;
break;
}
}
id += blockDim.x * gridDim.x;
}
}
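// Host-side cluster expansion: each core point starts a new label, its neighbor
// list (row i of the matrix, read until a 0 entry) is pushed on a stack, and the
// lists of reachable core points are followed transitively.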
void hostLabeling(Point *inputPoints, int pointNumber, int *hostNeighborhoodMatrix)
{
std::stack<int> stack;
int label = 1;
for (int i = 0; i < pointNumber; i++)
{
if(inputPoints[i].hint == 1) // if the point is a core point
{
inputPoints[i].label = label;
int offset = pointNumber*i;
while(hostNeighborhoodMatrix[offset] != 0)
{
inputPoints[hostNeighborhoodMatrix[offset]].label = label;
stack.push(hostNeighborhoodMatrix[offset]);
offset++;
}
while(!stack.empty())
{
if(inputPoints[stack.top()].hint == 1)
{
offset = stack.top() * pointNumber;
while(hostNeighborhoodMatrix[offset] != 0)
{
if(inputPoints[hostNeighborhoodMatrix[offset]].label < 1)
{
inputPoints[hostNeighborhoodMatrix[offset]].label = label;
stack.push(hostNeighborhoodMatrix[offset]);
}
offset++;
}
}
stack.pop();
}
label++;
}
}
}
Point *cudaDbscanNR(Point *inputPoints, int pointNumber, float eps, int minPoint)
{
hipEvent_t start, stop, allocate, memcopy, neighbor, labeling, mainCopy, sstop;
hipEventCreate(&start);
hipEventCreate(&allocate);
hipEventCreate(&memcopy);
hipEventCreate(&neighbor);
hipEventCreate(&labeling);
hipEventCreate(&mainCopy);
hipEventCreate(&stop);
hipEventCreate(&sstop);
hipEventRecord( start, 0 );
int hostLabelingTime;
int *hostNeighborhoodMatrix = (int*) malloc(pointNumber*pointNumber*sizeof(int));
Point *cudaPoints;
int *cudaNeighborhoodMatrix;
int allNum = pointNumber * pointNumber;
int threadsNum = 128;
//############################################################
CUDA_CHECK_RETURN(hipMalloc((void**)&cudaPoints, pointNumber * sizeof(Point)));
CUDA_CHECK_RETURN(hipMalloc((int**)&cudaNeighborhoodMatrix, pointNumber * pointNumber * sizeof(int)));
hipEventRecord( allocate, 0 );
//############################################################
CUDA_CHECK_RETURN(hipMemcpy(cudaPoints, inputPoints, pointNumber*sizeof(Point), hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(sp, inputPoints, 2000 * sizeof(Point)));
hipEventRecord( memcopy, 0 );
//###############################################################
dim3 threads;
threads.x = threadsNum;
dim3 blocks;
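// Ceiling division: one thread per neighborhood-matrix cell; the kernels
// grid-stride over all pointNumber * pointNumber entries.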
blocks.x = ( ( allNum + threads.x - 1 ) / threads.x );
printf ("thread = %d ; blocks %d \n", threads.x, blocks.x);
//cudaCreateNeighborsTable<<<blocks, threads>>>(cudaPoints, pointNumber, eps, cudaNeighborhoodMatrix, minPoint);
hipLaunchKernelGGL(( cudaCreateNeighborsTableShared), dim3(blocks), dim3(threads), 0, 0, cudaPoints, pointNumber, eps, cudaNeighborhoodMatrix, minPoint);
hipEventRecord( neighbor, 0 );
//##############################################################
CUDA_CHECK_RETURN(hipMemcpy(hostNeighborhoodMatrix, cudaNeighborhoodMatrix, pointNumber*pointNumber*sizeof(int), hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(inputPoints, cudaPoints, pointNumber*sizeof(Point), hipMemcpyDeviceToHost));
hipEventRecord( mainCopy, 0 );
/*for(int i = 0; i < pointNumber; i++)
{
for(int j =0;j<pointNumber;j++)
{
printf("%7.2f",hostDistTable[i*pointNumber + j]);
}
printf ("\n");
}
for(int i=0;i<pointNumber;i++)
{
printf("hint = %d\t", inputPoints[i].hint);
for(int j =0;j<pointNumber;j++)
{
printf("%7.3f",hostNeighborhoodMatrix[i*pointNumber + j]);
}
printf ("\n");
}*/
//###############################################
clock_t hostRun = clock();
hostLabeling(inputPoints, pointNumber, hostNeighborhoodMatrix);
hostLabelingTime = clock() - hostRun;
// cudaLabeling<<<blocks, threads>>>(cudaPoints, pointNumber, cudaNeighborhoodMatrixPower);
//
// hipEventRecord( labeling, 0 );
//
// CUDA_CHECK_RETURN(hipMemcpy(inputPoints, cudaPoints,
// pointNumber*sizeof(Point2D), hipMemcpyDeviceToHost));
//##############################################
hipFree(cudaPoints);
hipFree(cudaNeighborhoodMatrix);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventRecord( sstop, 0 );
hipEventSynchronize( sstop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop );
printf( "Time of all (stop):%3.2f ms\n", elapsedTime );
hipEventElapsedTime( &elapsedTime, start, sstop );
printf( "Time of all (sstop):%3.2f ms\n", elapsedTime );
hipEventElapsedTime( &elapsedTime, start, allocate );
printf( "Time of allocate:%3.2f ms\n", elapsedTime );
hipEventElapsedTime( &elapsedTime, allocate, memcopy );
printf( "Time of memcopy:%3.2f ms\n", elapsedTime );
hipEventElapsedTime( &elapsedTime, memcopy, neighbor );
printf( "Time of neighbor matrix:%3.2f ms\n", elapsedTime );
printf("Host labeling: %d clicks (%f seconds).\n", hostLabelingTime, ((float) hostLabelingTime)/CLOCKS_PER_SEC);
// hipEventElapsedTime( &elapsedTime, labeling , mainCopy);
// printf( "Time of mainCopy:%3.2f ms\n", elapsedTime );
hipEventElapsedTime( &elapsedTime, mainCopy , stop);
printf( "Time of free:%3.2f ms\n", elapsedTime );
hipEventDestroy(start);
hipEventDestroy(allocate);
hipEventDestroy(memcopy);
hipEventDestroy(labeling);
hipEventDestroy(mainCopy);
hipEventDestroy(stop);
hipEventDestroy(sstop);
return NULL;
}
| a10948baf85a445768a76d64495ba92c73f03e0c.cu | #include "dbscan.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cublas_v2.h>
#include <stack>
#include <time.h>
#include <ctime>
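// Constant-memory copy of the input points; cudaDbscanNR fills it below via
// cudaMemcpyToSymbol (fixed capacity of 2000 points).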
__constant__ Point sp[2000];
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//####################################################################################
float __device__ cudaManhattanDistance(const Point &source, const Point &destination) //the Manhattan distance for 128D points
{
float dist = 0.0;
for (unsigned i=0; i < 128; i++)
dist += fabs(source.a[i] - destination.a[i]);
return dist;
}
float __device__ cudaEuclideanDistance(const Point &source, const Point &destination) //the Euclidean distance for 128D points
{
float dist = 0.0;
for (unsigned i=0; i < 128; i++)
dist += pow(source.a[i] - destination.a[i], 2);
return sqrt(dist);
}
float __device__ cudaManhattanDistance2D(const Point2D &source, const Point2D &destination)
{
return (fabs(source.x - destination.x) + fabs(source.y - destination.y) );
}
float __device__ cudaEuclideanDistance2D(const Point2D &source, const Point2D &destination)
{
return sqrt( pow(source.x - destination.x, 2) + pow(source.y - destination.y, 2) );
}
void __global__ cudaCreateNeighborsTableShared(Point * inputPoints, int pointNumber,
float eps, int * cudaNeighborhoodMatrix, int minPoint) // variant that reads the points from the __constant__ memory copy sp
{
unsigned int id;
unsigned int source;
unsigned int destination;
unsigned int symValue;
float dist;
int sum;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber * pointNumber) // neighborhood matrix
{
source = id / pointNumber; // row index: first point of the pair
destination = id % pointNumber; // column index: second point of the pair
if(source < destination)
{
symValue = destination * pointNumber + source;
dist = 0.0;
for (unsigned i = 0; i < 128; i++)
dist += pow(sp[source].a[i] - sp[destination].a[i], 2); // squared Euclidean distance inlined; eps is compared against the squared distance below
//dist = sqrt( pow(sp[source].x - sp[destination].x, 2) + pow(sp[source].y - sp[destination].y, 2) );
if(dist < eps) //&& cudaDistTable[id] != 0)
{
cudaNeighborhoodMatrix[id] = 1;
cudaNeighborhoodMatrix[symValue] = 1;
}
else
{
cudaNeighborhoodMatrix[id] = 0;
cudaNeighborhoodMatrix[symValue] = 0;
}
}
id += blockDim.x * gridDim.x;
}
__syncthreads();
id = tid;
while(id < pointNumber) // tag core
{
sum = 0;
source = id * pointNumber;
for (int i = 0; i < pointNumber;i++)
{
if(id != i) // not itself
{
if(cudaNeighborhoodMatrix[source+i])
{
cudaNeighborhoodMatrix[sum] = i;
sum++;
}
}
}
if(sum >= minPoint)
{
inputPoints[id].hint = 1; //hint = 1 - core;
}
id += blockDim.x * gridDim.x;
}
}
void __global__ cudaCreateNeighborsTable(Point * inputPoints, int pointNumber,
float eps, int * cudaNeighborhoodMatrix, int minPoint)
{
unsigned int id;
unsigned int source;
unsigned int destination;
unsigned int symValue;
int sum;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber * pointNumber) // neighborhood matrix
{
source = id / pointNumber; // row index: first point of the pair
destination = id % pointNumber; // column index: second point of the pair
if(source < destination)
{
symValue = destination * pointNumber + source;
if(cudaEuclideanDistance(inputPoints[source], inputPoints[destination]) < eps) //&& cudaDistTable[id] != 0)
{
cudaNeighborhoodMatrix[id] = 1;
cudaNeighborhoodMatrix[symValue] = 1;
}
else
{
cudaNeighborhoodMatrix[id] = 0;
cudaNeighborhoodMatrix[symValue] = 0;
}
}
id += blockDim.x * gridDim.x;
}
__syncthreads();
id = tid;
while(id < pointNumber) // tag core
{
sum = 0;
source = id * pointNumber;
for(int i = 0; i < pointNumber; i++)
{
if(id != i) // not itself
{
if(cudaNeighborhoodMatrix[source+i])
{
cudaNeighborhoodMatrix[sum] = i;
sum++;
}
}
}
if(sum>=minPoint)
{
inputPoints[id].hint = 1; //hint = 1 - core;
}
id += blockDim.x * gridDim.x;
}
}
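// GPU labeling variant (not used by cudaDbscanNR below): assigns each point the index of
// the first positive entry in its row of the power matrix and clears that entry.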
void __global__ cudaLabeling(Point * inputPoints, int pointNumber, float * cudaPowerMatrix)
{
unsigned int id;
unsigned int source;
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
id = tid;
while(id < pointNumber)
{
source = id * pointNumber;
for(int i = 0; i < pointNumber; i++)
{
if(cudaPowerMatrix[source + i] > 0)
{
inputPoints[id].label = i;
cudaPowerMatrix[source + i] = 0;
break;
}
}
id += blockDim.x * gridDim.x;
}
}
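// CPU cluster expansion: for every core point, flood-fills its neighbor list (one row of
// the neighborhood matrix, terminated by a zero entry) with a fresh cluster label using
// an explicit stack.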
void hostLabeling(Point *inputPoints, int pointNumber, int *hostNeighborhoodMatrix)
{
std::stack<int> stack;
int label = 1;
for (int i = 0; i < pointNumber; i++)
{
if(inputPoints[i].hint == 1) // if the point is a core point
{
inputPoints[i].label = label;
int offset = pointNumber*i;
while(hostNeighborhoodMatrix[offset] != 0)
{
inputPoints[hostNeighborhoodMatrix[offset]].label = label;
stack.push(hostNeighborhoodMatrix[offset]);
offset++;
}
while(!stack.empty())
{
if(inputPoints[stack.top()].hint == 1)
{
offset = stack.top() * pointNumber;
while(hostNeighborhoodMatrix[offset] != 0)
{
if(inputPoints[hostNeighborhoodMatrix[offset]].label < 1)
{
inputPoints[hostNeighborhoodMatrix[offset]].label = label;
stack.push(hostNeighborhoodMatrix[offset]);
}
offset++;
}
}
stack.pop();
}
label++;
}
}
}
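// Host driver: allocates the device buffers, copies the points to the GPU (including the
// __constant__ cache sp), launches the neighborhood kernel, copies the matrix back and
// performs the labeling on the CPU, timing every stage with CUDA events.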
Point *cudaDbscanNR(Point *inputPoints, int pointNumber, float eps, int minPoint)
{
cudaEvent_t start, stop, allocate, memcopy, neighbor, labeling, mainCopy, sstop;
cudaEventCreate(&start);
cudaEventCreate(&allocate);
cudaEventCreate(&memcopy);
cudaEventCreate(&neighbor);
cudaEventCreate(&labeling);
cudaEventCreate(&mainCopy);
cudaEventCreate(&stop);
cudaEventCreate(&sstop);
cudaEventRecord( start, 0 );
int hostLabelingTime;
int *hostNeighborhoodMatrix = (int*) malloc(pointNumber*pointNumber*sizeof(int));
Point *cudaPoints;
int *cudaNeighborhoodMatrix;
int allNum = pointNumber * pointNumber;
int threadsNum = 128;
//############################################################
CUDA_CHECK_RETURN(cudaMalloc((void**)&cudaPoints, pointNumber * sizeof(Point)));
CUDA_CHECK_RETURN(cudaMalloc((int**)&cudaNeighborhoodMatrix, pointNumber * pointNumber * sizeof(int)));
cudaEventRecord( allocate, 0 );
//############################################################
CUDA_CHECK_RETURN(cudaMemcpy(cudaPoints, inputPoints, pointNumber*sizeof(Point), cudaMemcpyHostToDevice));
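// Note: always copies 2000 Point structs into the __constant__ array sp (its full capacity), regardless of pointNumber.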
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(sp, inputPoints, 2000 * sizeof(Point)));
cudaEventRecord( memcopy, 0 );
//###############################################################
dim3 threads;
threads.x = threadsNum;
dim3 blocks;
blocks.x = ( ( allNum + threads.x - 1 ) / threads.x );
printf ("thread = %d ; blocks %d \n", threads.x, blocks.x);
//cudaCreateNeighborsTable<<<blocks, threads>>>(cudaPoints, pointNumber, eps, cudaNeighborhoodMatrix, minPoint);
cudaCreateNeighborsTableShared<<<blocks, threads>>>(cudaPoints, pointNumber, eps, cudaNeighborhoodMatrix, minPoint);
cudaEventRecord( neighbor, 0 );
//##############################################################
CUDA_CHECK_RETURN(cudaMemcpy(hostNeighborhoodMatrix, cudaNeighborhoodMatrix, pointNumber*pointNumber*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(inputPoints, cudaPoints, pointNumber*sizeof(Point), cudaMemcpyDeviceToHost));
cudaEventRecord( mainCopy, 0 );
/*for(int i = 0; i < pointNumber; i++)
{
for(int j =0;j<pointNumber;j++)
{
printf("%7.2f",hostDistTable[i*pointNumber + j]);
}
printf ("\n");
}
for(int i=0;i<pointNumber;i++)
{
printf("hint = %d\t", inputPoints[i].hint);
for(int j =0;j<pointNumber;j++)
{
printf("%7.3f",hostNeighborhoodMatrix[i*pointNumber + j]);
}
printf ("\n");
}*/
//###############################################
clock_t hostRun = clock();
hostLabeling(inputPoints, pointNumber, hostNeighborhoodMatrix);
hostLabelingTime = clock() - hostRun;
// cudaLabeling<<<blocks, threads>>>(cudaPoints, pointNumber, cudaNeighborhoodMatrixPower);
//
// cudaEventRecord( labeling, 0 );
//
// CUDA_CHECK_RETURN(cudaMemcpy(inputPoints, cudaPoints,
// pointNumber*sizeof(Point2D), cudaMemcpyDeviceToHost));
//##############################################
cudaFree(cudaPoints);
cudaFree(cudaNeighborhoodMatrix);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventRecord( sstop, 0 );
cudaEventSynchronize( sstop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
printf( "Time of all (stop):%3.2f ms\n", elapsedTime );
cudaEventElapsedTime( &elapsedTime, start, sstop );
printf( "Time of all (sstop):%3.2f ms\n", elapsedTime );
cudaEventElapsedTime( &elapsedTime, start, allocate );
printf( "Time of allocate:%3.2f ms\n", elapsedTime );
cudaEventElapsedTime( &elapsedTime, allocate, memcopy );
printf( "Time of memcopy:%3.2f ms\n", elapsedTime );
cudaEventElapsedTime( &elapsedTime, memcopy, neighbor );
printf( "Time of neighbor matrix:%3.2f ms\n", elapsedTime );
printf("Host labeling: %d clicks (%f seconds).\n", hostLabelingTime, ((float) hostLabelingTime)/CLOCKS_PER_SEC);
// cudaEventElapsedTime( &elapsedTime, labeling , mainCopy);
// printf( "Time of mainCopy:%3.2f ms\n", elapsedTime );
cudaEventElapsedTime( &elapsedTime, mainCopy , stop);
printf( "Time of free:%3.2f ms\n", elapsedTime );
cudaEventDestroy(start);
cudaEventDestroy(allocate);
cudaEventDestroy(memcopy);
cudaEventDestroy(labeling);
cudaEventDestroy(mainCopy);
cudaEventDestroy(stop);
cudaEventDestroy(sstop);
return NULL;
}
|
7c6889e201344fe5529ea1d025763b8bc51ee380.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include <float.h>
#ifndef AT_CHECK
#define AT_CHECK TORCH_CHECK
#endif
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 512;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
void print_tsize(at::Tensor t, const char *msg) {
printf("%s size: ");
for (int i = 0; i < t.ndimension(); i++) {
printf("%d ", int(t.size(i)));
}
printf("\n");
}
// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done
// __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
template <typename scalar_t>
__device__ static inline int64_t get_target_prime(
const scalar_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK
) {
if (idx % 2 == 0) {
return BLANK;
} else {
return target[offset + stride * (idx / 2)];
}
}
template <typename scalar_t>
__device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b)
{
/*
When both a and b are -inf, the result is also -inf.
When only one of them is -inf (e.g. a is -inf), the result is b.
So the result is correct in both corner cases.
*/
scalar_t m=((a > b) ? a : b);
if (m == -INFINITY)
m = 0;
return (::log(::exp(a-m) + ::exp(b-m)) + m);
}
template <typename scalar_t>
__global__ void ctc2d_log_alpha_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ log_alpha_data, const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, const int max_target_length,
scalar_t* __restrict__ neg_log_likelihood_data,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1);
if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
// log_probs_data ==> [T, H, N, C]
// log_alpha_data ==> [N, T, H, 2*S+1]
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
scalar_t la;
// Dynamic-programming initialization of the forward probabilities for augmented-target positions 0 and 1
switch (s) {
case 0:
for (int64_t h=0; h < height; h++) {
// Probability of the path starting with the blank symbol: initialize position s=0 (blank)
la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK];
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
break;
case 1:
for (int64_t h=0; h < height; h++) {
if (target_length > 0) {
// When s=1, log_alpha[s=1] is the probability of the first target character
la = log_probs_data[lp_height_stride*h + lp_batch_offset +
lp_char_stride*get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)];
}
else {
la = -INFINITY;
}
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
break;
default:
la = -INFINITY;
if (s < 2*max_target_length+1) {
for (int64_t h=0; h < height; h++)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
}
// These two only depend on s, so we can cache them.
int64_t current_char; // l_s in eq (6)
bool have_three; // flag which of the two cases in eq (6) we have
if (s < 2*target_length+1) {
current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s > 1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char));
} else {
// Beyond the target length the remaining positions are treated as blanks
current_char = BLANK;
have_three = false;
}
for (int64_t t=1; t < max_input_length; t++) {
// on cuda 9 we might use partial synchronization of only the threads within the same batch
__syncthreads();
if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) {
// only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
// lamax is the maximum for the logsumexp trick.
scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*s];
// Log-sum the probabilities over all heights at time step t-1
for (int64_t h=1; h < height; h++) {
la1 = safe_log_add(la1, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*s]);
}
scalar_t lamax = la1;
scalar_t la2, la3;
if (s > 0) {
// Second term of the DP transition: \alpha(s-1)(t-1)
la2 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*(s-1)];
for (int64_t h=1; h < height; h++) {
la2 = safe_log_add(la2, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*(s-1)]);
}
if (la2 > lamax)
lamax = la2;
} else {
la2 = -INFINITY;
}
if (have_three) {
// If the current s is not a blank, this position can also be reached from \alpha(s-2)(t-1)
la3 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*(s-2)];
for (int64_t h=1; h < height; h++) {
la3 = safe_log_add(la3, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*(s-2)]);
}
if (la3 > lamax)
lamax = la3;
} else {
la3 = -INFINITY;
}
// when all are neginf. (then the whole thing is neginf, but we can pretend)
if (lamax == -INFINITY)
lamax = 0;
for (int64_t h=0; h < height; h++) {
log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s] =
::log(::exp(la1-lamax) + ::exp(la2-lamax) + ::exp(la3-lamax)) + lamax +
log_probs_data[lp_input_stride*t + lp_height_stride*h +
lp_batch_offset + lp_char_stride*current_char];
}
} else {
// otherwise we just set to neginf
if (s < 2*max_target_length+1) {
for (int64_t h = 0; h < height; h++) {
log_alpha_data[la_batch_offset + la_input_stride * t +
la_height_stride * h + la_target_stride * s] = -INFINITY;
}
}
}
}
// on cuda 9 we might use partial synchronization of only the threads within the same batch
__syncthreads();
// compute the loss (eq (8))
if (s == 0) {
// Sum the probabilities at augmented positions 2*target_length and 2*target_length-1
scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*0 + la_target_stride*(target_length*2)];
for (int64_t h=1; h < height; h++) {
l1 = safe_log_add(l1, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*h + la_target_stride*(target_length*2)]);
}
scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*0 + la_target_stride*(target_length*2-1)];
for (int64_t h=1; h < height; h++) {
l2 = safe_log_add(l2, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*h + la_target_stride*(target_length*2-1)]);
}
if (l1==-INFINITY && l2==-INFINITY){
// -ln(p)=0 would mean the probability is 1, so we cannot simply return 0; treat p as a very small probability instead
// -ln(0.001) = 6.9
// Here both path probabilities are 0 (their logs are -inf)
neg_log_likelihood_data[b] = 6.9;
}else{
scalar_t m = ((l1 > l2) ? l1 : l2);
if (m == -INFINITY)
m = 0;
scalar_t log_likelihood = ::log(::exp(l1-m)+::exp(l2-m))+m;
neg_log_likelihood_data[b] = -log_likelihood;
}
}
}
}
std::tuple<at::Tensor, at::Tensor> ctc2d_gpu_template(
const at::Tensor log_probs, const at::Tensor targets,
const at::Tensor input_lengths, const at::Tensor target_lengths,
int64_t BLANK, float TINY
) {
int64_t max_target_length = targets.size(1);
AT_CHECK((2 * max_target_length + 1) <= CUDA_NUM_THREADS, "max target length out of range, got ", max_target_length,
", must less than ", CUDA_NUM_THREADS);
int64_t max_input_length = log_probs.size(0);
int64_t height = log_probs.size(1);
int64_t batch_size = log_probs.size(2);
int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1);
const int num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
// N T H 2*S+1
at::Tensor log_alpha = at::zeros(
{batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1},
log_probs.options()
);
at::Tensor neg_log_likelihood = at::zeros({batch_size}, log_probs.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_alpha_gpu_template", ([&] {
hipLaunchKernelGGL(( ctc2d_log_alpha_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels,
log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK
);
}));
return std::make_tuple(neg_log_likelihood, log_alpha);
}
template <typename scalar_t>
__global__ void ctc2d_log_beta_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, const int max_target_length,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1);
if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
// log_probs_data ==> [T, H, N, C]
// log_beta_data ==> [N, T, H, 2*S+1]
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
scalar_t lb;
if (s == 2*target_length) {
for (int64_t h=0; h < height; h++) {
lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK];
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb;
}
} else if ((target_length > 0) && (s == 2*target_length-1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
for (int64_t h=0; h < height; h++) {
lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset +
lp_char_stride*current_target_prime];
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb;
}
} else {
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] =
-INFINITY;
}
}
int64_t current_target_prime;
bool have_three;
if (s < 2*target_length+1) {
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s < 2*target_length-1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) !=
current_target_prime));
} else {
current_target_prime = BLANK;
have_three = false;
}
// now go backward in t. Note that we need to skip the last timestep that we did above.
for (int64_t t=max_input_length-2; t>=0; t--) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) {
scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*0 + lb_target_stride*s];
for (int64_t h=1; h < height; h++) {
lb1 = safe_log_add(lb1, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*s]);
}
scalar_t lbmax = lb1;
scalar_t lb2, lb3;
if (s < 2*target_length) {
lb2 = log_beta_data[
lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+1)];
for (int64_t h=1; h < height; h++) {
lb2 = safe_log_add(lb2, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*(s+1)]);
}
if (lb2 > lbmax)
lbmax = lb2;
} else {
lb2 = -INFINITY;
}
if (have_three) {
lb3 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*0 + lb_target_stride*(s+2)];
for (int64_t h=1; h < height; h++) {
lb3 = safe_log_add(lb3, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*(s+2)]);
}
if (lb3 > lbmax)
lbmax = lb3;
} else {
lb3 = -INFINITY;
}
if (lbmax == -INFINITY)
lbmax = 0;
scalar_t tmp = ::log(::exp(lb1-lbmax) + ::exp(lb2-lbmax) + ::exp(lb3-lbmax)) + lbmax;
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] =
tmp + log_probs_data[lp_input_stride*t + lp_height_stride*h +
lp_batch_offset + lp_char_stride*current_target_prime];
}
} else if ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length)) {
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = -INFINITY;
}
}
}
}
}
template <typename scalar_t>
__global__ void ctc2d_backward_collect_nonblank_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / max_target_length + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % max_target_length;
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
if (s >= target_length)
return;
int64_t target = targets_data[tg_batch_offset + s*tg_target_stride];
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
if (zero_infinity && nll == INFINITY)
return;
for (int64_t t = 0; t < input_length; t++) {
for (int64_t h = 0; h < height; h++) {
scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*target];
atomicAdd(&gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*target],
-::exp(log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*(s*2+1)]
+ log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*(s*2+1)]
+ nll - lp) * gr);
}
}
}
}
template <typename scalar_t>
__global__ void ctc2d_backward_collect_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / max_input_length + blockIdx.x*batch_per_block;
int64_t t = (index - blockIdx.x*blockDim.x) % max_input_length;
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
// collected[b, t, h, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
for (int64_t s = 0; s < 2*max_target_length+1; s++) {
if ((target_length > 0) && (s < 2*target_length+1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
/*scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*0 + la_target_stride*s];
for (int64_t h = 1; h < height; h++) {
laaa = safe_log_add(laaa, log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*h + la_target_stride*s]);
}
scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*0 + lb_target_stride*s];
for (int64_t h = 1; h < height; h++) {
lbbb = safe_log_add(lbbb, log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*h + lb_target_stride*s]);
}*/
for (int64_t h = 0; h < height; h++) {
scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*h + la_target_stride*s];
scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*h + lb_target_stride*s];
scalar_t log_alpha_beta = laaa + lbbb;
scalar_t& lcab =
gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*current_target_prime];
if (lcab == -INFINITY) {
lcab = log_alpha_beta;
} else {
scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
lcab = ::log(::exp(lcab-max)+::exp(log_alpha_beta-max))+max;
}
}
}
}
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t c = 0; c < num_labels; c++) {
for (int64_t h = 0; h < height; h++) {
scalar_t& res = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*c];
if (t < input_length && (! zero_infinity || nll != INFINITY)) {
scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*c];
if (res == -INFINITY)
res = 0;
else
res = (::exp(lp) - ::exp(res + nll - lp)) * gr;
}
else {
res = 0.;
}
}
}
}
}
at::Tensor ctc2d_gpu_backward_template(
const at::Tensor grad_out, const at::Tensor log_probs, const at::Tensor targets,
const at::Tensor input_lengths, const at::Tensor target_lengths,
const at::Tensor neg_log_likelihood, const at::Tensor log_alpha,
int64_t BLANK
) {
bool zero_infinity = 0;
int64_t max_target_length = targets.size(1);
int64_t max_input_length = log_probs.size(0);
int64_t height = log_probs.size(1);
int64_t batch_size = log_probs.size(2);
int64_t num_labels = log_probs.size(3);
int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1);
int64_t num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
at::Tensor log_beta = at::zeros(
{batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1},
log_probs.options()
);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_beta_gpu_template", ([&] {
hipLaunchKernelGGL(( ctc2d_log_beta_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels,
log_beta.data<scalar_t>(), log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK
);
}));
at::Tensor grad = at::full_like(log_probs, -INFINITY);
// bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
bool is_large = 0;
if (is_large) { // large alphabet, large batch
// std::cout << "+++Large+++" << std::endl;
// this computes the probs, minuend in (16)
exp_out(grad, log_probs);
// now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
// blanks are in every other position.
// maybe we should kernelize this, too.
auto grad_blank = grad.narrow(3, BLANK, 1);
grad_blank -= (at::logsumexp(
log_alpha.as_strided({batch_size, log_alpha.size(1), log_alpha.size(2), max_target_length+1},
{log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3)*2}) +
log_beta.as_strided({batch_size, log_beta.size(1), log_beta.size(2), max_target_length+1},
{log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3)*2}),
3, true)
.permute({1, 2, 0, 3})
.add_(neg_log_likelihood.view({1, 1, batch_size, 1}))
.sub_(log_probs.narrow(3, BLANK, 1))
.exp_()
);
grad *= grad_out.view({1, 1, batch_size, 1});
// For the non-blank characters, we use a kernel to compute the subtrahend.
// Again we might configure block and grid in a better way.
batch_per_block = CUDA_NUM_THREADS / max_target_length;
num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_nonblank", ([&] {
hipLaunchKernelGGL(( ctc2d_backward_collect_nonblank_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK, zero_infinity
);
}));
} else { // small problem, use naive algorithm
// std::cout << "+++Small+++" << std::endl;
batch_per_block = CUDA_NUM_THREADS / max_input_length;
num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_all", ([&] {
hipLaunchKernelGGL(( ctc2d_backward_collect_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, num_labels, batch_per_block, height, BLANK, zero_infinity
);
}));
}
return grad;
}
| 7c6889e201344fe5529ea1d025763b8bc51ee380.cu | #include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include <float.h>
#ifndef AT_CHECK
#define AT_CHECK TORCH_CHECK
#endif
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 512;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
void print_tsize(at::Tensor t, const char *msg) {
printf("%s size: ");
for (int i = 0; i < t.ndimension(); i++) {
printf("%d ", int(t.size(i)));
}
printf("\n");
}
// this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done
// __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/
template <typename scalar_t>
__device__ static inline int64_t get_target_prime(
const scalar_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK
) {
if (idx % 2 == 0) {
return BLANK;
} else {
return target[offset + stride * (idx / 2)];
}
}
template <typename scalar_t>
__device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b)
{
/*
When both a and b are -inf, the result is also -inf.
When only one of them is -inf (e.g. a is -inf), the result is b.
So the result is correct in both corner cases.
*/
scalar_t m=((a > b) ? a : b);
if (m == -INFINITY)
m = 0;
return (std::log(std::exp(a-m) + std::exp(b-m)) + m);
}
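// Forward (alpha) pass of the 2D CTC recursion: each thread handles one (batch, s) pair of the
// augmented target, iterating over time; sums over the height dimension are done in log space,
// and the thread with s == 0 writes the per-sample negative log-likelihood.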
template <typename scalar_t>
__global__ void ctc2d_log_alpha_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ log_alpha_data, const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, const int max_target_length,
scalar_t* __restrict__ neg_log_likelihood_data,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1);
if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
// log_probs_data ==> [T, H, N, C]
// log_alpha_data ==> [N, T, H, 2*S+1]
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
scalar_t la;
// Dynamic-programming initialization of the forward probabilities for augmented-target positions 0 and 1
switch (s) {
case 0:
for (int64_t h=0; h < height; h++) {
// Probability of the path starting with the blank symbol: initialize position s=0 (blank)
la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK];
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
break;
case 1:
for (int64_t h=0; h < height; h++) {
if (target_length > 0) {
// When s=1, log_alpha[s=1] is the probability of the first target character
la = log_probs_data[lp_height_stride*h + lp_batch_offset +
lp_char_stride*get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)];
}
else {
la = -INFINITY;
}
if (s < 2*max_target_length+1)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
break;
default:
la = -INFINITY;
if (s < 2*max_target_length+1) {
for (int64_t h=0; h < height; h++)
log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la;
}
}
// These two only depend on s, so we can cache them.
int64_t current_char; // l_s in eq (6)
bool have_three; // flag which of the two cases in eq (6) we have
if (s < 2*target_length+1) {
current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s > 1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char));
} else {
// Beyond the target length the remaining positions are treated as blanks
current_char = BLANK;
have_three = false;
}
for (int64_t t=1; t < max_input_length; t++) {
// on cuda 9 we might use partial synchronization of only the threads within the same batch
__syncthreads();
if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) {
// only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands,
// lamax is the maximum for the logsumexp trick.
scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*s];
// Log-sum the probabilities over all heights at time step t-1
for (int64_t h=1; h < height; h++) {
la1 = safe_log_add(la1, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*s]);
}
scalar_t lamax = la1;
scalar_t la2, la3;
if (s > 0) {
// Second term of the DP transition: \alpha(s-1)(t-1)
la2 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*(s-1)];
for (int64_t h=1; h < height; h++) {
la2 = safe_log_add(la2, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*(s-1)]);
}
if (la2 > lamax)
lamax = la2;
} else {
la2 = -INFINITY;
}
if (have_three) {
// If the current s is not a blank, this position can also be reached from \alpha(s-2)(t-1)
la3 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*0 + la_target_stride*(s-2)];
for (int64_t h=1; h < height; h++) {
la3 = safe_log_add(la3, log_alpha_data[la_batch_offset + la_input_stride*(t-1) +
la_height_stride*h + la_target_stride*(s-2)]);
}
if (la3 > lamax)
lamax = la3;
} else {
la3 = -INFINITY;
}
// when all are neginf. (then the whole thing is neginf, but we can pretend)
if (lamax == -INFINITY)
lamax = 0;
for (int64_t h=0; h < height; h++) {
log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s] =
std::log(std::exp(la1-lamax) + std::exp(la2-lamax) + std::exp(la3-lamax)) + lamax +
log_probs_data[lp_input_stride*t + lp_height_stride*h +
lp_batch_offset + lp_char_stride*current_char];
}
} else {
// otherwise we just set to neginf
if (s < 2*max_target_length+1) {
for (int64_t h = 0; h < height; h++) {
log_alpha_data[la_batch_offset + la_input_stride * t +
la_height_stride * h + la_target_stride * s] = -INFINITY;
}
}
}
}
// on cuda 9 we might use partial synchronization of only the threads within the same batch
__syncthreads();
// compute the loss (eq (8))
if (s == 0) {
// Sum the probabilities at augmented positions 2*target_length and 2*target_length-1
scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*0 + la_target_stride*(target_length*2)];
for (int64_t h=1; h < height; h++) {
l1 = safe_log_add(l1, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*h + la_target_stride*(target_length*2)]);
}
scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*0 + la_target_stride*(target_length*2-1)];
for (int64_t h=1; h < height; h++) {
l2 = safe_log_add(l2, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) +
la_height_stride*h + la_target_stride*(target_length*2-1)]);
}
if (l1==-INFINITY && l2==-INFINITY){
// -ln(p)=0 would mean the probability is 1, so we cannot simply return 0; treat p as a very small probability instead
// -ln(0.001) = 6.9
// Here both path probabilities are 0 (their logs are -inf)
neg_log_likelihood_data[b] = 6.9;
}else{
scalar_t m = ((l1 > l2) ? l1 : l2);
if (m == -INFINITY)
m = 0;
scalar_t log_likelihood = std::log(std::exp(l1-m)+std::exp(l2-m))+m;
neg_log_likelihood_data[b] = -log_likelihood;
}
}
}
}
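// Host-side entry point for the forward pass: packs batch_per_block samples (each contributing
// 2*max_target_length+1 augmented positions) into one block of CUDA_NUM_THREADS threads and
// returns the loss together with log_alpha for the backward pass.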
std::tuple<at::Tensor, at::Tensor> ctc2d_gpu_template(
const at::Tensor log_probs, const at::Tensor targets,
const at::Tensor input_lengths, const at::Tensor target_lengths,
int64_t BLANK, float TINY
) {
int64_t max_target_length = targets.size(1);
AT_CHECK((2 * max_target_length + 1) <= CUDA_NUM_THREADS, "max target length out of range, got ", max_target_length,
", must less than ", CUDA_NUM_THREADS);
int64_t max_input_length = log_probs.size(0);
int64_t height = log_probs.size(1);
int64_t batch_size = log_probs.size(2);
int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1);
const int num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
// N T H 2*S+1
at::Tensor log_alpha = at::zeros(
{batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1},
log_probs.options()
);
at::Tensor neg_log_likelihood = at::zeros({batch_size}, log_probs.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_alpha_gpu_template", ([&] {
ctc2d_log_alpha_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK
);
}));
return std::make_tuple(neg_log_likelihood, log_alpha);
}
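// Backward (beta) pass: mirrors the alpha kernel but walks time in reverse, starting from the
// last valid input frame of each sample.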
template <typename scalar_t>
__global__ void ctc2d_log_beta_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, const int max_target_length,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1);
if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block))
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
// log_probs_data ==> [T, H, N, C]
// log_beta_data ==> [N, T, H, 2*S+1]
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
scalar_t lb;
if (s == 2*target_length) {
for (int64_t h=0; h < height; h++) {
lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK];
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb;
}
} else if ((target_length > 0) && (s == 2*target_length-1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
for (int64_t h=0; h < height; h++) {
lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset +
lp_char_stride*current_target_prime];
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb;
}
} else {
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] =
-INFINITY;
}
}
int64_t current_target_prime;
bool have_three;
if (s < 2*target_length+1) {
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
have_three = ((s < 2*target_length-1) &&
(get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) !=
current_target_prime));
} else {
current_target_prime = BLANK;
have_three = false;
}
// now go backward in t. Note that we need to skip the last timestep that we did above.
for (int64_t t=max_input_length-2; t>=0; t--) {
__syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item
if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) {
scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*0 + lb_target_stride*s];
for (int64_t h=1; h < height; h++) {
lb1 = safe_log_add(lb1, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*s]);
}
scalar_t lbmax = lb1;
scalar_t lb2, lb3;
if (s < 2*target_length) {
lb2 = log_beta_data[
lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+1)];
for (int64_t h=1; h < height; h++) {
lb2 = safe_log_add(lb2, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*(s+1)]);
}
if (lb2 > lbmax)
lbmax = lb2;
} else {
lb2 = -INFINITY;
}
if (have_three) {
lb3 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*0 + lb_target_stride*(s+2)];
for (int64_t h=1; h < height; h++) {
lb3 = safe_log_add(lb3, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) +
lb_height_stride*h + lb_target_stride*(s+2)]);
}
if (lb3 > lbmax)
lbmax = lb3;
} else {
lb3 = -INFINITY;
}
if (lbmax == -INFINITY)
lbmax = 0;
scalar_t tmp = std::log(std::exp(lb1-lbmax) + std::exp(lb2-lbmax) + std::exp(lb3-lbmax)) + lbmax;
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] =
tmp + log_probs_data[lp_input_stride*t + lp_height_stride*h +
lp_batch_offset + lp_char_stride*current_target_prime];
}
} else if ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length)) {
for (int64_t h=0; h < height; h++) {
log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = -INFINITY;
}
}
}
}
}
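// Gradient collection restricted to the non-blank target characters (used on the "large" path):
// one thread per (batch, target position), accumulating into the gradient tensor with atomicAdd.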
template <typename scalar_t>
__global__ void ctc2d_backward_collect_nonblank_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / max_target_length + blockIdx.x*batch_per_block;
int64_t s = (index - blockIdx.x*blockDim.x) % max_target_length;
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
if (s >= target_length)
return;
int64_t target = targets_data[tg_batch_offset + s*tg_target_stride];
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
if (zero_infinity && nll == INFINITY)
return;
for (int64_t t = 0; t < input_length; t++) {
for (int64_t h = 0; h < height; h++) {
scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*target];
atomicAdd(&gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*target],
-std::exp(log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*(s*2+1)]
+ log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*(s*2+1)]
+ nll - lp) * gr);
}
}
}
}
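// Naive gradient collection over all labels: one thread per (batch, time step) log-accumulates
// alpha+beta per character and then converts the accumulated value into the gradient of the
// log-probabilities.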
template <typename scalar_t>
__global__ void ctc2d_backward_collect_gpu_kernel(
const int64_t n,
scalar_t* __restrict__ gradient_data,
const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride,
const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data,
const scalar_t* log_probs_data,
const int64_t* __restrict__ input_lengths, int64_t max_input_length,
const int64_t* __restrict__ targets_data,
const int64_t* __restrict__ target_lengths, int64_t max_target_length,
const scalar_t* __restrict__ neg_log_likelihood_data,
int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride,
int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride,
int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride,
int64_t tg_batch_stride, int64_t tg_target_stride,
int64_t batch_size, int64_t num_labels, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity
) {
CUDA_KERNEL_LOOP(index, n)
{
int64_t b = (index - blockIdx.x*blockDim.x) / max_input_length + blockIdx.x*batch_per_block;
int64_t t = (index - blockIdx.x*blockDim.x) % max_input_length;
if (b >= batch_size)
return;
int64_t input_length = input_lengths[b];
int64_t target_length = target_lengths[b];
int64_t gr_batch_offset = b*gr_batch_stride;
int64_t lp_batch_offset = b*lp_batch_stride;
int64_t la_batch_offset = b*la_batch_stride;
int64_t lb_batch_offset = b*lb_batch_stride;
int64_t tg_batch_offset = b*tg_batch_stride;
// collected[b, t, h, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s]
for (int64_t s = 0; s < 2*max_target_length+1; s++) {
if ((target_length > 0) && (s < 2*target_length+1)) {
int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK);
/*scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*0 + la_target_stride*s];
for (int64_t h = 1; h < height; h++) {
laaa = safe_log_add(laaa, log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*h + la_target_stride*s]);
}
scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*0 + lb_target_stride*s];
for (int64_t h = 1; h < height; h++) {
lbbb = safe_log_add(lbbb, log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*h + lb_target_stride*s]);
}*/
for (int64_t h = 0; h < height; h++) {
scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t +
la_height_stride*h + la_target_stride*s];
scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t +
lb_height_stride*h + lb_target_stride*s];
scalar_t log_alpha_beta = laaa + lbbb;
scalar_t& lcab =
gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*current_target_prime];
if (lcab == -INFINITY) {
lcab = log_alpha_beta;
} else {
scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta);
lcab = std::log(std::exp(lcab-max)+std::exp(log_alpha_beta-max))+max;
}
}
}
}
scalar_t nll = neg_log_likelihood_data[b];
scalar_t gr = grad_out_data[b * grad_out_batch_stride];
for (int64_t c = 0; c < num_labels; c++) {
for (int64_t h = 0; h < height; h++) {
scalar_t& res = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*c];
if (t < input_length && (! zero_infinity || nll != INFINITY)) {
scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*c];
if (res == -INFINITY)
res = 0;
else
res = (std::exp(lp) - std::exp(res + nll - lp)) * gr;
}
else {
res = 0.;
}
}
}
}
}
at::Tensor ctc2d_gpu_backward_template(
const at::Tensor grad_out, const at::Tensor log_probs, const at::Tensor targets,
const at::Tensor input_lengths, const at::Tensor target_lengths,
const at::Tensor neg_log_likelihood, const at::Tensor log_alpha,
int64_t BLANK
) {
bool zero_infinity = 0;
int64_t max_target_length = targets.size(1);
int64_t max_input_length = log_probs.size(0);
int64_t height = log_probs.size(1);
int64_t batch_size = log_probs.size(2);
int64_t num_labels = log_probs.size(3);
int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1);
int64_t num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
at::Tensor log_beta = at::zeros(
{batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1},
log_probs.options()
);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_beta_gpu_template", ([&] {
ctc2d_log_beta_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
log_beta.data<scalar_t>(), log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK
);
}));
at::Tensor grad = at::full_like(log_probs, -INFINITY);
// bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450;
bool is_large = 0;
if (is_large) { // large alphabet, large batch
// std::cout << "+++Large+++" << std::endl;
// this computes the probs, minuend in (16)
exp_out(grad, log_probs);
// now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that
// blanks are in every other position.
// maybe we should kernelize this, too.
auto grad_blank = grad.narrow(3, BLANK, 1);
grad_blank -= (at::logsumexp(
log_alpha.as_strided({batch_size, log_alpha.size(1), log_alpha.size(2), max_target_length+1},
{log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3)*2}) +
log_beta.as_strided({batch_size, log_beta.size(1), log_beta.size(2), max_target_length+1},
{log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3)*2}),
3, true)
.permute({1, 2, 0, 3})
.add_(neg_log_likelihood.view({1, 1, batch_size, 1}))
.sub_(log_probs.narrow(3, BLANK, 1))
.exp_()
);
grad *= grad_out.view({1, 1, batch_size, 1});
// For the non-blank characters, we use a kernel to compute the subtrahend.
// Again we might configure block and grid in a better way.
batch_per_block = CUDA_NUM_THREADS / max_target_length;
num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_nonblank", ([&] {
ctc2d_backward_collect_nonblank_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, batch_per_block, height, BLANK, zero_infinity
);
}));
} else { // small problem, use naive algorithm
// std::cout << "+++Small+++" << std::endl;
batch_per_block = CUDA_NUM_THREADS / max_input_length;
num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_all", ([&] {
ctc2d_backward_collect_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels,
grad.data<scalar_t>(),
grad_out.data<scalar_t>(), grad_out.stride(0),
log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(),
log_probs.data<scalar_t>(),
input_lengths.data<int64_t>(), max_input_length,
targets.data<int64_t>(),
target_lengths.data<int64_t>(), max_target_length,
neg_log_likelihood.data<scalar_t>(),
grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3),
log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3),
log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3),
log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3),
targets.stride(0), targets.stride(1),
batch_size, num_labels, batch_per_block, height, BLANK, zero_infinity
);
}));
}
return grad;
}
|
e1be741f7502b8bd62fdb86b8129275a49f180c1.hip | // !!! This is a file automatically generated by hipify!!!
#define EIGEN_USE_GPU
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "tensor_benchmarks.h"
// Simple functions
#define BM_FuncGPU(FUNC) \
static void BM_##FUNC(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
hipDeviceSynchronize(); \
suite.FUNC(iters); \
} \
BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
BM_FuncGPU(memcpy);
BM_FuncGPU(typeCasting);
BM_FuncGPU(random);
BM_FuncGPU(slicing);
BM_FuncGPU(rowChip);
BM_FuncGPU(colChip);
BM_FuncGPU(shuffling);
BM_FuncGPU(padding);
BM_FuncGPU(striding);
BM_FuncGPU(broadcasting);
BM_FuncGPU(coeffWiseOp);
BM_FuncGPU(algebraicFunc);
BM_FuncGPU(transcendentalFunc);
BM_FuncGPU(rowReduction);
BM_FuncGPU(colReduction);
BM_FuncGPU(fullReduction);
// Contractions
#define BM_FuncWithInputDimsGPU(FUNC, D1, D2, D3) \
static void BM_##FUNC##_##D1##x##D2##x##D3(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, D1, D2, D3); \
hipDeviceSynchronize(); \
suite.FUNC(iters); \
} \
BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3, 10, 5000);
BM_FuncWithInputDimsGPU(contraction, N, N, N);
BM_FuncWithInputDimsGPU(contraction, 64, N, N);
BM_FuncWithInputDimsGPU(contraction, N, 64, N);
BM_FuncWithInputDimsGPU(contraction, N, N, 64);
// Convolutions
#define BM_FuncWithKernelDimsGPU(FUNC, DIM1, DIM2) \
static void BM_##FUNC##_##DIM1##x##DIM2(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
hipDeviceSynchronize(); \
suite.FUNC(iters, DIM1, DIM2); \
} \
BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2, 128, 5000);
BM_FuncWithKernelDimsGPU(convolution, 7, 1);
BM_FuncWithKernelDimsGPU(convolution, 1, 7);
BM_FuncWithKernelDimsGPU(convolution, 7, 4);
BM_FuncWithKernelDimsGPU(convolution, 4, 7);
BM_FuncWithKernelDimsGPU(convolution, 7, 64);
BM_FuncWithKernelDimsGPU(convolution, 64, 7);
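// For orientation, a hand expansion of one of the invocations above
// (added for illustration; BENCHMARK_RANGE and BenchmarkSuite come from the
// benchmark harness headers, not from this file):
//
//   static void BM_memcpy(int iters, int N) {
//     StopBenchmarkTiming();
//     Eigen::CudaStreamDevice stream;
//     Eigen::GpuDevice device(&stream);
//     BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N);
//     hipDeviceSynchronize();
//     suite.memcpy(iters);
//   }
//   BENCHMARK_RANGE(BM_memcpy, 10, 5000);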
| e1be741f7502b8bd62fdb86b8129275a49f180c1.cu | #define EIGEN_USE_GPU
#include <cuda.h>
#include <cuda_runtime.h>
#include "tensor_benchmarks.h"
// Simple functions
#define BM_FuncGPU(FUNC) \
static void BM_##FUNC(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
cudaDeviceSynchronize(); \
suite.FUNC(iters); \
} \
BENCHMARK_RANGE(BM_##FUNC, 10, 5000);
BM_FuncGPU(memcpy);
BM_FuncGPU(typeCasting);
BM_FuncGPU(random);
BM_FuncGPU(slicing);
BM_FuncGPU(rowChip);
BM_FuncGPU(colChip);
BM_FuncGPU(shuffling);
BM_FuncGPU(padding);
BM_FuncGPU(striding);
BM_FuncGPU(broadcasting);
BM_FuncGPU(coeffWiseOp);
BM_FuncGPU(algebraicFunc);
BM_FuncGPU(transcendentalFunc);
BM_FuncGPU(rowReduction);
BM_FuncGPU(colReduction);
BM_FuncGPU(fullReduction);
// Contractions
#define BM_FuncWithInputDimsGPU(FUNC, D1, D2, D3) \
static void BM_##FUNC##_##D1##x##D2##x##D3(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, D1, D2, D3); \
cudaDeviceSynchronize(); \
suite.FUNC(iters); \
} \
BENCHMARK_RANGE(BM_##FUNC##_##D1##x##D2##x##D3, 10, 5000);
BM_FuncWithInputDimsGPU(contraction, N, N, N);
BM_FuncWithInputDimsGPU(contraction, 64, N, N);
BM_FuncWithInputDimsGPU(contraction, N, 64, N);
BM_FuncWithInputDimsGPU(contraction, N, N, 64);
// Convolutions
#define BM_FuncWithKernelDimsGPU(FUNC, DIM1, DIM2) \
static void BM_##FUNC##_##DIM1##x##DIM2(int iters, int N) { \
StopBenchmarkTiming(); \
Eigen::CudaStreamDevice stream; \
Eigen::GpuDevice device(&stream); \
BenchmarkSuite<Eigen::GpuDevice, float> suite(device, N); \
cudaDeviceSynchronize(); \
suite.FUNC(iters, DIM1, DIM2); \
} \
BENCHMARK_RANGE(BM_##FUNC##_##DIM1##x##DIM2, 128, 5000);
BM_FuncWithKernelDimsGPU(convolution, 7, 1);
BM_FuncWithKernelDimsGPU(convolution, 1, 7);
BM_FuncWithKernelDimsGPU(convolution, 7, 4);
BM_FuncWithKernelDimsGPU(convolution, 4, 7);
BM_FuncWithKernelDimsGPU(convolution, 7, 64);
BM_FuncWithKernelDimsGPU(convolution, 64, 7);
|
bfffe950a440f042ce5bf960d07bf4922a58fa87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2014, Evghenii Gaburov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Based on radixSort from http://www.moderngpu.com
*/
#include "hip_helpers.cuh"
#include <cassert>
#define NUMBITS 8
#define NUMDIGITS (1<<NUMBITS)
typedef long long Key;
__forceinline__ __device__ int atomic_add_global(int* ptr, int value)
{
return atomicAdd(ptr, value);
}
static __device__ __forceinline__ int shfl_scan_add_step(int partial, int up_offset)
{
int result;
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0|p, %1, %2, 0;"
"@p add.u32 r0, r0, %3;"
"mov.u32 %0, r0;}"
: "=r"(result) : "r"(partial), "r"(up_offset), "r"(partial));
return result;
}
__forceinline__ __device__ int exclusive_scan_add(int value)
{
int mysum = value;
#pragma unroll
for(int i = 0; i < 5; ++i)
mysum = shfl_scan_add_step(mysum, 1 << i);
return mysum - value;
}
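// Illustrative trace of the warp-wide scan above (added for clarity, values
// are made up): with lanes holding value = {3, 1, 4, 1, 5, 9, 2, 6, ...},
// the five shuffle-up steps (offsets 1, 2, 4, 8, 16) build the inclusive
// prefix sums {3, 4, 8, 9, 14, 23, 25, 31, ...}; subtracting each lane's own
// value yields the exclusive scan {0, 3, 4, 8, 9, 14, 23, 25, ...}. The
// hard-coded 5 steps assume a 32-lane warp, i.e. programCount == 32.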
__global__
void countPass(
const Key keysAll[],
Key sortedAll[],
const int bit,
const int numElements,
int countsAll[],
int countsGlobal[])
{
const int blkIdx = taskIndex;
const int numBlocks = taskCount;
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
const int mask = (1 << NUMBITS) - 1;
const Key * keys = keysAll + blkIdx*blkDim;
Key * sorted = sortedAll + blkIdx*blkDim;
int * counts = countsAll + blkIdx*NUMDIGITS;
const int nloc = min(numElements - blkIdx*blkDim, blkDim);
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
counts[digit] = 0;
for (int i = programIndex; i < nloc; i += programCount)
if (i < nloc)
{
sorted[i] = keys[i];
const int key = mask & ((unsigned int)keys[i] >> bit);
atomic_add_global(&counts[key], 1);
}
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
atomic_add_global(&countsGlobal[digit], counts[digit]);
}
__global__
void sortPass(
Key keysAll[],
Key sorted[],
int bit,
int numElements,
int digitOffsetsAll[])
{
const int blkIdx = taskIndex;
const int numBlocks = taskCount;
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
const int keyIndex = blkIdx * blkDim;
Key * keys = keysAll + keyIndex;
const int nloc = min(numElements - keyIndex, blkDim);
const int mask = (1 << NUMBITS) - 1;
/* copy digit offset from Gmem to Lmem */
#if 1
__shared__ int digitOffsets_sh[NUMDIGITS*4];
volatile int *digitOffsets = digitOffsets_sh + warpIdx*NUMDIGITS;
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
digitOffsets[digit] = digitOffsetsAll[blkIdx*NUMDIGITS + digit];
#else
int *digitOffsets = &digitOffsetsAll[blkIdx*NUMDIGITS];
#endif
for (int i = programIndex; i < nloc; i += programCount)
if (i < nloc)
{
const int key = mask & ((unsigned int)keys[i] >> bit);
int scatter;
/* not a vector friendly loop */
#pragma unroll 1 /* needed, otherwise the compiler unrolls and optimizes the result :S */
for (int iv = 0; iv < programCount; iv++)
if (programIndex == iv)
scatter = digitOffsets[key]++;
sorted [scatter] = keys[i];
}
}
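// Note on the "not a vector friendly loop" above (comment added for clarity):
// several lanes of the same warp can carry the same digit in one iteration,
// so letting them all execute digitOffsets[key]++ concurrently would hand out
// duplicate scatter offsets. Serialising the post-increment over programIndex
// keeps the offsets unique and in lane order, at the cost of a short
// sequential section per iteration.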
__global__
void partialScanLocal(
int numBlocks,
int excScanAll[],
int countsAll[],
int partialSumAll[])
{
const int blkIdx = taskIndex;
const int blkDim = (numBlocks+taskCount-1)/taskCount;
const int bbeg = blkIdx * blkDim;
const int bend = min(bbeg + blkDim, numBlocks);
int (* countsBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])countsAll;
int (* excScanBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])excScanAll;
int (* partialSum)[NUMDIGITS] = ( int (*)[NUMDIGITS])partialSumAll;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
int prev = bbeg == 0 ? excScanBlock[0][digit] : 0;
for ( int block = bbeg; block < bend; block++)
{
const int y = countsBlock[block][digit];
excScanBlock[block][digit] = prev;
prev += y;
}
partialSum[blkIdx][digit] = excScanBlock[bend-1][digit] + countsBlock[bend-1][digit];
}
}
__global__
void partialScanGlobal(
const int numBlocks,
int partialSumAll[],
int prefixSumAll[])
{
int (* partialSum)[NUMDIGITS] = ( int (*)[NUMDIGITS])partialSumAll;
int (* prefixSum)[NUMDIGITS] = ( int (*)[NUMDIGITS]) prefixSumAll;
const int digit = taskIndex;
int carry = 0;
for (int block = programIndex; block < numBlocks; block += programCount)
{
const int value = partialSum[block][digit];
const int scan = exclusive_scan_add(value);
if (block < numBlocks)
prefixSum[block][digit] = scan + carry;
carry += __shfl(scan+value, programCount-1);
}
}
__global__
void completeScanGlobal(
int numBlocks,
int excScanAll[],
int carryValueAll[])
{
const int blkIdx = taskIndex;
const int blkDim = (numBlocks+taskCount-1)/taskCount;
const int bbeg = blkIdx * blkDim;
const int bend = min(bbeg + blkDim, numBlocks);
int (* excScanBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])excScanAll;
int (* carryValue)[NUMDIGITS] = ( int (*)[NUMDIGITS])carryValueAll;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
const int carry = carryValue[blkIdx][digit];
for ( int block = bbeg; block < bend; block++)
excScanBlock[block][digit] += carry;
}
}
__device__ static
inline void radixExclusiveScan(
const int numBlocks,
int excScanPtr[],
int countsPtr[],
int partialSum[],
int prefixSum[])
{
const int scale = 8;
launch (numBlocks/scale, 1,1, partialScanLocal)(numBlocks, excScanPtr, countsPtr, partialSum);
sync;
launch (NUMDIGITS,1,1,partialScanGlobal) (numBlocks/scale, partialSum, prefixSum);
sync;
launch (numBlocks/scale,1,1, completeScanGlobal) (numBlocks, excScanPtr, prefixSum);
sync;
}
__device__ static int * memoryPool = NULL;
__device__ static int numBlocks;
__device__ static int nSharedCounts;
__device__ static int nCountsGlobal;
__device__ static int nExcScan;
__device__ static int nCountsBlock;
__device__ static int nPartialSum;
__device__ static int nPrefixSum;
__device__ static int * sharedCounts;
__device__ static int * countsGlobal;
__device__ static int * excScan;
__device__ static int * counts;
__device__ static int * partialSum;
__device__ static int * prefixSum;
__device__ static int numElementsBuf = 0;
__device__ static Key * bufKeys;
__global__
void radixSort_alloc___export(const int n)
{
assert(memoryPool == NULL);
numBlocks = 13*32*4;
nSharedCounts = NUMDIGITS*numBlocks;
nCountsGlobal = NUMDIGITS;
nExcScan = NUMDIGITS*numBlocks;
nCountsBlock = NUMDIGITS*numBlocks;
nPartialSum = NUMDIGITS*numBlocks;
nPrefixSum = NUMDIGITS*numBlocks;
const int nalloc =
nSharedCounts +
nCountsGlobal +
nExcScan +
nCountsBlock +
nPartialSum +
nPrefixSum;
if (programIndex == 0)
memoryPool = new int[nalloc];
sharedCounts = memoryPool;
countsGlobal = sharedCounts + nSharedCounts;
excScan = countsGlobal + nCountsGlobal;
counts = excScan + nExcScan;
partialSum = counts + nCountsBlock;
prefixSum = partialSum + nPartialSum;
}
extern "C"
void radixSort_alloc(const int n)
{
hipLaunchKernelGGL(( radixSort_alloc___export), dim3(1),dim3(32), 0, 0, n);
sync;
}
__device__ static
void radixSort_freeBufKeys()
{
if (numElementsBuf > 0)
{
if (programIndex == 0)
delete [] bufKeys;
numElementsBuf = 0;
}
}
__global__ void radixSort_free___export()
{
assert(memoryPool != NULL);
if (programIndex == 0)
delete [] memoryPool;
memoryPool = NULL;
radixSort_freeBufKeys();
}
extern "C"
void radixSort_free()
{
hipLaunchKernelGGL(( radixSort_free___export), dim3(1),dim3(32), 0, 0, );
sync;
}
__global__ void radixSort___export(
const int numElements,
Key keys[],
const int nBits)
{
#ifdef __NVPTX__
assert((numBlocks & 3) == 0); /* task granularity on Kepler is 4 */
#endif
if (numElementsBuf < numElements)
radixSort_freeBufKeys();
if (numElementsBuf == 0)
{
numElementsBuf = numElements;
if (programIndex == 0)
bufKeys = new Key[numElementsBuf];
}
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
for ( int bit = 0; bit < nBits; bit += NUMBITS)
{
/* initialize histogram for each digit */
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
countsGlobal[digit] = 0;
/* compute histogram for each digit */
launch (numBlocks,1,1, countPass)(keys, bufKeys, bit, numElements, counts, countsGlobal);
sync;
/* exclusive scan on global histogram */
int carry = 0;
excScan[0] = 0;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
const int value = countsGlobal[digit];
const int scan = exclusive_scan_add(value);
excScan[digit] = scan + carry;
carry += __shfl(scan+value, programCount-1);
}
/* computing offsets for each digit */
radixExclusiveScan(numBlocks, excScan, counts, partialSum, prefixSum);
/* sorting */
launch (numBlocks,1,1,
sortPass)(
bufKeys,
keys,
bit,
numElements,
excScan);
sync;
}
}
extern "C"
void radixSort(
const int numElements,
Key keys[],
const int nBits)
{
hipDeviceSetCacheConfig ( hipFuncCachePreferEqual );
hipLaunchKernelGGL(( radixSort___export), dim3(1),dim3(32), 0, 0, numElements, keys, nBits);
sync;
}
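// ---------------------------------------------------------------------------
// Hedged usage sketch added for illustration; it is not part of the original
// source. It assumes the key buffer is visible to the device (managed memory
// is used here) and that all 64 bits of each key are significant. Error
// checking is omitted for brevity.
static void example_radix_sort_usage(int n)
{
    Key *keys = NULL;
    hipMallocManaged((void**)&keys, n * sizeof(Key)); // device-visible keys
    for (int i = 0; i < n; ++i)
        keys[i] = (Key)(n - i); // descending input, easy to verify afterwards
    radixSort_alloc(n); // one-time scratch allocation (histograms, scan buffers)
    radixSort(n, keys, 64); // NUMBITS = 8 bits per pass -> 8 passes over 64-bit keys
    radixSort_free(); // release scratch and the internal key buffer
    hipFree(keys);
}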
| bfffe950a440f042ce5bf960d07bf4922a58fa87.cu | /*
Copyright (c) 2014, Evghenii Gaburov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Based on radixSort from http://www.moderngpu.com
*/
#include "cuda_helpers.cuh"
#include <cassert>
#define NUMBITS 8
#define NUMDIGITS (1<<NUMBITS)
typedef long long Key;
__forceinline__ __device__ int atomic_add_global(int* ptr, int value)
{
return atomicAdd(ptr, value);
}
static __device__ __forceinline__ int shfl_scan_add_step(int partial, int up_offset)
{
int result;
asm(
"{.reg .u32 r0;"
".reg .pred p;"
"shfl.up.b32 r0|p, %1, %2, 0;"
"@p add.u32 r0, r0, %3;"
"mov.u32 %0, r0;}"
: "=r"(result) : "r"(partial), "r"(up_offset), "r"(partial));
return result;
}
__forceinline__ __device__ int exclusive_scan_add(int value)
{
int mysum = value;
#pragma unroll
for(int i = 0; i < 5; ++i)
mysum = shfl_scan_add_step(mysum, 1 << i);
return mysum - value;
}
__global__
void countPass(
const Key keysAll[],
Key sortedAll[],
const int bit,
const int numElements,
int countsAll[],
int countsGlobal[])
{
const int blkIdx = taskIndex;
const int numBlocks = taskCount;
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
const int mask = (1 << NUMBITS) - 1;
const Key * keys = keysAll + blkIdx*blkDim;
Key * sorted = sortedAll + blkIdx*blkDim;
int * counts = countsAll + blkIdx*NUMDIGITS;
const int nloc = min(numElements - blkIdx*blkDim, blkDim);
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
counts[digit] = 0;
for (int i = programIndex; i < nloc; i += programCount)
if (i < nloc)
{
sorted[i] = keys[i];
const int key = mask & ((unsigned int)keys[i] >> bit);
atomic_add_global(&counts[key], 1);
}
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
atomic_add_global(&countsGlobal[digit], counts[digit]);
}
__global__
void sortPass(
Key keysAll[],
Key sorted[],
int bit,
int numElements,
int digitOffsetsAll[])
{
const int blkIdx = taskIndex;
const int numBlocks = taskCount;
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
const int keyIndex = blkIdx * blkDim;
Key * keys = keysAll + keyIndex;
const int nloc = min(numElements - keyIndex, blkDim);
const int mask = (1 << NUMBITS) - 1;
/* copy digit offset from Gmem to Lmem */
#if 1
__shared__ int digitOffsets_sh[NUMDIGITS*4];
volatile int *digitOffsets = digitOffsets_sh + warpIdx*NUMDIGITS;
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
digitOffsets[digit] = digitOffsetsAll[blkIdx*NUMDIGITS + digit];
#else
int *digitOffsets = &digitOffsetsAll[blkIdx*NUMDIGITS];
#endif
for (int i = programIndex; i < nloc; i += programCount)
if (i < nloc)
{
const int key = mask & ((unsigned int)keys[i] >> bit);
int scatter;
/* not a vector friendly loop */
#pragma unroll 1 /* needed, otherwise the compiler unrolls and optimizes the result :S */
for (int iv = 0; iv < programCount; iv++)
if (programIndex == iv)
scatter = digitOffsets[key]++;
sorted [scatter] = keys[i];
}
}
__global__
void partialScanLocal(
int numBlocks,
int excScanAll[],
int countsAll[],
int partialSumAll[])
{
const int blkIdx = taskIndex;
const int blkDim = (numBlocks+taskCount-1)/taskCount;
const int bbeg = blkIdx * blkDim;
const int bend = min(bbeg + blkDim, numBlocks);
int (* countsBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])countsAll;
int (* excScanBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])excScanAll;
int (* partialSum)[NUMDIGITS] = ( int (*)[NUMDIGITS])partialSumAll;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
int prev = bbeg == 0 ? excScanBlock[0][digit] : 0;
for ( int block = bbeg; block < bend; block++)
{
const int y = countsBlock[block][digit];
excScanBlock[block][digit] = prev;
prev += y;
}
partialSum[blkIdx][digit] = excScanBlock[bend-1][digit] + countsBlock[bend-1][digit];
}
}
__global__
void partialScanGlobal(
const int numBlocks,
int partialSumAll[],
int prefixSumAll[])
{
int (* partialSum)[NUMDIGITS] = ( int (*)[NUMDIGITS])partialSumAll;
int (* prefixSum)[NUMDIGITS] = ( int (*)[NUMDIGITS]) prefixSumAll;
const int digit = taskIndex;
int carry = 0;
for (int block = programIndex; block < numBlocks; block += programCount)
{
const int value = partialSum[block][digit];
const int scan = exclusive_scan_add(value);
if (block < numBlocks)
prefixSum[block][digit] = scan + carry;
carry += __shfl(scan+value, programCount-1);
}
}
__global__
void completeScanGlobal(
int numBlocks,
int excScanAll[],
int carryValueAll[])
{
const int blkIdx = taskIndex;
const int blkDim = (numBlocks+taskCount-1)/taskCount;
const int bbeg = blkIdx * blkDim;
const int bend = min(bbeg + blkDim, numBlocks);
int (* excScanBlock)[NUMDIGITS] = ( int (*)[NUMDIGITS])excScanAll;
int (* carryValue)[NUMDIGITS] = ( int (*)[NUMDIGITS])carryValueAll;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
const int carry = carryValue[blkIdx][digit];
for ( int block = bbeg; block < bend; block++)
excScanBlock[block][digit] += carry;
}
}
__device__ static
inline void radixExclusiveScan(
const int numBlocks,
int excScanPtr[],
int countsPtr[],
int partialSum[],
int prefixSum[])
{
const int scale = 8;
launch (numBlocks/scale, 1,1, partialScanLocal)(numBlocks, excScanPtr, countsPtr, partialSum);
sync;
launch (NUMDIGITS,1,1,partialScanGlobal) (numBlocks/scale, partialSum, prefixSum);
sync;
launch (numBlocks/scale,1,1, completeScanGlobal) (numBlocks, excScanPtr, prefixSum);
sync;
}
__device__ static int * memoryPool = NULL;
__device__ static int numBlocks;
__device__ static int nSharedCounts;
__device__ static int nCountsGlobal;
__device__ static int nExcScan;
__device__ static int nCountsBlock;
__device__ static int nPartialSum;
__device__ static int nPrefixSum;
__device__ static int * sharedCounts;
__device__ static int * countsGlobal;
__device__ static int * excScan;
__device__ static int * counts;
__device__ static int * partialSum;
__device__ static int * prefixSum;
__device__ static int numElementsBuf = 0;
__device__ static Key * bufKeys;
__global__
void radixSort_alloc___export(const int n)
{
assert(memoryPool == NULL);
numBlocks = 13*32*4;
nSharedCounts = NUMDIGITS*numBlocks;
nCountsGlobal = NUMDIGITS;
nExcScan = NUMDIGITS*numBlocks;
nCountsBlock = NUMDIGITS*numBlocks;
nPartialSum = NUMDIGITS*numBlocks;
nPrefixSum = NUMDIGITS*numBlocks;
const int nalloc =
nSharedCounts +
nCountsGlobal +
nExcScan +
nCountsBlock +
nPartialSum +
nPrefixSum;
if (programIndex == 0)
memoryPool = new int[nalloc];
sharedCounts = memoryPool;
countsGlobal = sharedCounts + nSharedCounts;
excScan = countsGlobal + nCountsGlobal;
counts = excScan + nExcScan;
partialSum = counts + nCountsBlock;
prefixSum = partialSum + nPartialSum;
}
extern "C"
void radixSort_alloc(const int n)
{
radixSort_alloc___export<<<1,32>>>(n);
sync;
}
__device__ static
void radixSort_freeBufKeys()
{
if (numElementsBuf > 0)
{
if (programIndex == 0)
delete [] bufKeys;
numElementsBuf = 0;
}
}
__global__ void radixSort_free___export()
{
assert(memoryPool != NULL);
if (programIndex == 0)
delete [] memoryPool;
memoryPool = NULL;
radixSort_freeBufKeys();
}
extern "C"
void radixSort_free()
{
radixSort_free___export<<<1,32>>>();
sync;
}
__global__ void radixSort___export(
const int numElements,
Key keys[],
const int nBits)
{
#ifdef __NVPTX__
assert((numBlocks & 3) == 0); /* task granularity on Kepler is 4 */
#endif
if (numElementsBuf < numElements)
radixSort_freeBufKeys();
if (numElementsBuf == 0)
{
numElementsBuf = numElements;
if (programIndex == 0)
bufKeys = new Key[numElementsBuf];
}
const int blkDim = (numElements + numBlocks - 1) / numBlocks;
for ( int bit = 0; bit < nBits; bit += NUMBITS)
{
/* initialize histogram for each digit */
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
countsGlobal[digit] = 0;
/* compute histogram for each digit */
launch (numBlocks,1,1, countPass)(keys, bufKeys, bit, numElements, counts, countsGlobal);
sync;
/* exclusive scan on global histogram */
int carry = 0;
excScan[0] = 0;
#pragma unroll 8
for (int digit = programIndex; digit < NUMDIGITS; digit += programCount)
{
const int value = countsGlobal[digit];
const int scan = exclusive_scan_add(value);
excScan[digit] = scan + carry;
carry += __shfl(scan+value, programCount-1);
}
/* computing offsets for each digit */
radixExclusiveScan(numBlocks, excScan, counts, partialSum, prefixSum);
/* sorting */
launch (numBlocks,1,1,
sortPass)(
bufKeys,
keys,
bit,
numElements,
excScan);
sync;
}
}
extern "C"
void radixSort(
const int numElements,
Key keys[],
const int nBits)
{
cudaDeviceSetCacheConfig ( cudaFuncCachePreferEqual );
radixSort___export<<<1,32>>>(numElements, keys, nBits);
sync;
}
|
d27f34ac1b89ad0b96d10e6209c9a2c6fed56b20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "book.h"
__global__ void add(int a, int b, int *c) {
*c = a + b;
};
int main(void) {
int c;
int *dev_c;
HANDLE_ERROR(hipMalloc((void**)&dev_c, sizeof(int)));
add << <1, 1 >> >(2, 7, dev_c);
HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
printf("2 + 7 = %d\n", c);
hipFree(dev_c);
return 0;
}; | d27f34ac1b89ad0b96d10e6209c9a2c6fed56b20.cu | #include <iostream>
#include "book.h"
__global__ void add(int a, int b, int *c) {
*c = a + b;
};
int main(void) {
int c;
int *dev_c;
HANDLE_ERROR(cudaMalloc((void**)&dev_c, sizeof(int)));
add << <1, 1 >> >(2, 7, dev_c);
HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
return 0;
}; |
99fcbda2f75e975586b861052a3af7baed12c1cd.hip | // !!! This is a file automatically generated by hipify!!!
//
// =============== SHA256 part on nVidia GPU ======================
//
// NOTE: compile this .cu module for compute_10,sm_10 with --maxrregcount=64
//
#include <stdio.h>
#include <map>
#include <hip/hip_runtime.h>
#define applog(...) (void)0
//#include "salsa_kernel.h"
//#include "miner.h"
#include "sha256.h"
// define some error checking macros
#undef checkCudaErrors
#if WIN32
#define DELIMITER '/'
#else
#define DELIMITER '/'
#endif
#define __FILENAME__ ( strrchr(__FILE__, DELIMITER) != NULL ? strrchr(__FILE__, DELIMITER)+1 : __FILE__ )
#define checkCudaErrors(x) \
{ \
hipGetLastError(); \
x; \
hipError_t err = hipGetLastError(); \
if (err != hipSuccess) \
applog(LOG_ERR, "GPU #%d: hipError_t %d (%s) calling '%s' (%s line %d)\n", device_map[thr_id], err, hipGetErrorString(err), #x, __FILENAME__, __LINE__); \
}
// from salsa_kernel.cu
extern std::map<int, uint32_t *> context_idata[2];
extern std::map<int, uint32_t *> context_odata[2];
extern std::map<int, hipStream_t> context_streams[2];
extern std::map<int, uint32_t *> context_tstate[2];
extern std::map<int, uint32_t *> context_ostate[2];
extern std::map<int, uint32_t *> context_hash[2];
static const uint32_t host_sha256_h[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
static const uint32_t host_sha256_k[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
/* Elementary functions used by SHA256 */
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ (x >> 3))
#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ (x >> 10))
/* SHA256 round function */
#define RND(a, b, c, d, e, f, g, h, k) \
do { \
t0 = h + S1(e) + Ch(e, f, g) + k; \
t1 = S0(a) + Maj(a, b, c); \
d += t0; \
h = t0 + t1; \
} while (0)
/* Adjusted round function for rotating state */
#define RNDr(S, W, i) \
RND(S[(64 - i) % 8], S[(65 - i) % 8], \
S[(66 - i) % 8], S[(67 - i) % 8], \
S[(68 - i) % 8], S[(69 - i) % 8], \
S[(70 - i) % 8], S[(71 - i) % 8], \
W[i] + sha256_k[i])
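// Illustration of the rotating-state indexing above (comment added; not part
// of the original file). Instead of shuffling the eight working variables
// a..h after every round, RNDr re-labels the slots by round number:
//   i = 0: RND(S[0], S[1], S[2], S[3], S[4], S[5], S[6], S[7], ...) // a..h
//   i = 1: RND(S[7], S[0], S[1], S[2], S[3], S[4], S[5], S[6], ...) // old h slot now plays a
// so (64 - i) % 8 always picks whichever physical slot currently holds 'a'.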
static const uint32_t host_keypad[12] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000280
};
static const uint32_t host_innerpad[11] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x000004a0
};
static const uint32_t host_outerpad[8] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300
};
static const uint32_t host_finalblk[16] = {
0x00000001, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000620
};
//
// CUDA code
//
__constant__ uint32_t sha256_h[8];
__constant__ uint32_t sha256_k[64];
__constant__ uint32_t keypad[12];
__constant__ uint32_t innerpad[11];
__constant__ uint32_t outerpad[8];
__constant__ uint32_t finalblk[16];
__constant__ uint32_t pdata[20];
__constant__ uint32_t midstate[8];
__device__ void mycpy12(uint32_t *d, const uint32_t *s) {
#pragma unroll 3
for (int k=0; k < 3; k++) d[k] = s[k];
}
__device__ void mycpy16(uint32_t *d, const uint32_t *s) {
#pragma unroll 4
for (int k=0; k < 4; k++) d[k] = s[k];
}
__device__ void mycpy32(uint32_t *d, const uint32_t *s) {
#pragma unroll 8
for (int k=0; k < 8; k++) d[k] = s[k];
}
__device__ void mycpy44(uint32_t *d, const uint32_t *s) {
#pragma unroll 11
for (int k=0; k < 11; k++) d[k] = s[k];
}
__device__ void mycpy48(uint32_t *d, const uint32_t *s) {
#pragma unroll 12
for (int k=0; k < 12; k++) d[k] = s[k];
}
__device__ void mycpy64(uint32_t *d, const uint32_t *s) {
#pragma unroll 16
for (int k=0; k < 16; k++) d[k] = s[k];
}
__device__ uint32_t cuda_swab32(uint32_t x)
{
return (((x << 24) & 0xff000000u) | ((x << 8) & 0x00ff0000u)
| ((x >> 8) & 0x0000ff00u) | ((x >> 24) & 0x000000ffu));
}
__device__ void mycpy32_swab32(uint32_t *d, const uint32_t *s) {
#pragma unroll 8
for (int k=0; k < 8; k++) d[k] = cuda_swab32(s[k]);
}
__device__ void mycpy64_swab32(uint32_t *d, const uint32_t *s) {
#pragma unroll 16
for (int k=0; k < 16; k++) d[k] = cuda_swab32(s[k]);
}
__device__ void cuda_sha256_init(uint32_t *state)
{
mycpy32(state, sha256_h);
}
/*
* SHA256 block compression function. The 256-bit state is transformed via
* the 512-bit input block to produce a new state. Modified for lower register use.
*/
__device__ void cuda_sha256_transform(uint32_t *state, const uint32_t *block)
{
uint32_t W[64]; // only 4 of these are accessed during each partial Mix
uint32_t S[8];
uint32_t t0, t1;
int i;
/* 1. Initialize working variables. */
mycpy32(S, state);
/* 2. Prepare message schedule W and Mix. */
mycpy16(W, block);
RNDr(S, W, 0); RNDr(S, W, 1); RNDr(S, W, 2); RNDr(S, W, 3);
mycpy16(W+4, block+4);
RNDr(S, W, 4); RNDr(S, W, 5); RNDr(S, W, 6); RNDr(S, W, 7);
mycpy16(W+8, block+8);
RNDr(S, W, 8); RNDr(S, W, 9); RNDr(S, W, 10); RNDr(S, W, 11);
mycpy16(W+12, block+12);
RNDr(S, W, 12); RNDr(S, W, 13); RNDr(S, W, 14); RNDr(S, W, 15);
#pragma unroll 2
for (i = 16; i < 20; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 16); RNDr(S, W, 17); RNDr(S, W, 18); RNDr(S, W, 19);
#pragma unroll 2
for (i = 20; i < 24; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 20); RNDr(S, W, 21); RNDr(S, W, 22); RNDr(S, W, 23);
#pragma unroll 2
for (i = 24; i < 28; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 24); RNDr(S, W, 25); RNDr(S, W, 26); RNDr(S, W, 27);
#pragma unroll 2
for (i = 28; i < 32; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 28); RNDr(S, W, 29); RNDr(S, W, 30); RNDr(S, W, 31);
#pragma unroll 2
for (i = 32; i < 36; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 32); RNDr(S, W, 33); RNDr(S, W, 34); RNDr(S, W, 35);
#pragma unroll 2
for (i = 36; i < 40; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 36); RNDr(S, W, 37); RNDr(S, W, 38); RNDr(S, W, 39);
#pragma unroll 2
for (i = 40; i < 44; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 40); RNDr(S, W, 41); RNDr(S, W, 42); RNDr(S, W, 43);
#pragma unroll 2
for (i = 44; i < 48; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 44); RNDr(S, W, 45); RNDr(S, W, 46); RNDr(S, W, 47);
#pragma unroll 2
for (i = 48; i < 52; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 48); RNDr(S, W, 49); RNDr(S, W, 50); RNDr(S, W, 51);
#pragma unroll 2
for (i = 52; i < 56; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 52); RNDr(S, W, 53); RNDr(S, W, 54); RNDr(S, W, 55);
#pragma unroll 2
for (i = 56; i < 60; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 56); RNDr(S, W, 57); RNDr(S, W, 58); RNDr(S, W, 59);
#pragma unroll 2
for (i = 60; i < 64; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 60); RNDr(S, W, 61); RNDr(S, W, 62); RNDr(S, W, 63);
/* 3. Mix local working variables into global state */
#pragma unroll 8
for (i = 0; i < 8; i++)
state[i] += S[i];
}
//
// HMAC SHA256 functions, modified to work with pdata and nonce directly
//
__device__ void cuda_HMAC_SHA256_80_init(uint32_t *tstate, uint32_t *ostate, uint32_t nonce)
{
uint32_t ihash[8];
uint32_t pad[16];
int i;
/* tstate is assumed to contain the midstate of key */
mycpy12(pad, pdata + 16);
pad[3] = nonce;
mycpy48(pad + 4, keypad);
cuda_sha256_transform(tstate, pad);
mycpy32(ihash, tstate);
cuda_sha256_init(ostate);
#pragma unroll 8
for (i = 0; i < 8; i++)
pad[i] = ihash[i] ^ 0x5c5c5c5c;
#pragma unroll 8
for (i=8; i < 16; i++)
pad[i] = 0x5c5c5c5c;
cuda_sha256_transform(ostate, pad);
cuda_sha256_init(tstate);
#pragma unroll 8
for (i = 0; i < 8; i++)
pad[i] = ihash[i] ^ 0x36363636;
#pragma unroll 8
for (i=8; i < 16; i++)
pad[i] = 0x36363636;
cuda_sha256_transform(tstate, pad);
}
__device__ void
cuda_PBKDF2_SHA256_80_128(const uint32_t *tstate,
const uint32_t *ostate,
uint32_t *output,
uint32_t nonce)
{
uint32_t istate[8], ostate2[8];
uint32_t ibuf[16], obuf[16];
mycpy32(istate, tstate);
cuda_sha256_transform(istate, pdata);
mycpy12(ibuf, pdata + 16);
ibuf[3] = nonce;
ibuf[4] = 1;
mycpy44(ibuf + 5, innerpad);
mycpy32(obuf, istate);
mycpy32(obuf + 8, outerpad);
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 2;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+8, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 3;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+16, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 4;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+24, ostate2); // TODO: coalescing would be desired
}
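// Context note (added; based on the standard scrypt construction rather than
// anything stated in this file): this routine is PBKDF2-HMAC-SHA256 with a
// single iteration (c = 1), keyed and salted by the same 80-byte block
// header with the nonce patched in. ibuf[4] = 1..4 is the big-endian block
// index INT(i) from RFC 2898, so the four transform/output rounds emit
// 4 x 32 = 128 bytes -- the X buffer that the scrypt core mixes afterwards.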
__global__ void cuda_pre_sha256(uint32_t g_inp[32], uint32_t g_tstate_ext[8], uint32_t g_ostate_ext[8], uint32_t nonce)
{
nonce += (blockIdx.x * blockDim.x) + threadIdx.x;
g_inp += 32 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_tstate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_ostate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
uint32_t tstate[8], ostate[8];
mycpy32(tstate, midstate);
cuda_HMAC_SHA256_80_init(tstate, ostate, nonce);
mycpy32(g_tstate_ext, tstate); // TODO: coalescing would be desired
mycpy32(g_ostate_ext, ostate); // TODO: coalescing would be desired
cuda_PBKDF2_SHA256_80_128(tstate, ostate, g_inp, nonce);
}
__global__ void cuda_post_sha256(uint32_t g_output[8], uint32_t g_tstate_ext[8], uint32_t g_ostate_ext[8], uint32_t g_salt_ext[32])
{
g_output += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_tstate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_ostate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_salt_ext += 32 * ((blockIdx.x * blockDim.x) + threadIdx.x);
uint32_t tstate[16];
mycpy32(tstate, g_tstate_ext); // TODO: coalescing would be desired
uint32_t halfsalt[16];
mycpy64_swab32(halfsalt, g_salt_ext); // TODO: coalescing would be desired
cuda_sha256_transform(tstate, halfsalt);
mycpy64_swab32(halfsalt, g_salt_ext+16); // TODO: coalescing would be desired
cuda_sha256_transform(tstate, halfsalt);
cuda_sha256_transform(tstate, finalblk);
uint32_t buf[16];
mycpy32(buf, tstate);
mycpy32(buf + 8, outerpad);
uint32_t ostate[16];
mycpy32(ostate, g_ostate_ext);
cuda_sha256_transform(ostate, buf);
mycpy32_swab32(g_output, ostate); // TODO: coalescing would be desired
}
//
// callable host code to initialize constants and to call kernels
//
extern "C" void prepare_sha256(int thr_id, uint32_t host_pdata[20], uint32_t host_midstate[8])
{
static bool init[8] = {false, false, false, false, false, false, false, false};
if (!init[thr_id])
{
checkCudaErrors(hipMemcpyToSymbol(sha256_h, host_sha256_h, sizeof(host_sha256_h), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(sha256_k, host_sha256_k, sizeof(host_sha256_k), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(keypad, host_keypad, sizeof(host_keypad), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(innerpad, host_innerpad, sizeof(host_innerpad), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(outerpad, host_outerpad, sizeof(host_outerpad), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(finalblk, host_finalblk, sizeof(host_finalblk), 0, hipMemcpyHostToDevice));
init[thr_id] = true;
}
checkCudaErrors(hipMemcpyToSymbol(pdata, host_pdata, 20*sizeof(uint32_t), 0, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToSymbol(midstate, host_midstate, 8*sizeof(uint32_t), 0, hipMemcpyHostToDevice));
}
extern "C" void pre_sha256(int thr_id, int stream, uint32_t nonce, int throughput)
{
dim3 block(128);
dim3 grid((throughput+127)/128);
printf("grid: %d\n", grid.x);
hipLaunchKernelGGL(( cuda_pre_sha256), dim3(grid), dim3(block), 0, context_streams[stream][thr_id], context_idata[stream][thr_id],
context_tstate[stream][thr_id],
context_ostate[stream][thr_id],
nonce);
}
extern "C" void post_sha256(int thr_id, int stream, int throughput)
{
dim3 block(128);
dim3 grid((throughput+127)/128);
hipLaunchKernelGGL(( cuda_post_sha256), dim3(grid), dim3(block), 0, context_streams[stream][thr_id], context_hash[stream][thr_id],
context_tstate[stream][thr_id],
context_ostate[stream][thr_id],
context_odata[stream][thr_id]);
}
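// ---------------------------------------------------------------------------
// Hedged call-order sketch (comments added for orientation; the miner code
// that normally drives these entry points lives elsewhere, so the step in the
// middle is an assumption about salsa_kernel, not something shown here):
//
//   prepare_sha256(thr_id, pdata, midstate); // constants + per-work header
//   pre_sha256(thr_id, stream, nonce, throughput); // HMAC init + PBKDF2 -> context_idata
//   // ... run the scrypt core on context_idata -> context_odata ...
//   post_sha256(thr_id, stream, throughput); // final PBKDF2 -> context_hash
//
// i.e. this file only produces the scrypt input and digests the scrypt
// output; the memory-hard mixing itself is implemented in salsa_kernel.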
| 99fcbda2f75e975586b861052a3af7baed12c1cd.cu | //
// =============== SHA256 part on nVidia GPU ======================
//
// NOTE: compile this .cu module for compute_10,sm_10 with --maxrregcount=64
//
#include <stdio.h>
#include <map>
#include <cuda.h>
#define applog(...) (void)0
//#include "salsa_kernel.h"
//#include "miner.h"
#include "sha256.h"
// define some error checking macros
#undef checkCudaErrors
#if WIN32
#define DELIMITER '/'
#else
#define DELIMITER '/'
#endif
#define __FILENAME__ ( strrchr(__FILE__, DELIMITER) != NULL ? strrchr(__FILE__, DELIMITER)+1 : __FILE__ )
#define checkCudaErrors(x) \
{ \
cudaGetLastError(); \
x; \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) \
applog(LOG_ERR, "GPU #%d: cudaError %d (%s) calling '%s' (%s line %d)\n", device_map[thr_id], err, cudaGetErrorString(err), #x, __FILENAME__, __LINE__); \
}
// from salsa_kernel.cu
extern std::map<int, uint32_t *> context_idata[2];
extern std::map<int, uint32_t *> context_odata[2];
extern std::map<int, cudaStream_t> context_streams[2];
extern std::map<int, uint32_t *> context_tstate[2];
extern std::map<int, uint32_t *> context_ostate[2];
extern std::map<int, uint32_t *> context_hash[2];
static const uint32_t host_sha256_h[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
static const uint32_t host_sha256_k[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
/* Elementary functions used by SHA256 */
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
#define Maj(x, y, z) ((x & (y | z)) | (y & z))
#define ROTR(x, n) ((x >> n) | (x << (32 - n)))
#define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ (x >> 3))
#define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ (x >> 10))
/* SHA256 round function */
#define RND(a, b, c, d, e, f, g, h, k) \
do { \
t0 = h + S1(e) + Ch(e, f, g) + k; \
t1 = S0(a) + Maj(a, b, c); \
d += t0; \
h = t0 + t1; \
} while (0)
/* Adjusted round function for rotating state */
#define RNDr(S, W, i) \
RND(S[(64 - i) % 8], S[(65 - i) % 8], \
S[(66 - i) % 8], S[(67 - i) % 8], \
S[(68 - i) % 8], S[(69 - i) % 8], \
S[(70 - i) % 8], S[(71 - i) % 8], \
W[i] + sha256_k[i])
static const uint32_t host_keypad[12] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000280
};
static const uint32_t host_innerpad[11] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x000004a0
};
static const uint32_t host_outerpad[8] = {
0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300
};
static const uint32_t host_finalblk[16] = {
0x00000001, 0x80000000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x00000620
};
//
// CUDA code
//
__constant__ uint32_t sha256_h[8];
__constant__ uint32_t sha256_k[64];
__constant__ uint32_t keypad[12];
__constant__ uint32_t innerpad[11];
__constant__ uint32_t outerpad[8];
__constant__ uint32_t finalblk[16];
__constant__ uint32_t pdata[20];
__constant__ uint32_t midstate[8];
__device__ void mycpy12(uint32_t *d, const uint32_t *s) {
#pragma unroll 3
for (int k=0; k < 3; k++) d[k] = s[k];
}
__device__ void mycpy16(uint32_t *d, const uint32_t *s) {
#pragma unroll 4
for (int k=0; k < 4; k++) d[k] = s[k];
}
__device__ void mycpy32(uint32_t *d, const uint32_t *s) {
#pragma unroll 8
for (int k=0; k < 8; k++) d[k] = s[k];
}
__device__ void mycpy44(uint32_t *d, const uint32_t *s) {
#pragma unroll 11
for (int k=0; k < 11; k++) d[k] = s[k];
}
__device__ void mycpy48(uint32_t *d, const uint32_t *s) {
#pragma unroll 12
for (int k=0; k < 12; k++) d[k] = s[k];
}
__device__ void mycpy64(uint32_t *d, const uint32_t *s) {
#pragma unroll 16
for (int k=0; k < 16; k++) d[k] = s[k];
}
__device__ uint32_t cuda_swab32(uint32_t x)
{
return (((x << 24) & 0xff000000u) | ((x << 8) & 0x00ff0000u)
| ((x >> 8) & 0x0000ff00u) | ((x >> 24) & 0x000000ffu));
}
__device__ void mycpy32_swab32(uint32_t *d, const uint32_t *s) {
#pragma unroll 8
for (int k=0; k < 8; k++) d[k] = cuda_swab32(s[k]);
}
__device__ void mycpy64_swab32(uint32_t *d, const uint32_t *s) {
#pragma unroll 16
for (int k=0; k < 16; k++) d[k] = cuda_swab32(s[k]);
}
__device__ void cuda_sha256_init(uint32_t *state)
{
mycpy32(state, sha256_h);
}
/*
* SHA256 block compression function. The 256-bit state is transformed via
* the 512-bit input block to produce a new state. Modified for lower register use.
*/
__device__ void cuda_sha256_transform(uint32_t *state, const uint32_t *block)
{
uint32_t W[64]; // only 4 of these are accessed during each partial Mix
uint32_t S[8];
uint32_t t0, t1;
int i;
/* 1. Initialize working variables. */
mycpy32(S, state);
/* 2. Prepare message schedule W and Mix. */
mycpy16(W, block);
RNDr(S, W, 0); RNDr(S, W, 1); RNDr(S, W, 2); RNDr(S, W, 3);
mycpy16(W+4, block+4);
RNDr(S, W, 4); RNDr(S, W, 5); RNDr(S, W, 6); RNDr(S, W, 7);
mycpy16(W+8, block+8);
RNDr(S, W, 8); RNDr(S, W, 9); RNDr(S, W, 10); RNDr(S, W, 11);
mycpy16(W+12, block+12);
RNDr(S, W, 12); RNDr(S, W, 13); RNDr(S, W, 14); RNDr(S, W, 15);
#pragma unroll 2
for (i = 16; i < 20; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 16); RNDr(S, W, 17); RNDr(S, W, 18); RNDr(S, W, 19);
#pragma unroll 2
for (i = 20; i < 24; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 20); RNDr(S, W, 21); RNDr(S, W, 22); RNDr(S, W, 23);
#pragma unroll 2
for (i = 24; i < 28; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 24); RNDr(S, W, 25); RNDr(S, W, 26); RNDr(S, W, 27);
#pragma unroll 2
for (i = 28; i < 32; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 28); RNDr(S, W, 29); RNDr(S, W, 30); RNDr(S, W, 31);
#pragma unroll 2
for (i = 32; i < 36; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 32); RNDr(S, W, 33); RNDr(S, W, 34); RNDr(S, W, 35);
#pragma unroll 2
for (i = 36; i < 40; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 36); RNDr(S, W, 37); RNDr(S, W, 38); RNDr(S, W, 39);
#pragma unroll 2
for (i = 40; i < 44; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 40); RNDr(S, W, 41); RNDr(S, W, 42); RNDr(S, W, 43);
#pragma unroll 2
for (i = 44; i < 48; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 44); RNDr(S, W, 45); RNDr(S, W, 46); RNDr(S, W, 47);
#pragma unroll 2
for (i = 48; i < 52; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 48); RNDr(S, W, 49); RNDr(S, W, 50); RNDr(S, W, 51);
#pragma unroll 2
for (i = 52; i < 56; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 52); RNDr(S, W, 53); RNDr(S, W, 54); RNDr(S, W, 55);
#pragma unroll 2
for (i = 56; i < 60; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 56); RNDr(S, W, 57); RNDr(S, W, 58); RNDr(S, W, 59);
#pragma unroll 2
for (i = 60; i < 64; i += 2) {
W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; }
RNDr(S, W, 60); RNDr(S, W, 61); RNDr(S, W, 62); RNDr(S, W, 63);
/* 3. Mix local working variables into global state */
#pragma unroll 8
for (i = 0; i < 8; i++)
state[i] += S[i];
}
//
// HMAC SHA256 functions, modified to work with pdata and nonce directly
//
__device__ void cuda_HMAC_SHA256_80_init(uint32_t *tstate, uint32_t *ostate, uint32_t nonce)
{
uint32_t ihash[8];
uint32_t pad[16];
int i;
/* tstate is assumed to contain the midstate of key */
mycpy12(pad, pdata + 16);
pad[3] = nonce;
mycpy48(pad + 4, keypad);
cuda_sha256_transform(tstate, pad);
mycpy32(ihash, tstate);
cuda_sha256_init(ostate);
#pragma unroll 8
for (i = 0; i < 8; i++)
pad[i] = ihash[i] ^ 0x5c5c5c5c;
#pragma unroll 8
for (i=8; i < 16; i++)
pad[i] = 0x5c5c5c5c;
cuda_sha256_transform(ostate, pad);
cuda_sha256_init(tstate);
#pragma unroll 8
for (i = 0; i < 8; i++)
pad[i] = ihash[i] ^ 0x36363636;
#pragma unroll 8
for (i=8; i < 16; i++)
pad[i] = 0x36363636;
cuda_sha256_transform(tstate, pad);
}
__device__ void
cuda_PBKDF2_SHA256_80_128(const uint32_t *tstate,
const uint32_t *ostate,
uint32_t *output,
uint32_t nonce)
{
uint32_t istate[8], ostate2[8];
uint32_t ibuf[16], obuf[16];
mycpy32(istate, tstate);
cuda_sha256_transform(istate, pdata);
mycpy12(ibuf, pdata + 16);
ibuf[3] = nonce;
ibuf[4] = 1;
mycpy44(ibuf + 5, innerpad);
mycpy32(obuf, istate);
mycpy32(obuf + 8, outerpad);
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 2;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+8, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 3;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+16, ostate2); // TODO: coalescing would be desired
mycpy32(obuf, istate);
ibuf[4] = 4;
cuda_sha256_transform(obuf, ibuf);
mycpy32(ostate2, ostate);
cuda_sha256_transform(ostate2, obuf);
mycpy32_swab32(output+24, ostate2); // TODO: coalescing would be desired
}
__global__ void cuda_pre_sha256(uint32_t g_inp[32], uint32_t g_tstate_ext[8], uint32_t g_ostate_ext[8], uint32_t nonce)
{
nonce += (blockIdx.x * blockDim.x) + threadIdx.x;
g_inp += 32 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_tstate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_ostate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
uint32_t tstate[8], ostate[8];
mycpy32(tstate, midstate);
cuda_HMAC_SHA256_80_init(tstate, ostate, nonce);
mycpy32(g_tstate_ext, tstate); // TODO: coalescing would be desired
mycpy32(g_ostate_ext, ostate); // TODO: coalescing would be desired
cuda_PBKDF2_SHA256_80_128(tstate, ostate, g_inp, nonce);
}
__global__ void cuda_post_sha256(uint32_t g_output[8], uint32_t g_tstate_ext[8], uint32_t g_ostate_ext[8], uint32_t g_salt_ext[32])
{
g_output += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_tstate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_ostate_ext += 8 * ((blockIdx.x * blockDim.x) + threadIdx.x);
g_salt_ext += 32 * ((blockIdx.x * blockDim.x) + threadIdx.x);
uint32_t tstate[16];
mycpy32(tstate, g_tstate_ext); // TODO: coalescing would be desired
uint32_t halfsalt[16];
mycpy64_swab32(halfsalt, g_salt_ext); // TODO: coalescing would be desired
cuda_sha256_transform(tstate, halfsalt);
mycpy64_swab32(halfsalt, g_salt_ext+16); // TODO: coalescing would be desired
cuda_sha256_transform(tstate, halfsalt);
cuda_sha256_transform(tstate, finalblk);
uint32_t buf[16];
mycpy32(buf, tstate);
mycpy32(buf + 8, outerpad);
uint32_t ostate[16];
mycpy32(ostate, g_ostate_ext);
cuda_sha256_transform(ostate, buf);
mycpy32_swab32(g_output, ostate); // TODO: coalescing would be desired
}
//
// callable host code to initialize constants and to call kernels
//
extern "C" void prepare_sha256(int thr_id, uint32_t host_pdata[20], uint32_t host_midstate[8])
{
static bool init[8] = {false, false, false, false, false, false, false, false};
if (!init[thr_id])
{
checkCudaErrors(cudaMemcpyToSymbol(sha256_h, host_sha256_h, sizeof(host_sha256_h), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(sha256_k, host_sha256_k, sizeof(host_sha256_k), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(keypad, host_keypad, sizeof(host_keypad), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(innerpad, host_innerpad, sizeof(host_innerpad), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(outerpad, host_outerpad, sizeof(host_outerpad), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(finalblk, host_finalblk, sizeof(host_finalblk), 0, cudaMemcpyHostToDevice));
init[thr_id] = true;
}
checkCudaErrors(cudaMemcpyToSymbol(pdata, host_pdata, 20*sizeof(uint32_t), 0, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToSymbol(midstate, host_midstate, 8*sizeof(uint32_t), 0, cudaMemcpyHostToDevice));
}
extern "C" void pre_sha256(int thr_id, int stream, uint32_t nonce, int throughput)
{
dim3 block(128);
dim3 grid((throughput+127)/128);
printf("grid: %d\n", grid.x);
cuda_pre_sha256<<<grid, block, 0, context_streams[stream][thr_id]>>>(context_idata[stream][thr_id],
context_tstate[stream][thr_id],
context_ostate[stream][thr_id],
nonce);
}
extern "C" void post_sha256(int thr_id, int stream, int throughput)
{
dim3 block(128);
dim3 grid((throughput+127)/128);
cuda_post_sha256<<<grid, block, 0, context_streams[stream][thr_id]>>>(context_hash[stream][thr_id],
context_tstate[stream][thr_id],
context_ostate[stream][thr_id],
context_odata[stream][thr_id]);
}
|
c1a5fc9b77675ef45333f5efa6bb70f7944a25f3.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| c1a5fc9b77675ef45333f5efa6bb70f7944a25f3.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
f2b9fcdcce2a5b258825a73110d458ac7d9dbf86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SGDLM/HostWrapperImpl.cuh"
#include "kernel_functions.cuh"
#include "cublas_manager.cuh"
#include "curand_manager.cuh"
#include "SGDLM/SGDLM.cuh"
template<typename DOUBLE> SGDLM::HostWrapperImpl<DOUBLE>::HostWrapperImpl(std::size_t no_gpus) :
memory_initialized(false), sim_memory_initialized(false), evo_memory_initialized(false), is_prior(false), use_state_evolution_matrix(
false), i(0), no_gpus(0), main_gpu(0), m(0), max_p(0), nsim(0), nsim_batch(0) {
SYSDEBUG_LOGGER << "HostWrapperImpl::HostWrapperImpl()" << ENDL;
// check number of GPUs
int no_devices;
bool getDeviceCountSuccess = cudaErrchk(hipGetDeviceCount(&no_devices));
if (!getDeviceCountSuccess || no_devices < 1) {
no_devices = 0;
ERROR_LOGGER << "No cuda-enabled devices available." << ENDL;
} else if (no_devices > MAX_NO_GPUS) {
no_devices = MAX_NO_GPUS;
}
if (no_gpus <= no_devices) {
this->no_gpus = no_gpus;
} else {
this->no_gpus = no_devices;
}
INFO_LOGGER << "Using " << this->no_gpus << " GPUs" << ENDL;
// start devices and streams
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index, true);
cudaErrchk(hipSetDeviceFlags(hipDeviceMapHost));
SYSDEBUG_LOGGER << "started device " << gpu_index << ENDL;
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
cudaErrchk(hipStreamCreate(&P.stream));
SYSDEBUG_LOGGER << "created stream on device " << gpu_index << ENDL;
startCublas(&P.CUBLAS);
SYSDEBUG_LOGGER << "started cublas on device " << gpu_index << ENDL;
cublasErrchk(hipblasSetStream(P.CUBLAS, P.stream));
startCurand(&P.CURAND);
SYSDEBUG_LOGGER << "started hiprand on device " << gpu_index << ENDL;
curandErrchk(hiprandSetStream(P.CURAND, P.stream));
curandErrchk(hiprandSetPseudoRandomGeneratorSeed(P.CURAND, 1234ULL + gpu_index * 100)); // set cuRand seed
SYSDEBUG_LOGGER << "set hiprand seed on device " << gpu_index << ENDL;
P.MEM = memory_manager_GPU(gpu_index, P.stream);
P.MEM_evo = memory_manager_GPU(gpu_index, P.stream);
P.MEM_sim = memory_manager_GPU(gpu_index, P.stream);
SYSDEBUG_LOGGER << "initialized the memory managers" << ENDL;
}
}
template<typename DOUBLE> SGDLM::HostWrapperImpl<DOUBLE>::~HostWrapperImpl() {
SYSDEBUG_LOGGER << "HostWrapperImpl::~HostWrapperImpl()" << ENDL;
this->clearMemory();
// shut down GPUs
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
endCublas(this->simP[gpu_index].CUBLAS);
SYSDEBUG_LOGGER << "ended cublas on device " << gpu_index << ENDL;
endCurand(this->simP[gpu_index].CURAND);
SYSDEBUG_LOGGER << "ended hiprand on device " << gpu_index << ENDL;
cudaErrchk(hipStreamDestroy(this->simP[gpu_index].stream));
SYSDEBUG_LOGGER << "destroyed stream on device " << gpu_index << ENDL;
cudaErrchk(hipDeviceSynchronize());
cudaErrchk(hipDeviceReset());
SYSDEBUG_LOGGER << "reset device " << gpu_index << ENDL;
}
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNoSeries() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNoSeries()" << ENDL;
return this->m;
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getMaxP() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getMaxP()" << ENDL;
return this->max_p;
}
template<typename DOUBLE> bool SGDLM::HostWrapperImpl<DOUBLE>::getEvolutionMatrixConfiguration() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getEvolutionMatrixConfiguration()" << ENDL;
return this->use_state_evolution_matrix;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::initMemory(std::size_t m, std::size_t max_p) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initMemory(" << m << ", " << max_p << ")" << ENDL;
this->clearMemory(); // call clearMemory to set simulation memory un-initialized just in case dimensions change
this->m = m;
this->max_p = max_p;
if (this->no_gpus < 1) {
return;
}
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM = P.MEM;
// define 0,+1,-1 on device (MEM)
P.zero = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
hipLaunchKernelGGL(( assignScalar), dim3(1), dim3(1), 0, P.stream, 1, P.zero, (DOUBLE) 0);
P.plus_one = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
hipLaunchKernelGGL(( assignScalar), dim3(1), dim3(1), 0, P.stream, 1, P.plus_one, (DOUBLE) 1);
P.minus_one = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
hipLaunchKernelGGL(( assignScalar), dim3(1), dim3(1), 0, P.stream, 1, P.minus_one, (DOUBLE) -1);
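// The scalar constants 0, +1 and -1 are written once here and later passed by pointer to the
// BLAS-backed SGDLM routines (presumably as alpha/beta scaling factors; this is an assumption
// based on how P.zero / P.plus_one / P.minus_one are forwarded together with the cuBLAS handle).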
// allocate device memory for discount factors (MEM)
P.beta = MEM.device_alloc_vec<DOUBLE>(this->m);
P.data_delta = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_delta, this->m, this->max_p * this->max_p, P.delta); // generate CPU+GPU pointer to individual matrices
// allocate device memory for simultaneous parental sets (MEM)
P.p = MEM.device_alloc_vec<unsigned int>(this->m);
P.sp_indices = MEM.device_alloc_vec<unsigned int>(this->m * this->max_p);
// allocate device memory for cache variables (MEM)
P.Q_t = MEM.device_alloc_vec<DOUBLE>(this->m);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.Q_t, this->m, 1, P.Q_t_ptrptr);
P.e_t = MEM.device_alloc_vec<DOUBLE>(this->m);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.e_t, this->m, 1, P.e_t_ptrptr);
P.data_A_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.data_A_t, this->m, this->max_p, P.A_t); // generate CPU+GPU pointer to individual matrices
// allocate device memory for predictors, data
P.y_t = MEM.device_alloc_vec<DOUBLE>(this->m);
P.data_F_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.data_F_t, this->m, this->max_p, P.F_t);
// allocate device memory for DLM parameters (MEM)
P.data_m_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_m_t, this->m, this->max_p, P.m_t); // generate CPU+GPU pointer to individual matrices
P.data_C_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_C_t, this->m, this->max_p * this->max_p, P.C_t); // generate CPU+GPU pointer to individual matrices
P.n_t = MEM.device_alloc_vec<DOUBLE>(this->m);
P.s_t = MEM.device_alloc_vec<DOUBLE>(this->m);
}
this->manageEvoMemory(this->use_state_evolution_matrix);
this->memory_initialized = true;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::manageEvoMemory(bool use_state_evolution_matrix) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initEvoMemory(" << use_state_evolution_matrix << ")" << ENDL;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_evo = P.MEM_evo;
memory_manager_GPU& MEM_sim = P.MEM_sim;
// allocate device memory for evolution matrices (MEM)
if (use_state_evolution_matrix) {
if (!this->evo_memory_initialized) {
P.data_G_t = MEM_evo.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
P.G_t = NULL;
MEM_evo.cpyToDeviceAsPtrArray<DOUBLE>(P.data_G_t, this->m, this->max_p * this->max_p, P.G_t);
P.data_m_t_buffer = MEM_evo.device_alloc_vec<DOUBLE>(this->m * this->max_p);
P.m_t_buffer = NULL;
MEM_evo.cpyToDeviceAsPtrArray<DOUBLE>(P.data_m_t_buffer, this->m, this->max_p, P.m_t_buffer);
// C_t_buffer is later initialized onto the simulation memory
}
} else {
MEM_evo.clear();
if (!this->sim_memory_initialized) { // free C_t_buffer, which is managed by MEM_sim
MEM_sim.clear();
}
}
}
// manage outsourced memory
	if (!this->sim_memory_initialized) { // C_t_buffer is managed by MEM_sim: if the simulation memory is not initialized, allocate C_t_buffer there (below) when evolution matrices are enabled; if it is in use, do not alter it
if (use_state_evolution_matrix) {
this->allocate_C_t_memory();
}
}
this->use_state_evolution_matrix = use_state_evolution_matrix;
this->evo_memory_initialized = use_state_evolution_matrix;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::clearMemory() {
SYSDEBUG_LOGGER << "HostWrapperImpl::clearGPUs()" << ENDL;
this->memory_initialized = false;
this->sim_memory_initialized = false;
this->evo_memory_initialized = false;
this->nsim = 0;
this->nsim_batch = 0;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
this->simP[gpu_index].MEM_sim.clear();
this->simP[gpu_index].MEM_evo.clear();
this->simP[gpu_index].MEM.clear();
SYSDEBUG_LOGGER << "cleared memory on device " << gpu_index << ENDL;
}
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNSim() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNSim()" << ENDL;
return this->nsim;
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNSimBatch() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNSimBatch()" << ENDL;
return this->nsim_batch;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::initSimMemory(std::size_t nsim, std::size_t nsim_batch) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initSimMemory(" << nsim << ", " << nsim_batch << ")" << ENDL;
myAssert(this->checkInitialized());
// set new dimensions
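// nsim is first divided by the number of GPUs (integer division), so the effective total
// this->nsim is rounded down to a multiple of no_gpus and split evenly across devices.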
nsim /= this->no_gpus;
this->nsim = nsim * this->no_gpus;
this->nsim_batch = nsim_batch > this->nsim ? this->nsim : nsim_batch;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_sim = P.MEM_sim;
MEM_sim.clear(); // free existing memory before allocating new memory
P.nsim = nsim;
// FOR VB POSTERIOR ESTIMATION AND FORECASTING
/*
* lambdas is organized by batch : n-array of pointers to m-arrays
* randoms is organized by batch : n-array of pointers to (m * max_p)-arrays
* thetas is organized by batch : n-array of pointers to (m * max_p)-arrays
* Gammas is organized by batch : Gammas_batch_size-array of pointers to (m * m)-arrays
* chol_C_t is organized by dimension: m-array of (max_p * max_p)-arrays
* LU_infos is organized by batch : Gammas_batch_size-array of scalars
* LU_pivots is organized by batch : Gammas_batch_size-array of m-arrays
*
*
* chol_C_t_nrepeat_ptr = n x [chol(C_t[0]), chol(C_t[1]), ..., chol(C_t[m-1])]
* randoms_nrepeat_ptr = same logic as thetas_nrepeat_ptr---just pointing to randoms instead
* thetas_nrepeat_ptr = thetas[0], thetas[max_p], thetas[2*max_p], ..., thetas[(m-1)*max_p], ..., thetas[n*(m-1)*max_p]
*/
// allocate device memory
P.data_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
P.data_randoms = MEM_sim.device_alloc_vec<DOUBLE>(((this->max_p > 4) ? this->max_p : 4) * this->m * P.nsim); // allocate max(4, max_p) * m * P.nsim entries so that P.data_randoms_pt2 below still has 2 * m * P.nsim entries available
P.data_randoms_pt2 = (DOUBLE*) ((char*) P.data_randoms + (2 * this->m * P.nsim) * sizeof(DOUBLE)); //&data_randoms[2 * m * nsim]; //TODO: verify
P.data_thetas = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->m * P.nsim);
P.data_Gammas = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->m * 2 * nsim_batch); // allocate for 2 * nsim_batch: VB_posterior can use double the batch size and forecasting will use the 2nd half for the inverse
P.data_chol_C_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->max_p * this->m);
P.LU_pivots = MEM_sim.device_alloc_vec<int>(this->m * 2 * nsim_batch);
P.LU_infos = MEM_sim.device_alloc_vec<int>(2 * nsim_batch);
// define array pointers
P.lambdas = NULL;
P.randoms_nrepeat_ptr = NULL;
P.Gammas = NULL;
P.thetas = NULL;
P.thetas_nrepeat_ptr = NULL;
P.lambdas_nrepeat_ptr = NULL;
P.chol_C_t = NULL;
P.chol_C_t_nrepeat_ptr = NULL;
// assign repeat pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_lambdas, P.nsim, this->m, P.lambdas);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_Gammas, 2 * nsim_batch, m * m, P.Gammas);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_chol_C_t, this->m, this->max_p * this->max_p, P.chol_C_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_thetas, P.nsim, this->m * this->max_p, P.thetas);
// assign nrepeat pointers
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_randoms, P.nsim, this->m, this->max_p, P.randoms_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayRepeatByBatch<DOUBLE>(P.data_chol_C_t, this->m, this->max_p * this->max_p, P.nsim,
P.chol_C_t_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_thetas, P.nsim, this->m, this->max_p, P.thetas_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_lambdas, P.nsim, this->m, 1, P.lambdas_nrepeat_ptr);
// FOR VB POSTERIOR ESTIMATION
/*
* IS_weights is organized by batch : n-array of scalars
* mean_lambdas is organized by dimension : m-array of scalars
* mean_log_lambdas is organized by dimension: m-array of scalars
* mean_n_t is organized by dimension : m-array of scalars
* mean_s_t is organized by dimension : m-array of scalars
* mean_Q_t is organized by dimension : m-array of scalars
* mean_m_t is organized by dimension : m-array of max_p-arrays
* mean_C_t is organized by dimension : m-array of (max_p * max_p)-arrays
* C_t_buffer is organized by dimension : m-array of (max_p * max_p)-arrays
* INV_infos is organized by batch : m-array of scalars
* INV_pivots is organized by batch : m-array of max_p-arrays
*
*
* chol_C_t_nrepeat_ptr = n x [chol(C_t[0]), chol(C_t[1]), ..., chol(C_t[m-1])]
* randoms_nrepeat_ptr = same logic as thetas_nrepeat_ptr---just pointing to randoms instead
* thetas_nrepeat_ptr = thetas[0], thetas[max_p], thetas[2*max_p], ..., thetas[(m-1)*max_p], ..., thetas[n*(m-1)*max_p]
*/
// allocate device memory
P.IS_weights = MEM_sim.device_alloc_vec<DOUBLE>(P.nsim);
P.sum_det_weights = MEM_sim.device_alloc_vec<DOUBLE>(1);
P.INV_pivots = MEM_sim.device_alloc_vec<int>(this->max_p * this->m);
P.INV_infos = MEM_sim.device_alloc_vec<int>(this->m);
P.mean_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_log_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.data_mean_m_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->m);
P.data_mean_C_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->max_p * this->m);
P.mean_n_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_s_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_Q_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.data_C_t_buffer = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
// define array pointers
P.mean_m_t = NULL;
P.mean_C_t = NULL;
P.C_t_buffer = NULL;
// assign array pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_mean_m_t, this->m, this->max_p, P.mean_m_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_mean_C_t, this->m, this->max_p * this->max_p, P.mean_C_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_C_t_buffer, this->m, this->max_p * this->max_p, P.C_t_buffer);
// FOR FORECASTING
/*
* y is organized by batch : n-array of pointers to m-arrays
* nus is organized by batch : n-array of pointers to m-arrays
* Gammas_inv is organized by batch : Gammas_batch_size-array of pointers to (m * m)-arrays
*/
// allocate device memory
P.data_x_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p);
P.data_y = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
P.data_nus = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
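// Gammas_inv points into the second half of data_Gammas (allocated above for 2 * nsim_batch
// batches), so no separate allocation is needed here.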
P.data_Gammas_inv = (DOUBLE*) ((char*) P.data_Gammas + (this->m * this->m * nsim_batch) * sizeof(DOUBLE)); //MEM_sim.device_alloc<DOUBLE>(3, dim_Gammas);
// define array pointers
P.x_t = NULL;
P.y = NULL;
P.nus = NULL;
P.Gammas_inv = NULL;
// assign array pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_x_t, this->m, this->max_p, P.x_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_y, P.nsim, this->m, P.y);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_nus, P.nsim, this->m, P.nus);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_Gammas_inv, nsim_batch, this->m * this->m, P.Gammas_inv);
}
this->sim_memory_initialized = true;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::clearSimMemory() {
SYSDEBUG_LOGGER << "HostWrapperImpl::clearSimMemory()" << ENDL;
myAssert(this->checkInitialized());
this->sim_memory_initialized = false;
this->nsim = 0;
this->nsim_batch = 0;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
this->simP[gpu_index].MEM_sim.clear();
SYSDEBUG_LOGGER << "cleared simulation memory on device " << gpu_index << ENDL;
}
if (this->evo_memory_initialized) { // re-allocate C_t_buffer
this->allocate_C_t_memory();
}
}
template<typename DOUBLE> bool SGDLM::HostWrapperImpl<DOUBLE>::isPrior() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::isPrior()" << ENDL;
return this->is_prior;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::isPrior(bool is_prior) {
SYSDEBUG_LOGGER << "HostWrapperImpl::isPrior(" << is_prior << ")" << ENDL;
this->is_prior = is_prior;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getParameters(DOUBLE* host_data_m_t,
DOUBLE* host_data_C_t, DOUBLE* host_data_n_t, DOUBLE* host_data_s_t) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getParameters()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_m_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_m_t, host_data_m_t, this->m * this->max_p, P.stream);
}
if (host_data_C_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_C_t, host_data_C_t, this->m * this->max_p * this->max_p, P.stream);
}
if (host_data_n_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.n_t, host_data_n_t, this->m, P.stream);
}
if (host_data_s_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.s_t, host_data_s_t, this->m, P.stream);
}
cudaErrchk(hipStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setParameters(const DOUBLE* host_data_m_t,
const DOUBLE* host_data_C_t, const DOUBLE* host_data_n_t, const DOUBLE* host_data_s_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setParameters()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_m_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_m_t, host_data_m_t, this->m * this->max_p, P.stream);
}
if (host_data_C_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_C_t, host_data_C_t, this->m * this->max_p * this->max_p,
P.stream);
}
if (host_data_n_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.n_t, host_data_n_t, this->m, P.stream);
}
if (host_data_s_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.s_t, host_data_s_t, this->m, P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getDiscountFactors(DOUBLE* host_data_beta,
DOUBLE* host_data_delta) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getDiscountFactors()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_beta != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.beta, host_data_beta, this->m, P.stream);
}
if (host_data_delta != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_delta, host_data_delta, this->m * this->max_p * this->max_p,
P.stream);
}
cudaErrchk(hipStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setDiscountFactors(const DOUBLE* host_data_beta,
const DOUBLE* host_data_delta) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setDiscountFactors()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_beta != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.beta, host_data_beta, this->m, P.stream);
}
if (host_data_delta != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_delta, host_data_delta, this->m * this->max_p * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getEvolutionMatrix(DOUBLE* host_data_G_t) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getEvolutionMatrix()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkUseStateEvolutionMatrix());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_G_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_G_t, host_data_G_t, this->m * this->max_p * this->max_p, P.stream);
}
cudaErrchk(hipStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setEvolutionMatrix(const DOUBLE* host_data_G_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setEvolutionMatrix()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkUseStateEvolutionMatrix());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_G_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_G_t, host_data_G_t, this->m * this->max_p * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getParentalSets(unsigned int* host_data_p,
unsigned int* host_data_sp_indices) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getParentalSets()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_p != NULL) {
memory_manager_GPU::cpyToHost<unsigned int>(P.p, host_data_p, this->m, P.stream);
}
if (host_data_sp_indices != NULL) {
memory_manager_GPU::cpyToHost<unsigned int>(P.sp_indices, host_data_sp_indices, this->m * this->max_p,
P.stream);
}
cudaErrchk(hipStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setParentalSets(const unsigned int* host_data_p,
const unsigned int* host_data_sp_indices) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setParentalSets()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_p != NULL) {
memory_manager_GPU::cpyToDevice<unsigned int>(P.p, host_data_p, this->m, P.stream);
}
if (host_data_sp_indices != NULL) {
memory_manager_GPU::cpyToDevice<unsigned int>(P.sp_indices, host_data_sp_indices, this->m * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePrior() {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePrior()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(false));
// call SGDLM::compute_one_step_ahead_prior with G_t = NULL
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_one_step_ahead_prior(this->m, this->max_p, P.p, P.m_t, P.C_t, P.n_t, P.s_t, P.beta,
(const DOUBLE**) P.delta, P.stream);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(hipStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(true);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePrior(const DOUBLE* host_data_G_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePrior(...)" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(false));
myAssert(this->checkUseStateEvolutionMatrix());
this->setEvolutionMatrix(host_data_G_t);
// call SGDLM::compute_one_step_ahead_prior with the current state evolution matrix
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_one_step_ahead_prior(this->m, this->max_p, P.p, P.m_t, P.C_t, P.n_t, P.s_t, P.beta,
(const DOUBLE**) P.delta, P.stream, P.CUBLAS, P.zero, P.plus_one, (const DOUBLE**) P.G_t, P.C_t_buffer,
P.m_t_buffer);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(hipStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(true);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computeForecast(DOUBLE* host_data_ytp1,
const DOUBLE* host_data_x_tp1) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computeForecast()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkSimInitialized());
myAssert(this->checkPrior(true));
// copy predictors onto device memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_x_t, host_data_x_tp1, this->m * this->max_p, P.stream);
}
// compute forecasts
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::forecast((const DOUBLE*) P.zero, (const DOUBLE*) P.plus_one, this->m, this->max_p,
(const unsigned int*) P.p, (const unsigned int*) P.sp_indices, (const DOUBLE**) P.m_t,
(const DOUBLE**) P.C_t, (const DOUBLE*) P.n_t, (const DOUBLE*) P.s_t, P.nsim, this->nsim_batch,
(const DOUBLE**) P.x_t, P.y, P.data_nus, P.nus, P.lambdas, P.data_randoms, P.data_randoms_pt2,
P.randoms_nrepeat_ptr, P.Gammas, P.Gammas_inv, P.LU_pivots, P.LU_infos, P.chol_C_t,
P.chol_C_t_nrepeat_ptr, P.thetas, P.thetas_nrepeat_ptr, P.stream, P.CUBLAS, P.CURAND);
}
// copy results to host memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
// copy the result into the right place of the output array
DOUBLE* out_ptr_pos = (DOUBLE*) ((char*) host_data_ytp1 + gpu_index * this->m * P.nsim * sizeof(DOUBLE));
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_y, out_ptr_pos, this->m * P.nsim, P.stream);
}
// wait until computations and memory transfers are complete
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(hipStreamSynchronize(this->simP[gpu_index].stream));
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePosterior(const DOUBLE* host_data_y_t,
const DOUBLE* host_data_F_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePosterior()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(true));
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToDevice<DOUBLE>(P.y_t, host_data_y_t, this->m, P.stream);
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_F_t, host_data_F_t, this->m * this->max_p, P.stream);
}
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_posterior(P.zero, P.plus_one, P.minus_one, this->m, this->max_p, P.p, P.m_t, P.C_t,
P.n_t, P.s_t, (const DOUBLE*) P.y_t, (const DOUBLE**) P.F_t, P.Q_t, P.e_t, P.A_t, P.Q_t_ptrptr,
P.e_t_ptrptr, P.CUBLAS, P.stream);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(hipStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(false);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computeVBPosterior(DOUBLE* host_data_mean_m_t,
DOUBLE* host_data_mean_C_t, DOUBLE* host_data_mean_n_t, DOUBLE* host_data_mean_s_t,
DOUBLE* host_data_IS_weights, DOUBLE* host_sum_det_weights) {
SYSDEBUG_LOGGER << "HostWrapperImpl::runVB()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkSimInitialized());
myAssert(this->checkPrior(false));
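// data_Gammas was allocated for 2 * nsim_batch batches in initSimMemory, so VB_posterior
// can work on batches of twice the forecasting batch size.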
size_t batch_multiplier = 2;
SYSDEBUG_LOGGER << "before starting VB simulation on all selected GPUs" << ENDL;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::VB_posterior((const DOUBLE*) P.zero, (const DOUBLE*) P.plus_one, this->m, this->max_p,
(const unsigned int*) P.p, (const unsigned int*) P.sp_indices, (const DOUBLE**) P.m_t,
(const DOUBLE**) P.C_t, (const DOUBLE*) P.n_t, (const DOUBLE*) P.s_t, P.nsim, P.lambdas, P.data_randoms,
P.data_randoms_pt2, P.randoms_nrepeat_ptr, P.Gammas, batch_multiplier * this->nsim_batch, P.IS_weights,
P.sum_det_weights, P.chol_C_t, P.chol_C_t_nrepeat_ptr, P.thetas, P.thetas_nrepeat_ptr, P.LU_pivots,
P.LU_infos, P.mean_lambdas, P.mean_log_lambdas, P.mean_m_t, P.mean_C_t, P.C_t_buffer, P.INV_pivots,
P.INV_infos, P.mean_n_t, P.mean_s_t, P.mean_Q_t, P.lambdas, P.lambdas_nrepeat_ptr, P.stream, P.CUBLAS,
P.CURAND);
}
SYSDEBUG_LOGGER << "VB simulation initiated" << ENDL;
// allocate temporary host memory to copy data in from every gpu
memory_manager host_MEM_temp;
DOUBLE* host_temp_mean_m_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m * this->max_p);
DOUBLE* host_temp_mean_C_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m * this->max_p * this->max_p);
DOUBLE* host_temp_mean_n_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m);
DOUBLE* host_temp_mean_s_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m);
DOUBLE* host_temp_IS_weights = host_MEM_temp.host_alloc_vec<DOUBLE>(this->nsim);
DOUBLE* host_temp_sum_det_weights = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus);
// retrieve VB results into temporary host memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_mean_m_t, &host_temp_mean_m_t[gpu_index * this->m * this->max_p],
this->m * this->max_p, P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_mean_C_t,
&host_temp_mean_C_t[gpu_index * this->m * this->max_p * this->max_p],
this->m * this->max_p * this->max_p, P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.mean_n_t, &host_temp_mean_n_t[gpu_index * this->m], this->m,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.mean_s_t, &host_temp_mean_s_t[gpu_index * this->m], this->m,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.IS_weights, &host_temp_IS_weights[gpu_index * P.nsim], P.nsim,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.sum_det_weights, &host_temp_sum_det_weights[gpu_index], 1, P.stream);
}
SYSDEBUG_LOGGER << "waiting for memory transfer to complete" << ENDL;
// wait until all results are downloaded
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
cudaErrchk(hipStreamSynchronize(this->simP[gpu_index].stream));
SYSDEBUG_LOGGER << "synchronized stream on device " << gpu_index << ENDL;
}
// sum up determinant weights
host_sum_det_weights[0] = 0;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "the sum of the determinants of GPU " << gpu_index << " is: "
<< host_temp_sum_det_weights[gpu_index] << ENDL;
host_sum_det_weights[0] += host_temp_sum_det_weights[gpu_index];
}
SYSDEBUG_LOGGER << "host_sum_det_weights[0] = " << host_sum_det_weights[0] << ENDL;
// average the means from the different GPUs
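// Each GPU's batch is folded in with weight_scale = (that GPU's sum of determinant weights) /
// (total sum across GPUs), i.e. an importance-sampling weighted average of the per-GPU means.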
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
// calculate weight of the batch produced by this GPU
DOUBLE weight_scale = host_temp_sum_det_weights[gpu_index] / host_sum_det_weights[0];
SYSDEBUG_LOGGER << "weight_scale = " << host_temp_sum_det_weights[gpu_index] << " / " << host_sum_det_weights[0]
<< " = " << weight_scale << ENDL;
// write mean_m_t
for (size_t i = 0; i < this->m * this->max_p; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_m_t[i] = 0;
}
host_data_mean_m_t[i] += weight_scale * host_temp_mean_m_t[gpu_index * this->m * this->max_p + i];
}
// write mean_C_t
for (size_t i = 0; i < this->m * this->max_p * this->max_p; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_C_t[i] = 0;
}
host_data_mean_C_t[i] += weight_scale * host_temp_mean_C_t[gpu_index * this->m * this->max_p * this->max_p + i];
}
// write mean_n_t, mean_s_t
for (size_t i = 0; i < this->m; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_n_t[i] = 0;
host_data_mean_s_t[i] = 0;
}
host_data_mean_n_t[i] += weight_scale * host_temp_mean_n_t[gpu_index * this->m + i];
host_data_mean_s_t[i] += weight_scale * host_temp_mean_s_t[gpu_index * this->m + i];
}
// write IS_weights
DOUBLE sum_weights = 0;
for (size_t i = 0; i < this->simP[gpu_index].nsim; i++) {
host_data_IS_weights[gpu_index * this->simP[gpu_index].nsim + i] = weight_scale
* host_temp_IS_weights[gpu_index * this->simP[gpu_index].nsim + i];
sum_weights += host_data_IS_weights[gpu_index * this->simP[gpu_index].nsim + i];
}
SYSDEBUG_LOGGER << "sum_weights = " << sum_weights << ENDL;
}
SYSDEBUG_LOGGER << "before clearing host_MEM_temp" << ENDL;
host_MEM_temp.clear();
}
/*
*
*
*
*
*
*
*/
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkInitialized() const {
if (!this->memory_initialized) {
ERROR_LOGGER << "The device memory is not initialized." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkSimInitialized() const {
if (!this->sim_memory_initialized) {
ERROR_LOGGER << "The simulation device memory is not initialized." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkUseStateEvolutionMatrix() const {
if (!this->use_state_evolution_matrix) {
ERROR_LOGGER << "The use of state evolution matrices is disabled." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkPrior(bool is_prior) const {
if (is_prior) {
if (!this->is_prior) {
ERROR_LOGGER << "This function can only be executed when the parameters are prior parameters." << ENDL;
return false;
}
return true;
} else {
if (this->is_prior) {
ERROR_LOGGER << "This function can only be executed when the parameters are posterior parameters." << ENDL;
return false;
}
return true;
}
}
/*
*
*
*
*
*
*
*/
template<typename DOUBLE> inline void SGDLM::HostWrapperImpl<DOUBLE>::allocate_C_t_memory() { // allocate C_t_buffer to simulation memory
SYSDEBUG_LOGGER << "HostWrapperImpl::allocate_C_t_memory()" << ENDL;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_sim = P.MEM_sim;
P.data_C_t_buffer = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
P.C_t_buffer = NULL;
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_C_t_buffer, this->m, this->max_p * this->max_p, P.C_t_buffer);
}
}
/*
*
*
*
*
*
*
*/
// explicit instantiation
template class SGDLM::HostWrapperImpl<DOUBLETYPE>;
| f2b9fcdcce2a5b258825a73110d458ac7d9dbf86.cu | #include "SGDLM/HostWrapperImpl.cuh"
#include "kernel_functions.cuh"
#include "cublas_manager.cuh"
#include "curand_manager.cuh"
#include "SGDLM/SGDLM.cuh"
template<typename DOUBLE> SGDLM::HostWrapperImpl<DOUBLE>::HostWrapperImpl(std::size_t no_gpus) :
memory_initialized(false), sim_memory_initialized(false), evo_memory_initialized(false), is_prior(false), use_state_evolution_matrix(
false), i(0), no_gpus(0), main_gpu(0), m(0), max_p(0), nsim(0), nsim_batch(0) {
SYSDEBUG_LOGGER << "HostWrapperImpl::HostWrapperImpl()" << ENDL;
// check number of GPUs
int no_devices;
bool getDeviceCountSuccess = cudaErrchk(cudaGetDeviceCount(&no_devices));
if (!getDeviceCountSuccess || no_devices < 1) {
no_devices = 0;
ERROR_LOGGER << "No cuda-enabled devices available." << ENDL;
} else if (no_devices > MAX_NO_GPUS) {
no_devices = MAX_NO_GPUS;
}
if (no_gpus <= no_devices) {
this->no_gpus = no_gpus;
} else {
this->no_gpus = no_devices;
}
INFO_LOGGER << "Using " << this->no_gpus << " GPUs" << ENDL;
// start devices and streams
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index, true);
cudaErrchk(cudaSetDeviceFlags(cudaDeviceMapHost));
SYSDEBUG_LOGGER << "started device " << gpu_index << ENDL;
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
cudaErrchk(cudaStreamCreate(&P.stream));
SYSDEBUG_LOGGER << "created stream on device " << gpu_index << ENDL;
startCublas(&P.CUBLAS);
SYSDEBUG_LOGGER << "started cublas on device " << gpu_index << ENDL;
cublasErrchk(cublasSetStream(P.CUBLAS, P.stream));
startCurand(&P.CURAND);
SYSDEBUG_LOGGER << "started curand on device " << gpu_index << ENDL;
curandErrchk(curandSetStream(P.CURAND, P.stream));
curandErrchk(curandSetPseudoRandomGeneratorSeed(P.CURAND, 1234ULL + gpu_index * 100)); // set cuRand seed
SYSDEBUG_LOGGER << "set curand seed on device " << gpu_index << ENDL;
P.MEM = memory_manager_GPU(gpu_index, P.stream);
P.MEM_evo = memory_manager_GPU(gpu_index, P.stream);
P.MEM_sim = memory_manager_GPU(gpu_index, P.stream);
SYSDEBUG_LOGGER << "initialized the memory managers" << ENDL;
}
}
template<typename DOUBLE> SGDLM::HostWrapperImpl<DOUBLE>::~HostWrapperImpl() {
SYSDEBUG_LOGGER << "HostWrapperImpl::~HostWrapperImpl()" << ENDL;
this->clearMemory();
// shut down GPUs
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
endCublas(this->simP[gpu_index].CUBLAS);
SYSDEBUG_LOGGER << "ended cublas on device " << gpu_index << ENDL;
endCurand(this->simP[gpu_index].CURAND);
SYSDEBUG_LOGGER << "ended curand on device " << gpu_index << ENDL;
cudaErrchk(cudaStreamDestroy(this->simP[gpu_index].stream));
SYSDEBUG_LOGGER << "destroyed stream on device " << gpu_index << ENDL;
cudaErrchk(cudaDeviceSynchronize());
cudaErrchk(cudaDeviceReset());
SYSDEBUG_LOGGER << "reset device " << gpu_index << ENDL;
}
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNoSeries() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNoSeries()" << ENDL;
return this->m;
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getMaxP() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getMaxP()" << ENDL;
return this->max_p;
}
template<typename DOUBLE> bool SGDLM::HostWrapperImpl<DOUBLE>::getEvolutionMatrixConfiguration() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getEvolutionMatrixConfiguration()" << ENDL;
return this->use_state_evolution_matrix;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::initMemory(std::size_t m, std::size_t max_p) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initMemory(" << m << ", " << max_p << ")" << ENDL;
this->clearMemory(); // call clearMemory to set simulation memory un-initialized just in case dimensions change
this->m = m;
this->max_p = max_p;
if (this->no_gpus < 1) {
return;
}
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM = P.MEM;
// define 0,+1,-1 on device (MEM)
P.zero = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
assignScalar<<<1, 1, 0, P.stream>>>(1, P.zero, (DOUBLE) 0);
P.plus_one = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
assignScalar<<<1, 1, 0, P.stream>>>(1, P.plus_one, (DOUBLE) 1);
P.minus_one = MEM.host_device_alloc<DOUBLE>(sizeof(DOUBLE));
assignScalar<<<1, 1, 0, P.stream>>>(1, P.minus_one, (DOUBLE) -1);
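// The scalar constants 0, +1 and -1 are written once here and later passed by pointer to the
// BLAS-backed SGDLM routines (presumably as alpha/beta scaling factors; this is an assumption
// based on how P.zero / P.plus_one / P.minus_one are forwarded together with the cuBLAS handle).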
// allocate device memory for discount factors (MEM)
P.beta = MEM.device_alloc_vec<DOUBLE>(this->m);
P.data_delta = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_delta, this->m, this->max_p * this->max_p, P.delta); // generate CPU+GPU pointer to individual matrices
// allocate device memory for simultaneous parental sets (MEM)
P.p = MEM.device_alloc_vec<unsigned int>(this->m);
P.sp_indices = MEM.device_alloc_vec<unsigned int>(this->m * this->max_p);
// allocate device memory for cache variables (MEM)
P.Q_t = MEM.device_alloc_vec<DOUBLE>(this->m);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.Q_t, this->m, 1, P.Q_t_ptrptr);
P.e_t = MEM.device_alloc_vec<DOUBLE>(this->m);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.e_t, this->m, 1, P.e_t_ptrptr);
P.data_A_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.data_A_t, this->m, this->max_p, P.A_t); // generate CPU+GPU pointer to individual matrices
// allocate device memory for predictors, data
P.y_t = MEM.device_alloc_vec<DOUBLE>(this->m);
P.data_F_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>(P.data_F_t, this->m, this->max_p, P.F_t);
// allocate device memory for DLM parameters (MEM)
P.data_m_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_m_t, this->m, this->max_p, P.m_t); // generate CPU+GPU pointer to individual matrices
P.data_C_t = MEM.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
MEM.cpyToDeviceAsPtrArray<DOUBLE>((const DOUBLE*) P.data_C_t, this->m, this->max_p * this->max_p, P.C_t); // generate CPU+GPU pointer to individual matrices
P.n_t = MEM.device_alloc_vec<DOUBLE>(this->m);
P.s_t = MEM.device_alloc_vec<DOUBLE>(this->m);
}
this->manageEvoMemory(this->use_state_evolution_matrix);
this->memory_initialized = true;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::manageEvoMemory(bool use_state_evolution_matrix) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initEvoMemory(" << use_state_evolution_matrix << ")" << ENDL;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_evo = P.MEM_evo;
memory_manager_GPU& MEM_sim = P.MEM_sim;
// allocate device memory for evolution matrices (MEM)
if (use_state_evolution_matrix) {
if (!this->evo_memory_initialized) {
P.data_G_t = MEM_evo.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
P.G_t = NULL;
MEM_evo.cpyToDeviceAsPtrArray<DOUBLE>(P.data_G_t, this->m, this->max_p * this->max_p, P.G_t);
P.data_m_t_buffer = MEM_evo.device_alloc_vec<DOUBLE>(this->m * this->max_p);
P.m_t_buffer = NULL;
MEM_evo.cpyToDeviceAsPtrArray<DOUBLE>(P.data_m_t_buffer, this->m, this->max_p, P.m_t_buffer);
// C_t_buffer is later initialized onto the simulation memory
}
} else {
MEM_evo.clear();
if (!this->sim_memory_initialized) { // free C_t_buffer, which is managed by MEM_sim
MEM_sim.clear();
}
}
}
// manage outsourced memory
	if (!this->sim_memory_initialized) { // C_t_buffer is managed by MEM_sim: if the simulation memory is not initialized, allocate C_t_buffer there (below) when evolution matrices are enabled; if it is in use, do not alter it
if (use_state_evolution_matrix) {
this->allocate_C_t_memory();
}
}
this->use_state_evolution_matrix = use_state_evolution_matrix;
this->evo_memory_initialized = use_state_evolution_matrix;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::clearMemory() {
SYSDEBUG_LOGGER << "HostWrapperImpl::clearGPUs()" << ENDL;
this->memory_initialized = false;
this->sim_memory_initialized = false;
this->evo_memory_initialized = false;
this->nsim = 0;
this->nsim_batch = 0;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
this->simP[gpu_index].MEM_sim.clear();
this->simP[gpu_index].MEM_evo.clear();
this->simP[gpu_index].MEM.clear();
SYSDEBUG_LOGGER << "cleared memory on device " << gpu_index << ENDL;
}
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNSim() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNSim()" << ENDL;
return this->nsim;
}
template<typename DOUBLE> std::size_t SGDLM::HostWrapperImpl<DOUBLE>::getNSimBatch() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getNSimBatch()" << ENDL;
return this->nsim_batch;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::initSimMemory(std::size_t nsim, std::size_t nsim_batch) {
SYSDEBUG_LOGGER << "HostWrapperImpl::initSimMemory(" << nsim << ", " << nsim_batch << ")" << ENDL;
myAssert(this->checkInitialized());
// set new dimensions
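// nsim is first divided by the number of GPUs (integer division), so the effective total
// this->nsim is rounded down to a multiple of no_gpus and split evenly across devices.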
nsim /= this->no_gpus;
this->nsim = nsim * this->no_gpus;
this->nsim_batch = nsim_batch > this->nsim ? this->nsim : nsim_batch;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_sim = P.MEM_sim;
MEM_sim.clear(); // free existing memory before allocating new memory
P.nsim = nsim;
// FOR VB POSTERIOR ESTIMATION AND FORECASTING
/*
* lambdas is organized by batch : n-array of pointers to m-arrays
* randoms is organized by batch : n-array of pointers to (m * max_p)-arrays
* thetas is organized by batch : n-array of pointers to (m * max_p)-arrays
* Gammas is organized by batch : Gammas_batch_size-array of pointers to (m * m)-arrays
* chol_C_t is organized by dimension: m-array of (max_p * max_p)-arrays
* LU_infos is organized by batch : Gammas_batch_size-array of scalars
* LU_pivots is organized by batch : Gammas_batch_size-array of m-arrays
*
*
* chol_C_t_nrepeat_ptr = n x [chol(C_t[0]), chol(C_t[1]), ..., chol(C_t[m-1])]
* randoms_nrepeat_ptr = same logic as thetas_nrepeat_ptr---just pointing to randoms instead
* thetas_nrepeat_ptr = thetas[0], thetas[max_p], thetas[2*max_p], ..., thetas[(m-1)*max_p], ..., thetas[n*(m-1)*max_p]
*/
// allocate device memory
P.data_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
P.data_randoms = MEM_sim.device_alloc_vec<DOUBLE>(((this->max_p > 4) ? this->max_p : 4) * this->m * P.nsim); // allocate max(4, max_p) * m * P.nsim entries so that P.data_randoms_pt2 below still has 2 * m * P.nsim entries available
P.data_randoms_pt2 = (DOUBLE*) ((char*) P.data_randoms + (2 * this->m * P.nsim) * sizeof(DOUBLE)); //&data_randoms[2 * m * nsim]; //TODO: verify
P.data_thetas = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->m * P.nsim);
P.data_Gammas = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->m * 2 * nsim_batch); // allocate for 2 * nsim_batch: VB_posterior can use double the batch size and forecasting will use the 2nd half for the inverse
P.data_chol_C_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->max_p * this->m);
P.LU_pivots = MEM_sim.device_alloc_vec<int>(this->m * 2 * nsim_batch);
P.LU_infos = MEM_sim.device_alloc_vec<int>(2 * nsim_batch);
// define array pointers
P.lambdas = NULL;
P.randoms_nrepeat_ptr = NULL;
P.Gammas = NULL;
P.thetas = NULL;
P.thetas_nrepeat_ptr = NULL;
P.lambdas_nrepeat_ptr = NULL;
P.chol_C_t = NULL;
P.chol_C_t_nrepeat_ptr = NULL;
// assign repeat pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_lambdas, P.nsim, this->m, P.lambdas);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_Gammas, 2 * nsim_batch, m * m, P.Gammas);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_chol_C_t, this->m, this->max_p * this->max_p, P.chol_C_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_thetas, P.nsim, this->m * this->max_p, P.thetas);
// assign nrepeat pointers
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_randoms, P.nsim, this->m, this->max_p, P.randoms_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayRepeatByBatch<DOUBLE>(P.data_chol_C_t, this->m, this->max_p * this->max_p, P.nsim,
P.chol_C_t_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_thetas, P.nsim, this->m, this->max_p, P.thetas_nrepeat_ptr);
MEM_sim.cpyToDeviceAsPtrArrayByCol<DOUBLE>(P.data_lambdas, P.nsim, this->m, 1, P.lambdas_nrepeat_ptr);
// FOR VB POSTERIOR ESTIMATION
/*
* IS_weights is organized by batch : n-array of scalars
* mean_lambdas is organized by dimension : m-array of scalars
* mean_log_lambdas is organized by dimension: m-array of scalars
* mean_n_t is organized by dimension : m-array of scalars
* mean_s_t is organized by dimension : m-array of scalars
* mean_Q_t is organized by dimension : m-array of scalars
* mean_m_t is organized by dimension : m-array of max_p-arrays
* mean_C_t is organized by dimension : m-array of (max_p * max_p)-arrays
* C_t_buffer is organized by dimension : m-array of (max_p * max_p)-arrays
* INV_infos is organized by batch : m-array of scalars
* INV_pivots is organized by batch : m-array of max_p-arrays
*
*
* chol_C_t_nrepeat_ptr = n x [chol(C_t[0]), chol(C_t[1]), ..., chol(C_t[m-1])]
* randoms_nrepeat_ptr = same logic as thetas_nrepeat_ptr---just pointing to randoms instead
* thetas_nrepeat_ptr = thetas[0], thetas[max_p], thetas[2*max_p], ..., thetas[(m-1)*max_p], ..., thetas[n*(m-1)*max_p]
*/
// allocate device memory
P.IS_weights = MEM_sim.device_alloc_vec<DOUBLE>(P.nsim);
P.sum_det_weights = MEM_sim.device_alloc_vec<DOUBLE>(1);
P.INV_pivots = MEM_sim.device_alloc_vec<int>(this->max_p * this->m);
P.INV_infos = MEM_sim.device_alloc_vec<int>(this->m);
P.mean_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_log_lambdas = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.data_mean_m_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->m);
P.data_mean_C_t = MEM_sim.device_alloc_vec<DOUBLE>(this->max_p * this->max_p * this->m);
P.mean_n_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_s_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.mean_Q_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m);
P.data_C_t_buffer = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
// define array pointers
P.mean_m_t = NULL;
P.mean_C_t = NULL;
P.C_t_buffer = NULL;
// assign array pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_mean_m_t, this->m, this->max_p, P.mean_m_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_mean_C_t, this->m, this->max_p * this->max_p, P.mean_C_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_C_t_buffer, this->m, this->max_p * this->max_p, P.C_t_buffer);
// FOR FORECASTING
/*
* y is organized by batch : n-array of pointers to m-arrays
* nus is organized by batch : n-array of pointers to m-arrays
* Gammas_inv is organized by batch : Gammas_batch_size-array of pointers to (m * m)-arrays
*/
// allocate device memory
P.data_x_t = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p);
P.data_y = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
P.data_nus = MEM_sim.device_alloc_vec<DOUBLE>(this->m * P.nsim);
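// Gammas_inv points into the second half of data_Gammas (allocated above for 2 * nsim_batch
// batches), so no separate allocation is needed here.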
P.data_Gammas_inv = (DOUBLE*) ((char*) P.data_Gammas + (this->m * this->m * nsim_batch) * sizeof(DOUBLE)); //MEM_sim.device_alloc<DOUBLE>(3, dim_Gammas);
// define array pointers
P.x_t = NULL;
P.y = NULL;
P.nus = NULL;
P.Gammas_inv = NULL;
// assign array pointers
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_x_t, this->m, this->max_p, P.x_t);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_y, P.nsim, this->m, P.y);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_nus, P.nsim, this->m, P.nus);
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_Gammas_inv, nsim_batch, this->m * this->m, P.Gammas_inv);
}
this->sim_memory_initialized = true;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::clearSimMemory() {
SYSDEBUG_LOGGER << "HostWrapperImpl::clearSimMemory()" << ENDL;
myAssert(this->checkInitialized());
this->sim_memory_initialized = false;
this->nsim = 0;
this->nsim_batch = 0;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
this->simP[gpu_index].MEM_sim.clear();
SYSDEBUG_LOGGER << "cleared simulation memory on device " << gpu_index << ENDL;
}
if (this->evo_memory_initialized) { // re-allocate C_t_buffer
this->allocate_C_t_memory();
}
}
template<typename DOUBLE> bool SGDLM::HostWrapperImpl<DOUBLE>::isPrior() const {
SYSDEBUG_LOGGER << "HostWrapperImpl::isPrior()" << ENDL;
return this->is_prior;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::isPrior(bool is_prior) {
SYSDEBUG_LOGGER << "HostWrapperImpl::isPrior(" << is_prior << ")" << ENDL;
this->is_prior = is_prior;
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getParameters(DOUBLE* host_data_m_t,
DOUBLE* host_data_C_t, DOUBLE* host_data_n_t, DOUBLE* host_data_s_t) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getParameters()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_m_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_m_t, host_data_m_t, this->m * this->max_p, P.stream);
}
if (host_data_C_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_C_t, host_data_C_t, this->m * this->max_p * this->max_p, P.stream);
}
if (host_data_n_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.n_t, host_data_n_t, this->m, P.stream);
}
if (host_data_s_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.s_t, host_data_s_t, this->m, P.stream);
}
cudaErrchk(cudaStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setParameters(const DOUBLE* host_data_m_t,
const DOUBLE* host_data_C_t, const DOUBLE* host_data_n_t, const DOUBLE* host_data_s_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setParameters()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_m_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_m_t, host_data_m_t, this->m * this->max_p, P.stream);
}
if (host_data_C_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_C_t, host_data_C_t, this->m * this->max_p * this->max_p,
P.stream);
}
if (host_data_n_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.n_t, host_data_n_t, this->m, P.stream);
}
if (host_data_s_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.s_t, host_data_s_t, this->m, P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getDiscountFactors(DOUBLE* host_data_beta,
DOUBLE* host_data_delta) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getDiscountFactors()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_beta != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.beta, host_data_beta, this->m, P.stream);
}
if (host_data_delta != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_delta, host_data_delta, this->m * this->max_p * this->max_p,
P.stream);
}
cudaErrchk(cudaStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setDiscountFactors(const DOUBLE* host_data_beta,
const DOUBLE* host_data_delta) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setDiscountFactors()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_beta != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.beta, host_data_beta, this->m, P.stream);
}
if (host_data_delta != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_delta, host_data_delta, this->m * this->max_p * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getEvolutionMatrix(DOUBLE* host_data_G_t) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getEvolutionMatrix()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkUseStateEvolutionMatrix());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_G_t != NULL) {
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_G_t, host_data_G_t, this->m * this->max_p * this->max_p, P.stream);
}
cudaErrchk(cudaStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setEvolutionMatrix(const DOUBLE* host_data_G_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setEvolutionMatrix()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkUseStateEvolutionMatrix());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_G_t != NULL) {
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_G_t, host_data_G_t, this->m * this->max_p * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::getParentalSets(unsigned int* host_data_p,
unsigned int* host_data_sp_indices) const {
SYSDEBUG_LOGGER << "HostWrapperImpl::getParentalSets()" << ENDL;
myAssert(this->checkInitialized());
startCuda(this->main_gpu);
const simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[this->main_gpu];
if (host_data_p != NULL) {
memory_manager_GPU::cpyToHost<unsigned int>(P.p, host_data_p, this->m, P.stream);
}
if (host_data_sp_indices != NULL) {
memory_manager_GPU::cpyToHost<unsigned int>(P.sp_indices, host_data_sp_indices, this->m * this->max_p,
P.stream);
}
cudaErrchk(cudaStreamSynchronize(this->simP[this->main_gpu].stream));
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::setParentalSets(const unsigned int* host_data_p,
const unsigned int* host_data_sp_indices) {
SYSDEBUG_LOGGER << "HostWrapperImpl::setParentalSets()" << ENDL;
myAssert(this->checkInitialized());
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
if (host_data_p != NULL) {
memory_manager_GPU::cpyToDevice<unsigned int>(P.p, host_data_p, this->m, P.stream);
}
if (host_data_sp_indices != NULL) {
memory_manager_GPU::cpyToDevice<unsigned int>(P.sp_indices, host_data_sp_indices, this->m * this->max_p,
P.stream);
}
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePrior() {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePrior()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(false));
// call SGDLM::compute_one_step_ahead_prior with G_t = NULL
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_one_step_ahead_prior(this->m, this->max_p, P.p, P.m_t, P.C_t, P.n_t, P.s_t, P.beta,
(const DOUBLE**) P.delta, P.stream);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(cudaStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(true);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePrior(const DOUBLE* host_data_G_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePrior(...)" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(false));
myAssert(this->checkUseStateEvolutionMatrix());
this->setEvolutionMatrix(host_data_G_t);
// call SGDLM::compute_one_step_ahead_prior with the current state evolution matrix
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_one_step_ahead_prior(this->m, this->max_p, P.p, P.m_t, P.C_t, P.n_t, P.s_t, P.beta,
(const DOUBLE**) P.delta, P.stream, P.CUBLAS, P.zero, P.plus_one, (const DOUBLE**) P.G_t, P.C_t_buffer,
P.m_t_buffer);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(cudaStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(true);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computeForecast(DOUBLE* host_data_ytp1,
const DOUBLE* host_data_x_tp1) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computeForecast()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkSimInitialized());
myAssert(this->checkPrior(true));
// copy predictors onto device memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_x_t, host_data_x_tp1, this->m * this->max_p, P.stream);
}
// compute forecasts
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::forecast((const DOUBLE*) P.zero, (const DOUBLE*) P.plus_one, this->m, this->max_p,
(const unsigned int*) P.p, (const unsigned int*) P.sp_indices, (const DOUBLE**) P.m_t,
(const DOUBLE**) P.C_t, (const DOUBLE*) P.n_t, (const DOUBLE*) P.s_t, P.nsim, this->nsim_batch,
(const DOUBLE**) P.x_t, P.y, P.data_nus, P.nus, P.lambdas, P.data_randoms, P.data_randoms_pt2,
P.randoms_nrepeat_ptr, P.Gammas, P.Gammas_inv, P.LU_pivots, P.LU_infos, P.chol_C_t,
P.chol_C_t_nrepeat_ptr, P.thetas, P.thetas_nrepeat_ptr, P.stream, P.CUBLAS, P.CURAND);
}
// copy results on host memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
// copy the result into the right place of the output array
DOUBLE* out_ptr_pos = (DOUBLE*) ((char*) host_data_ytp1 + gpu_index * this->m * P.nsim * sizeof(DOUBLE));
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_y, out_ptr_pos, this->m * P.nsim, P.stream);
}
// wait until computations and memory transfers are complete
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(cudaStreamSynchronize(this->simP[gpu_index].stream));
}
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computePosterior(const DOUBLE* host_data_y_t,
const DOUBLE* host_data_F_t) {
SYSDEBUG_LOGGER << "HostWrapperImpl::computePosterior()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkPrior(true));
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToDevice<DOUBLE>(P.y_t, host_data_y_t, this->m, P.stream);
memory_manager_GPU::cpyToDevice<DOUBLE>(P.data_F_t, host_data_F_t, this->m * this->max_p, P.stream);
}
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::compute_posterior(P.zero, P.plus_one, P.minus_one, this->m, this->max_p, P.p, P.m_t, P.C_t,
P.n_t, P.s_t, (const DOUBLE*) P.y_t, (const DOUBLE**) P.F_t, P.Q_t, P.e_t, P.A_t, P.Q_t_ptrptr,
P.e_t_ptrptr, P.CUBLAS, P.stream);
}
// wait for results
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
cudaErrchk(cudaStreamSynchronize(this->simP[gpu_index].stream));
}
this->isPrior(false);
}
template<typename DOUBLE> void SGDLM::HostWrapperImpl<DOUBLE>::computeVBPosterior(DOUBLE* host_data_mean_m_t,
DOUBLE* host_data_mean_C_t, DOUBLE* host_data_mean_n_t, DOUBLE* host_data_mean_s_t,
DOUBLE* host_data_IS_weights, DOUBLE* host_sum_det_weights) {
SYSDEBUG_LOGGER << "HostWrapperImpl::runVB()" << ENDL;
myAssert(this->checkInitialized());
myAssert(this->checkSimInitialized());
myAssert(this->checkPrior(false));
size_t batch_multiplier = 2;
SYSDEBUG_LOGGER << "before starting VB simulation on all selected GPUs" << ENDL;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
SGDLM<DOUBLE>::VB_posterior((const DOUBLE*) P.zero, (const DOUBLE*) P.plus_one, this->m, this->max_p,
(const unsigned int*) P.p, (const unsigned int*) P.sp_indices, (const DOUBLE**) P.m_t,
(const DOUBLE**) P.C_t, (const DOUBLE*) P.n_t, (const DOUBLE*) P.s_t, P.nsim, P.lambdas, P.data_randoms,
P.data_randoms_pt2, P.randoms_nrepeat_ptr, P.Gammas, batch_multiplier * this->nsim_batch, P.IS_weights,
P.sum_det_weights, P.chol_C_t, P.chol_C_t_nrepeat_ptr, P.thetas, P.thetas_nrepeat_ptr, P.LU_pivots,
P.LU_infos, P.mean_lambdas, P.mean_log_lambdas, P.mean_m_t, P.mean_C_t, P.C_t_buffer, P.INV_pivots,
P.INV_infos, P.mean_n_t, P.mean_s_t, P.mean_Q_t, P.lambdas, P.lambdas_nrepeat_ptr, P.stream, P.CUBLAS,
P.CURAND);
}
SYSDEBUG_LOGGER << "VB simulation initiated" << ENDL;
// allocate temporary host memory to copy data in from every gpu
memory_manager host_MEM_temp;
DOUBLE* host_temp_mean_m_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m * this->max_p);
DOUBLE* host_temp_mean_C_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m * this->max_p * this->max_p);
DOUBLE* host_temp_mean_n_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m);
DOUBLE* host_temp_mean_s_t = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus * this->m);
DOUBLE* host_temp_IS_weights = host_MEM_temp.host_alloc_vec<DOUBLE>(this->nsim);
DOUBLE* host_temp_sum_det_weights = host_MEM_temp.host_alloc_vec<DOUBLE>(this->no_gpus);
// retrieve VB results into temporary host memory
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_mean_m_t, &host_temp_mean_m_t[gpu_index * this->m * this->max_p],
this->m * this->max_p, P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.data_mean_C_t,
&host_temp_mean_C_t[gpu_index * this->m * this->max_p * this->max_p],
this->m * this->max_p * this->max_p, P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.mean_n_t, &host_temp_mean_n_t[gpu_index * this->m], this->m,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.mean_s_t, &host_temp_mean_s_t[gpu_index * this->m], this->m,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.IS_weights, &host_temp_IS_weights[gpu_index * P.nsim], P.nsim,
P.stream);
memory_manager_GPU::cpyToHost<DOUBLE>(P.sum_det_weights, &host_temp_sum_det_weights[gpu_index], 1, P.stream);
}
SYSDEBUG_LOGGER << "waiting for memory transfer to complete" << ENDL;
// wait until all results are downloaded
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
startCuda(gpu_index);
cudaErrchk(cudaStreamSynchronize(this->simP[gpu_index].stream));
SYSDEBUG_LOGGER << "synchronized stream on device " << gpu_index << ENDL;
}
// sum up determinant weights
host_sum_det_weights[0] = 0;
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "the sum of the determinants of GPU " << gpu_index << " is: "
<< host_temp_sum_det_weights[gpu_index] << ENDL;
host_sum_det_weights[0] += host_temp_sum_det_weights[gpu_index];
}
SYSDEBUG_LOGGER << "host_sum_det_weights[0] = " << host_sum_det_weights[0] << ENDL;
// average the means from the different GPUs
for (size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
SYSDEBUG_LOGGER << "gpu_index = " << gpu_index << ENDL;
// calculate weight of the batch produced by this GPU
DOUBLE weight_scale = host_temp_sum_det_weights[gpu_index] / host_sum_det_weights[0];
SYSDEBUG_LOGGER << "weight_scale = " << host_temp_sum_det_weights[gpu_index] << " / " << host_sum_det_weights[0]
<< " = " << weight_scale << ENDL;
// write mean_m_t
for (size_t i = 0; i < this->m * this->max_p; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_m_t[i] = 0;
}
host_data_mean_m_t[i] += weight_scale * host_temp_mean_m_t[gpu_index * this->m * this->max_p + i];
}
// write mean_C_t
for (size_t i = 0; i < this->m * this->max_p * this->max_p; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_C_t[i] = 0;
}
host_data_mean_C_t[i] += weight_scale * host_temp_mean_C_t[gpu_index * this->m * this->max_p * this->max_p + i];
}
// write mean_n_t, mean_s_t
for (size_t i = 0; i < this->m; i++) {
if (gpu_index == 0) { // initialize memory to 0
host_data_mean_n_t[i] = 0;
host_data_mean_s_t[i] = 0;
}
host_data_mean_n_t[i] += weight_scale * host_temp_mean_n_t[gpu_index * this->m + i];
host_data_mean_s_t[i] += weight_scale * host_temp_mean_s_t[gpu_index * this->m + i];
}
// write IS_weights
DOUBLE sum_weights = 0;
for (size_t i = 0; i < this->simP[gpu_index].nsim; i++) {
host_data_IS_weights[gpu_index * this->simP[gpu_index].nsim + i] = weight_scale
* host_temp_IS_weights[gpu_index * this->simP[gpu_index].nsim + i];
sum_weights += host_data_IS_weights[gpu_index * this->simP[gpu_index].nsim + i];
}
SYSDEBUG_LOGGER << "sum_weights = " << sum_weights << ENDL;
}
SYSDEBUG_LOGGER << "before clearing host_MEM_temp" << ENDL;
host_MEM_temp.clear();
}
/*
*
*
*
*
*
*
*/
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkInitialized() const {
if (!this->memory_initialized) {
ERROR_LOGGER << "The device memory is not initialized." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkSimInitialized() const {
if (!this->sim_memory_initialized) {
ERROR_LOGGER << "The simulation device memory is not initialized." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkUseStateEvolutionMatrix() const {
if (!this->use_state_evolution_matrix) {
ERROR_LOGGER << "The use of state evolution matrices is disabled." << ENDL;
return false;
}
return true;
}
template<typename DOUBLE> inline bool SGDLM::HostWrapperImpl<DOUBLE>::checkPrior(bool is_prior) const {
if (is_prior) {
if (!this->is_prior) {
ERROR_LOGGER << "This function can only be executed when the parameters are prior parameters." << ENDL;
return false;
}
return true;
} else {
if (this->is_prior) {
ERROR_LOGGER << "This function can only be executed when the parameters are posterior parameters." << ENDL;
return false;
}
return true;
}
}
/*
*
*
*
*
*
*
*/
template<typename DOUBLE> inline void SGDLM::HostWrapperImpl<DOUBLE>::allocate_C_t_memory() { // allocate C_t_buffer to simulation memory
SYSDEBUG_LOGGER << "HostWrapperImpl::allocate_C_t_memory()" << ENDL;
for (std::size_t gpu_index = 0; gpu_index < this->no_gpus; gpu_index++) {
startCuda(gpu_index);
simPointers<DOUBLE, memory_manager_GPU>& P = this->simP[gpu_index];
memory_manager_GPU& MEM_sim = P.MEM_sim;
P.data_C_t_buffer = MEM_sim.device_alloc_vec<DOUBLE>(this->m * this->max_p * this->max_p);
P.C_t_buffer = NULL;
MEM_sim.cpyToDeviceAsPtrArray<DOUBLE>(P.data_C_t_buffer, this->m, this->max_p * this->max_p, P.C_t_buffer);
}
}
/*
*
*
*
*
*
*
*/
// explicit instantiation
template class SGDLM::HostWrapperImpl<DOUBLETYPE>;
|
266d601255e047517acfa41cc319d4f5d1f69bc2.hip | // !!! This is a file automatically generated by hipify!!!
/* Gaussian Elimination.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Gaussian Elimination is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Gaussian Elimination is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Gaussian Elimination. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
/**************/
/* TIMING GPU */
/**************/
#include "TimingGPU.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
struct PrivateTimingGPU {
hipEvent_t start;
hipEvent_t stop;
};
// default constructor
TimingGPU::TimingGPU() { privateTimingGPU = new PrivateTimingGPU; }
// default destructor
TimingGPU::~TimingGPU() { }
void TimingGPU::StartCounter()
{
hipEventCreate(&((*privateTimingGPU).start));
hipEventCreate(&((*privateTimingGPU).stop));
hipEventRecord((*privateTimingGPU).start,0);
}
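// Same as StartCounter(), but creates the events with hipEventBlockingSync
// so that hipEventSynchronize() yields the CPU instead of busy-waiting.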
void TimingGPU::StartCounterFlags()
{
int eventflags = hipEventBlockingSync;
hipEventCreateWithFlags(&((*privateTimingGPU).start),eventflags);
hipEventCreateWithFlags(&((*privateTimingGPU).stop),eventflags);
hipEventRecord((*privateTimingGPU).start,0);
}
// Gets the counter in ms
float TimingGPU::GetCounter()
{
float time;
hipEventRecord((*privateTimingGPU).stop, 0);
hipEventSynchronize((*privateTimingGPU).stop);
hipEventElapsedTime(&time,(*privateTimingGPU).start,(*privateTimingGPU).stop);
return time;
}
| 266d601255e047517acfa41cc319d4f5d1f69bc2.cu | /* Gaussian Elimination.
*
* Copyright (C) 2012-2013 Orange Owl Solutions.
*
* This file is part of Bluebird Library.
* Gaussian Elimination is free software: you can redistribute it and/or modify
* it under the terms of the Lesser GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Gaussian Elimination is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Lesser GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Gaussian Elimination. If not, see <http://www.gnu.org/licenses/>.
*
*
* For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/
* or send an e-mail to: [email protected]
*
*
*/
/**************/
/* TIMING GPU */
/**************/
#include "TimingGPU.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
struct PrivateTimingGPU {
cudaEvent_t start;
cudaEvent_t stop;
};
// default constructor
TimingGPU::TimingGPU() { privateTimingGPU = new PrivateTimingGPU; }
// default destructor
TimingGPU::~TimingGPU() { }
void TimingGPU::StartCounter()
{
cudaEventCreate(&((*privateTimingGPU).start));
cudaEventCreate(&((*privateTimingGPU).stop));
cudaEventRecord((*privateTimingGPU).start,0);
}
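// Same as StartCounter(), but creates the events with cudaEventBlockingSync
// so that cudaEventSynchronize() yields the CPU instead of busy-waiting.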
void TimingGPU::StartCounterFlags()
{
int eventflags = cudaEventBlockingSync;
cudaEventCreateWithFlags(&((*privateTimingGPU).start),eventflags);
cudaEventCreateWithFlags(&((*privateTimingGPU).stop),eventflags);
cudaEventRecord((*privateTimingGPU).start,0);
}
// Gets the counter in ms
float TimingGPU::GetCounter()
{
float time;
cudaEventRecord((*privateTimingGPU).stop, 0);
cudaEventSynchronize((*privateTimingGPU).stop);
cudaEventElapsedTime(&time,(*privateTimingGPU).start,(*privateTimingGPU).stop);
return time;
}
|
36323a40dec692ece21fcbf5d12f89676d2a8cb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Std.h>
#include <CudaSupport.h>
#include <Definitions.h>
#include <CudaObjects.h>
#include <kernel.h>
///////////////////////////////////////////////////////////////////////////////
const dim3 BlockDim( BlockDimX, BlockDimY );
const size_t BlockSize = BlockDimX * BlockDimY;
const size_t SharedMemSize = BlockSize * sizeof( NumericType );
const size_t SharedMem2Size = SharedMemSize * 2;
///////////////////////////////////////////////////////////////////////////////
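// Block-wide max reduction in shared memory: every thread stores its `value`,
// then a synchronized tree reduction leaves the block maximum in shared[0]
// (consumed by thread 0 of the block).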
__device__ void blockReduceMax( volatile NumericType* shared,
const size_t threadIndex, const NumericType value )
{
shared[threadIndex] = value;
__syncthreads();
if( BlockSize >= 512 && threadIndex < 256 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 256] );
}
__syncthreads();
if( BlockSize >= 256 && threadIndex < 128 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 128] );
}
__syncthreads();
if( BlockSize >= 128 && threadIndex < 64 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 64] );
}
__syncthreads();
if( BlockSize >= 64 && threadIndex < 32 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 32] );
}
__syncthreads();
if( BlockSize >= 32 && threadIndex < 16 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 16] );
}
__syncthreads();
if( BlockSize >= 16 && threadIndex < 8 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 8] );
}
__syncthreads();
if( BlockSize >= 8 && threadIndex < 4 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 4] );
}
__syncthreads();
if( BlockSize >= 4 && threadIndex < 2 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 2] );
}
__syncthreads();
if( BlockSize >= 2 && threadIndex < 1 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 1] );
}
}
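// Block-wide sum reduction of two values at once, stored interleaved in
// shared memory (callers pass threadIndex * 2); the two partial sums end up
// in shared[0] and shared[1].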
__device__ void blockReduceSumTwo( volatile NumericType* shared,
const size_t threadIndex, const NumericType value1, const NumericType value2 )
{
shared[threadIndex] = value1;
shared[threadIndex + 1] = value2;
__syncthreads();
if( BlockSize >= 512 && threadIndex < 512 ) {
shared[threadIndex] += shared[threadIndex + 512];
shared[threadIndex + 1] += shared[threadIndex + 512 + 1];
}
__syncthreads();
if( BlockSize >= 256 && threadIndex < 256 ) {
shared[threadIndex] += shared[threadIndex + 256];
shared[threadIndex + 1] += shared[threadIndex + 256 + 1];
}
__syncthreads();
if( BlockSize >= 128 && threadIndex < 128 ) {
shared[threadIndex] += shared[threadIndex + 128];
shared[threadIndex + 1] += shared[threadIndex + 128 + 1];
}
__syncthreads();
if( BlockSize >= 64 && threadIndex < 64 ) {
shared[threadIndex] += shared[threadIndex + 64];
shared[threadIndex + 1] += shared[threadIndex + 64 + 1];
}
__syncthreads();
if( BlockSize >= 32 && threadIndex < 32 ) {
shared[threadIndex] += shared[threadIndex + 32];
shared[threadIndex + 1] += shared[threadIndex + 32 + 1];
}
__syncthreads();
if( BlockSize >= 16 && threadIndex < 16 ) {
shared[threadIndex] += shared[threadIndex + 16];
shared[threadIndex + 1] += shared[threadIndex + 16 + 1];
}
__syncthreads();
if( BlockSize >= 8 && threadIndex < 8 ) {
shared[threadIndex] += shared[threadIndex + 8];
shared[threadIndex + 1] += shared[threadIndex + 8 + 1];
}
__syncthreads();
if( BlockSize >= 4 && threadIndex < 4 ) {
shared[threadIndex] += shared[threadIndex + 4];
shared[threadIndex + 1] += shared[threadIndex + 4 + 1];
}
__syncthreads();
if( BlockSize >= 2 && threadIndex < 2 ) {
shared[threadIndex] += shared[threadIndex + 2];
shared[threadIndex + 1] += shared[threadIndex + 2 + 1];
}
}
///////////////////////////////////////////////////////////////////////////////
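// Five-point finite-difference approximation of the Laplace operator at
// (x, y) on the non-uniform grid: one-sided first differences in each
// direction, combined and divided by the local average step.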
__device__ NumericType laplasOperator( cudaMatrix matrix, cudaUniformGrid grid, size_t x, size_t y )
{
const NumericType ldx = ( matrix( x, y ) - matrix( x - 1, y ) ) / grid.X.Step( x - 1 );
const NumericType rdx = ( matrix( x + 1, y ) - matrix( x, y ) ) / grid.X.Step( x );
const NumericType tdy = ( matrix( x, y ) - matrix( x, y - 1 ) ) / grid.Y.Step( y - 1 );
const NumericType bdy = ( matrix( x, y + 1 ) - matrix( x, y ) ) / grid.Y.Step( y );
const NumericType dx = ( ldx - rdx ) / grid.X.AverageStep( x );
const NumericType dy = ( tdy - bdy ) / grid.Y.AverageStep( y );
return ( dx + dy );
}
///////////////////////////////////////////////////////////////////////////////
// Compute the residual rij at the interior grid points.
__global__ void kernelCalcR( cudaMatrix p, cudaUniformGrid grid, cudaMatrix r )
{
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
if( x < ( p.SizeX() - 1 ) && y < ( p.SizeY() - 1 ) ) {
r( x, y ) = laplasOperator( p, grid, x, y ) - F( grid.X[x], grid.Y[y] );
}
}
// Compute the values gij at the interior grid points.
__global__ void kernelCalcG( cudaMatrix r, const NumericType alpha, cudaMatrix g )
{
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
if( x < ( g.SizeX() - 1 ) && y < ( g.SizeY() - 1 ) ) {
g( x, y ) = r( x, y ) - alpha * g( x, y );
}
}
// Compute the values pij at the interior grid points; the max-norm of the update is returned.
__global__ void kernelCalcP( cudaMatrix g, const NumericType tau, cudaMatrix p,
cudaMatrix differences )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType difference = 0;
if( x < ( p.SizeX() - 1 ) && y < ( p.SizeY() - 1 ) ) {
const NumericType newValue = p( x, y ) - tau * g( x, y );
difference = abs( newValue - p( x, y ) );
p( x, y ) = newValue;
}
blockReduceMax( shared, threadIndex, difference );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
differences( blockIndex, 0 ) = shared[0];
}
}
// Compute alpha.
__global__ void kernelCalcAlpha( cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix alphas )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType numerator = 0;
NumericType denominator = 0;
if( x < ( r.SizeX() - 1 ) && y < ( r.SizeY() - 1 ) ) {
const NumericType common = g( x, y ) * grid.X.AverageStep( x ) * grid.Y.AverageStep( y );
numerator = laplasOperator( r, grid, x, y ) * common;
denominator = laplasOperator( g, grid, x, y ) * common;
}
blockReduceSumTwo( shared, threadIndex * 2, numerator, denominator );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
alphas( blockIndex, 0 ) = shared[0];
alphas( blockIndex, 1 ) = shared[1];
}
}
// Compute tau.
__global__ void kernelCalcTau( cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix taus )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType numerator = 0;
NumericType denominator = 0;
if( x < ( r.SizeX() - 1 ) && y < ( r.SizeY() - 1 ) ) {
const NumericType common = g( x, y ) * grid.X.AverageStep( x ) * grid.Y.AverageStep( y );
numerator = r( x, y ) * common;
denominator = laplasOperator( g, grid, x, y ) * common;
}
blockReduceSumTwo( shared, threadIndex * 2, numerator, denominator );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
taus( blockIndex, 0 ) = shared[0];
taus( blockIndex, 1 ) = shared[1];
}
}
///////////////////////////////////////////////////////////////////////////////
namespace { // anonymous namespace
inline NumericType CalcMax( cudaMatrix buffer )
{
vector<NumericType> differences( buffer.SizeX() );
buffer.GetPart( CMatrixPart( 0, buffer.SizeX(), 0, 1 ), differences );
return *max_element( differences.begin(), differences.end() );
}
inline CFraction CalcFraction( cudaMatrix buffer )
{
vector<NumericType> values( buffer.SizeX() * 2 );
buffer.GetPart( CMatrixPart( 0, buffer.SizeX(), 0, 2 ), values );
typedef vector<NumericType>::const_iterator CIterator;
const CIterator middle = values.begin() + buffer.SizeX();
const NumericType numerator = accumulate<CIterator, NumericType>( values.begin(), middle, 0 );
const NumericType denominator = accumulate<CIterator, NumericType>( middle, values.end(), 0 );
return CFraction( numerator, denominator );
}
} // end of anonymous namespace
///////////////////////////////////////////////////////////////////////////////
// Compute the residual rij at the interior grid points.
void CalcR( dim3 gridDim, cudaMatrix p, cudaUniformGrid grid, cudaMatrix r )
{
hipLaunchKernelGGL(( kernelCalcR), dim3(gridDim), dim3(BlockDim), 0, 0, p, grid, r );
}
// Compute the values gij at the interior grid points.
void CalcG( dim3 gridDim, cudaMatrix r, const NumericType alpha, cudaMatrix g )
{
hipLaunchKernelGGL(( kernelCalcG), dim3(gridDim), dim3(BlockDim), 0, 0, r, alpha, g );
}
// Compute the values pij at the interior grid points; the max-norm of the update is returned.
NumericType CalcP( dim3 gridDim,
cudaMatrix g, const NumericType tau, cudaMatrix p,
cudaMatrix buffer )
{
hipLaunchKernelGGL(( kernelCalcP), dim3(gridDim), dim3(BlockDim), SharedMemSize, 0, g, tau, p, buffer );
return CalcMax( buffer );
}
// Compute alpha.
CFraction CalcAlpha( dim3 gridDim,
cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix buffer )
{
hipLaunchKernelGGL(( kernelCalcAlpha), dim3(gridDim), dim3(BlockDim), SharedMem2Size, 0, r, g, grid, buffer );
return CalcFraction( buffer );
}
// Compute tau.
CFraction CalcTau( dim3 gridDim,
cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix buffer )
{
hipLaunchKernelGGL(( kernelCalcTau), dim3(gridDim), dim3(BlockDim), SharedMem2Size, 0, r, g, grid, buffer );
return CalcFraction( buffer );
}
///////////////////////////////////////////////////////////////////////////////
| 36323a40dec692ece21fcbf5d12f89676d2a8cb7.cu | #include <Std.h>
#include <CudaSupport.h>
#include <Definitions.h>
#include <CudaObjects.h>
#include <kernel.h>
///////////////////////////////////////////////////////////////////////////////
const dim3 BlockDim( BlockDimX, BlockDimY );
const size_t BlockSize = BlockDimX * BlockDimY;
const size_t SharedMemSize = BlockSize * sizeof( NumericType );
const size_t SharedMem2Size = SharedMemSize * 2;
///////////////////////////////////////////////////////////////////////////////
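// Block-wide max reduction in shared memory: every thread stores its `value`,
// then a synchronized tree reduction leaves the block maximum in shared[0]
// (consumed by thread 0 of the block).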
__device__ void blockReduceMax( volatile NumericType* shared,
const size_t threadIndex, const NumericType value )
{
shared[threadIndex] = value;
__syncthreads();
if( BlockSize >= 512 && threadIndex < 256 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 256] );
}
__syncthreads();
if( BlockSize >= 256 && threadIndex < 128 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 128] );
}
__syncthreads();
if( BlockSize >= 128 && threadIndex < 64 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 64] );
}
__syncthreads();
if( BlockSize >= 64 && threadIndex < 32 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 32] );
}
__syncthreads();
if( BlockSize >= 32 && threadIndex < 16 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 16] );
}
__syncthreads();
if( BlockSize >= 16 && threadIndex < 8 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 8] );
}
__syncthreads();
if( BlockSize >= 8 && threadIndex < 4 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 4] );
}
__syncthreads();
if( BlockSize >= 4 && threadIndex < 2 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 2] );
}
__syncthreads();
if( BlockSize >= 2 && threadIndex < 1 ) {
shared[threadIndex] = max( shared[threadIndex], shared[threadIndex + 1] );
}
}
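// Block-wide sum reduction of two values at once, stored interleaved in
// shared memory (callers pass threadIndex * 2); the two partial sums end up
// in shared[0] and shared[1].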
__device__ void blockReduceSumTwo( volatile NumericType* shared,
const size_t threadIndex, const NumericType value1, const NumericType value2 )
{
shared[threadIndex] = value1;
shared[threadIndex + 1] = value2;
__syncthreads();
if( BlockSize >= 512 && threadIndex < 512 ) {
shared[threadIndex] += shared[threadIndex + 512];
shared[threadIndex + 1] += shared[threadIndex + 512 + 1];
}
__syncthreads();
if( BlockSize >= 256 && threadIndex < 256 ) {
shared[threadIndex] += shared[threadIndex + 256];
shared[threadIndex + 1] += shared[threadIndex + 256 + 1];
}
__syncthreads();
if( BlockSize >= 128 && threadIndex < 128 ) {
shared[threadIndex] += shared[threadIndex + 128];
shared[threadIndex + 1] += shared[threadIndex + 128 + 1];
}
__syncthreads();
if( BlockSize >= 64 && threadIndex < 64 ) {
shared[threadIndex] += shared[threadIndex + 64];
shared[threadIndex + 1] += shared[threadIndex + 64 + 1];
}
__syncthreads();
if( BlockSize >= 32 && threadIndex < 32 ) {
shared[threadIndex] += shared[threadIndex + 32];
shared[threadIndex + 1] += shared[threadIndex + 32 + 1];
}
__syncthreads();
if( BlockSize >= 16 && threadIndex < 16 ) {
shared[threadIndex] += shared[threadIndex + 16];
shared[threadIndex + 1] += shared[threadIndex + 16 + 1];
}
__syncthreads();
if( BlockSize >= 8 && threadIndex < 8 ) {
shared[threadIndex] += shared[threadIndex + 8];
shared[threadIndex + 1] += shared[threadIndex + 8 + 1];
}
__syncthreads();
if( BlockSize >= 4 && threadIndex < 4 ) {
shared[threadIndex] += shared[threadIndex + 4];
shared[threadIndex + 1] += shared[threadIndex + 4 + 1];
}
__syncthreads();
if( BlockSize >= 2 && threadIndex < 2 ) {
shared[threadIndex] += shared[threadIndex + 2];
shared[threadIndex + 1] += shared[threadIndex + 2 + 1];
}
}
///////////////////////////////////////////////////////////////////////////////
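// Five-point finite-difference approximation of the Laplace operator at
// (x, y) on the non-uniform grid: one-sided first differences in each
// direction, combined and divided by the local average step.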
__device__ NumericType laplasOperator( cudaMatrix matrix, cudaUniformGrid grid, size_t x, size_t y )
{
const NumericType ldx = ( matrix( x, y ) - matrix( x - 1, y ) ) / grid.X.Step( x - 1 );
const NumericType rdx = ( matrix( x + 1, y ) - matrix( x, y ) ) / grid.X.Step( x );
const NumericType tdy = ( matrix( x, y ) - matrix( x, y - 1 ) ) / grid.Y.Step( y - 1 );
const NumericType bdy = ( matrix( x, y + 1 ) - matrix( x, y ) ) / grid.Y.Step( y );
const NumericType dx = ( ldx - rdx ) / grid.X.AverageStep( x );
const NumericType dy = ( tdy - bdy ) / grid.Y.AverageStep( y );
return ( dx + dy );
}
///////////////////////////////////////////////////////////////////////////////
// Compute the residual rij at the interior grid points.
__global__ void kernelCalcR( cudaMatrix p, cudaUniformGrid grid, cudaMatrix r )
{
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
if( x < ( p.SizeX() - 1 ) && y < ( p.SizeY() - 1 ) ) {
r( x, y ) = laplasOperator( p, grid, x, y ) - F( grid.X[x], grid.Y[y] );
}
}
// Compute the values gij at the interior grid points.
__global__ void kernelCalcG( cudaMatrix r, const NumericType alpha, cudaMatrix g )
{
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
if( x < ( g.SizeX() - 1 ) && y < ( g.SizeY() - 1 ) ) {
g( x, y ) = r( x, y ) - alpha * g( x, y );
}
}
// Compute the values pij at the interior grid points; the max-norm of the update is returned.
__global__ void kernelCalcP( cudaMatrix g, const NumericType tau, cudaMatrix p,
cudaMatrix differences )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType difference = 0;
if( x < ( p.SizeX() - 1 ) && y < ( p.SizeY() - 1 ) ) {
const NumericType newValue = p( x, y ) - tau * g( x, y );
difference = abs( newValue - p( x, y ) );
p( x, y ) = newValue;
}
blockReduceMax( shared, threadIndex, difference );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
differences( blockIndex, 0 ) = shared[0];
}
}
// Compute alpha.
__global__ void kernelCalcAlpha( cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix alphas )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType numerator = 0;
NumericType denominator = 0;
if( x < ( r.SizeX() - 1 ) && y < ( r.SizeY() - 1 ) ) {
const NumericType common = g( x, y ) * grid.X.AverageStep( x ) * grid.Y.AverageStep( y );
numerator = laplasOperator( r, grid, x, y ) * common;
denominator = laplasOperator( g, grid, x, y ) * common;
}
blockReduceSumTwo( shared, threadIndex * 2, numerator, denominator );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
alphas( blockIndex, 0 ) = shared[0];
alphas( blockIndex, 1 ) = shared[1];
}
}
// Compute tau.
__global__ void kernelCalcTau( cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix taus )
{
extern __shared__ NumericType shared[];
const size_t x = BlockDimX * blockIdx.x + threadIdx.x + 1;
const size_t y = BlockDimY * blockIdx.y + threadIdx.y + 1;
const size_t threadIndex = threadIdx.y * BlockDimX + threadIdx.x;
NumericType numerator = 0;
NumericType denominator = 0;
if( x < ( r.SizeX() - 1 ) && y < ( r.SizeY() - 1 ) ) {
const NumericType common = g( x, y ) * grid.X.AverageStep( x ) * grid.Y.AverageStep( y );
numerator = r( x, y ) * common;
denominator = laplasOperator( g, grid, x, y ) * common;
}
blockReduceSumTwo( shared, threadIndex * 2, numerator, denominator );
if( threadIndex == 0 ) {
const size_t blockIndex = gridDim.x * blockIdx.y + blockIdx.x;
taus( blockIndex, 0 ) = shared[0];
taus( blockIndex, 1 ) = shared[1];
}
}
///////////////////////////////////////////////////////////////////////////////
namespace { // anonymous namespace
inline NumericType CalcMax( cudaMatrix buffer )
{
vector<NumericType> differences( buffer.SizeX() );
buffer.GetPart( CMatrixPart( 0, buffer.SizeX(), 0, 1 ), differences );
return *max_element( differences.begin(), differences.end() );
}
inline CFraction CalcFraction( cudaMatrix buffer )
{
vector<NumericType> values( buffer.SizeX() * 2 );
buffer.GetPart( CMatrixPart( 0, buffer.SizeX(), 0, 2 ), values );
typedef vector<NumericType>::const_iterator CIterator;
const CIterator middle = values.begin() + buffer.SizeX();
const NumericType numerator = accumulate<CIterator, NumericType>( values.begin(), middle, 0 );
const NumericType denominator = accumulate<CIterator, NumericType>( middle, values.end(), 0 );
return CFraction( numerator, denominator );
}
} // end of anonymous namespace
///////////////////////////////////////////////////////////////////////////////
// Compute the residual rij at the interior grid points.
void CalcR( dim3 gridDim, cudaMatrix p, cudaUniformGrid grid, cudaMatrix r )
{
kernelCalcR<<<gridDim, BlockDim>>>( p, grid, r );
}
// Compute the values gij at the interior grid points.
void CalcG( dim3 gridDim, cudaMatrix r, const NumericType alpha, cudaMatrix g )
{
kernelCalcG<<<gridDim, BlockDim>>>( r, alpha, g );
}
// Compute the values pij at the interior grid points; the max-norm of the update is returned.
NumericType CalcP( dim3 gridDim,
cudaMatrix g, const NumericType tau, cudaMatrix p,
cudaMatrix buffer )
{
kernelCalcP<<<gridDim, BlockDim, SharedMemSize>>>( g, tau, p, buffer );
return CalcMax( buffer );
}
// Compute alpha.
CFraction CalcAlpha( dim3 gridDim,
cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix buffer )
{
kernelCalcAlpha<<<gridDim, BlockDim, SharedMem2Size>>>( r, g, grid, buffer );
return CalcFraction( buffer );
}
// Compute tau.
CFraction CalcTau( dim3 gridDim,
cudaMatrix r, cudaMatrix g, cudaUniformGrid grid,
cudaMatrix buffer )
{
kernelCalcTau<<<gridDim, BlockDim, SharedMem2Size>>>( r, g, grid, buffer );
return CalcFraction( buffer );
}
///////////////////////////////////////////////////////////////////////////////
|
739a0c71068196e272a2f4d5a404d21e55d94dd6.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CHECK(cmd) \
{\
hipError_t error = cmd;\
if (error != hipSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", hipGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x ;
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
}
int main(int argc, char *argv[])
{
float *A_d, *C_d;
float *A_h, *C_h;
size_t N = 1000000;
size_t Nbytes = N * sizeof(float);
hipDeviceProp_t props;
CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
C_h = (float*)malloc(Nbytes);
CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
CHECK(hipMalloc(&A_d, Nbytes));
CHECK(hipMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
const unsigned blocks = 512;
const unsigned threadsPerBlock = 256;
printf ("info: launch 'vector_square' kernel\n");
hipLaunchKernelGGL(( vector_square) , dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
printf ("info: copy Device2Host\n");
CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
CHECK(hipErrorUnknown);
}
}
printf ("PASSED!\n");
}
| 739a0c71068196e272a2f4d5a404d21e55d94dd6.cu | /*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
template <typename T>
__global__ void
vector_square(T *C_d, T *A_d, size_t N)
{
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
size_t stride = blockDim.x * gridDim.x ;
for (size_t i=offset; i<N; i+=stride) {
C_d[i] = A_d[i] * A_d[i];
}
}
int main(int argc, char *argv[])
{
float *A_d, *C_d;
float *A_h, *C_h;
size_t N = 1000000;
size_t Nbytes = N * sizeof(float);
cudaDeviceProp props;
CHECK(cudaGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
CHECK(A_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
C_h = (float*)malloc(Nbytes);
CHECK(C_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
CHECK(cudaMalloc(&A_d, Nbytes));
CHECK(cudaMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
CHECK ( cudaMemcpy(A_d, A_h, Nbytes, cudaMemcpyHostToDevice));
const unsigned blocks = 512;
const unsigned threadsPerBlock = 256;
printf ("info: launch 'vector_square' kernel\n");
vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
printf ("info: copy Device2Host\n");
CHECK ( cudaMemcpy(C_h, C_d, Nbytes, cudaMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
CHECK(cudaErrorUnknown);
}
}
printf ("PASSED!\n");
}
|
51dd321167cce88184ec18b6b3476baa143c2d77.hip | // !!! This is a file automatically generated by hipify!!!
// author: Felice Pantaleo, CERN, 2018
#include <cassert>
#include <iostream>
#include <new>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/GPUSimpleVector.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
__global__ void vector_pushback(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->push_back(index);
}
__global__ void vector_reset(GPU::SimpleVector<int> *foo) { foo->reset(); }
__global__ void vector_emplace_back(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->emplace_back(index);
}
int main() {
requireCUDADevices();
auto maxN = 10000;
GPU::SimpleVector<int> *obj_ptr = nullptr;
GPU::SimpleVector<int> *d_obj_ptr = nullptr;
GPU::SimpleVector<int> *tmp_obj_ptr = nullptr;
int *data_ptr = nullptr;
int *d_data_ptr = nullptr;
cudaCheck(hipHostMalloc(&obj_ptr, sizeof(GPU::SimpleVector<int>)));
cudaCheck(hipHostMalloc(&data_ptr, maxN * sizeof(int)));
cudaCheck(hipMalloc(&d_data_ptr, maxN * sizeof(int)));
auto v = GPU::make_SimpleVector(obj_ptr, maxN, data_ptr);
cudaCheck(hipHostMalloc(&tmp_obj_ptr, sizeof(GPU::SimpleVector<int>)));
GPU::make_SimpleVector(tmp_obj_ptr, maxN, d_data_ptr);
assert(tmp_obj_ptr->size() == 0);
assert(tmp_obj_ptr->capacity() == static_cast<int>(maxN));
cudaCheck(hipMalloc(&d_obj_ptr, sizeof(GPU::SimpleVector<int>)));
// ... and copy the object to the device.
cudaCheck(hipMemcpy(d_obj_ptr, tmp_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
int numBlocks = 5;
int numThreadsPerBlock = 256;
hipLaunchKernelGGL(( vector_pushback), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
hipLaunchKernelGGL(( vector_reset), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == 0);
hipLaunchKernelGGL(( vector_emplace_back), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_obj_ptr);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
cudaCheck(hipMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), hipMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
cudaCheck(hipMemcpy(data_ptr, d_data_ptr, obj_ptr->size() * sizeof(int), hipMemcpyDefault));
cudaCheck(hipHostFree(obj_ptr));
cudaCheck(hipHostFree(data_ptr));
cudaCheck(hipHostFree(tmp_obj_ptr));
cudaCheck(hipFree(d_data_ptr));
cudaCheck(hipFree(d_obj_ptr));
std::cout << "TEST PASSED" << std::endl;
return 0;
}
| 51dd321167cce88184ec18b6b3476baa143c2d77.cu | // author: Felice Pantaleo, CERN, 2018
#include <cassert>
#include <iostream>
#include <new>
#include <cuda.h>
#include <cuda_runtime.h>
#include "HeterogeneousCore/CUDAUtilities/interface/GPUSimpleVector.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireCUDADevices.h"
__global__ void vector_pushback(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->push_back(index);
}
__global__ void vector_reset(GPU::SimpleVector<int> *foo) { foo->reset(); }
__global__ void vector_emplace_back(GPU::SimpleVector<int> *foo) {
auto index = threadIdx.x + blockIdx.x * blockDim.x;
foo->emplace_back(index);
}
int main() {
requireCUDADevices();
auto maxN = 10000;
GPU::SimpleVector<int> *obj_ptr = nullptr;
GPU::SimpleVector<int> *d_obj_ptr = nullptr;
GPU::SimpleVector<int> *tmp_obj_ptr = nullptr;
int *data_ptr = nullptr;
int *d_data_ptr = nullptr;
cudaCheck(cudaMallocHost(&obj_ptr, sizeof(GPU::SimpleVector<int>)));
cudaCheck(cudaMallocHost(&data_ptr, maxN * sizeof(int)));
cudaCheck(cudaMalloc(&d_data_ptr, maxN * sizeof(int)));
auto v = GPU::make_SimpleVector(obj_ptr, maxN, data_ptr);
cudaCheck(cudaMallocHost(&tmp_obj_ptr, sizeof(GPU::SimpleVector<int>)));
GPU::make_SimpleVector(tmp_obj_ptr, maxN, d_data_ptr);
assert(tmp_obj_ptr->size() == 0);
assert(tmp_obj_ptr->capacity() == static_cast<int>(maxN));
cudaCheck(cudaMalloc(&d_obj_ptr, sizeof(GPU::SimpleVector<int>)));
// ... and copy the object to the device.
cudaCheck(cudaMemcpy(d_obj_ptr, tmp_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
int numBlocks = 5;
int numThreadsPerBlock = 256;
vector_pushback<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
vector_reset<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == 0);
vector_emplace_back<<<numBlocks, numThreadsPerBlock>>>(d_obj_ptr);
cudaCheck(cudaGetLastError());
cudaCheck(cudaDeviceSynchronize());
cudaCheck(cudaMemcpy(obj_ptr, d_obj_ptr, sizeof(GPU::SimpleVector<int>), cudaMemcpyDefault));
assert(obj_ptr->size() == (numBlocks * numThreadsPerBlock < maxN ? numBlocks * numThreadsPerBlock : maxN));
cudaCheck(cudaMemcpy(data_ptr, d_data_ptr, obj_ptr->size() * sizeof(int), cudaMemcpyDefault));
cudaCheck(cudaFreeHost(obj_ptr));
cudaCheck(cudaFreeHost(data_ptr));
cudaCheck(cudaFreeHost(tmp_obj_ptr));
cudaCheck(cudaFree(d_data_ptr));
cudaCheck(cudaFree(d_obj_ptr));
std::cout << "TEST PASSED" << std::endl;
return 0;
}
|
11cb666d23300781c4879363292b2ef940ab8236.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv){
// define total data elements
int nElem = 1024;
// define grid and block structure
dim3 block(1024);
dim3 grid((nElem+block.x-1)/block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 512;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 256;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 128;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset device before you leave
hipDeviceReset();
return 0;
}
| 11cb666d23300781c4879363292b2ef940ab8236.cu | #include <cuda_runtime.h>
#include <stdio.h>
int main(int argc, char **argv){
// define total data elements
int nElem = 1024;
// define grid and block structure
dim3 block(1024);
dim3 grid((nElem+block.x-1)/block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 512;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 256;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset block
block.x = 128;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset device before you leave
cudaDeviceReset();
return 0;
}
|
44c33cfbe05df8e28a62529364bfd3898c437bf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/reduction_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
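// Forward pass of the PROD reduction: each output Y(i, j) is the product of
// the group of G consecutive inputs X(i, j*G) ... X(i, j*G + G - 1).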
template <typename Dtype>
__global__ void prod_reduction_forward(
const int N, const int K, const int M, const int G,
const Dtype* const X,
Dtype* const Y)
{
const int NK = N*K;
CUDA_KERNEL_LOOP(ij, NK) //for each entry of the matrix Y
{
const int i = ij/K;
const int j = ij%K;
Y[ij]=(Dtype)1.0;
for(int g=0; g<G; ++g) {
Y[ij] *= X[i*M+j*G+g];
}
}//for ij
}
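// Backward pass of the PROD reduction: dX(i, j) = dY * Y / X for the group
// the element belongs to, or 0 when the group product Y is 0.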
template <typename Dtype>
__global__ void prod_reduction_backward(
const int N, const int M, const int K, const int G,
const Dtype* const X,
const Dtype* const Y,
const Dtype* const dY,
Dtype* const dX)
{
const int NM = N*M;
CUDA_KERNEL_LOOP(ij, NM) //for each entry of the matrix dX
{
const int i = ij/M;
const int j = ij%M;
const int dst = i*K+j/G;
dX[ij]=
Y[dst]==0 ?
(Dtype)0.0 :
dY[dst]*Y[dst]/X[ij];
}//for ij
}
template <typename Dtype>
void ReductionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* mult_data = NULL;
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
if (op_ == ReductionParameter_ReductionOp_PROD) {
const int N = bottom[0]->shape(0);
const int M = bottom[0]->shape(1);
const int K = top[0]->shape(1);
const int G = M/K;
Dtype* top_data_gpu = top[0]->mutable_gpu_data();
const int NK = N*K;
hipLaunchKernelGGL(( prod_reduction_forward<Dtype>), dim3(CAFFE_GET_BLOCKS(NK)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N,K,M,G,
bottom_data,
top_data_gpu
);
CUDA_POST_KERNEL_CHECK;
return;
}
for (int i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_dot(dim_, mult_data, bottom_data, top_data);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_asum(dim_, bottom_data, top_data);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_dot(dim_, bottom_data, bottom_data, top_data);
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data += dim_;
++top_data;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
caffe_gpu_scal(num_, coeff_, top_data);
}
}
template <typename Dtype>
void ReductionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
// Get bottom_data, if needed.
const Dtype* bottom_data = NULL;
switch (op_) {
// Operations that don't need bottom_data
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
break;
// Operations that need bottom_data
case ReductionParameter_ReductionOp_ASUM:
case ReductionParameter_ReductionOp_SUMSQ:
case ReductionParameter_ReductionOp_PROD:
bottom_data = bottom[0]->gpu_data();
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (op_ == ReductionParameter_ReductionOp_PROD) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff_gpu = top[0]->gpu_diff();
const int N = bottom[0]->shape(0);
const int M = bottom[0]->shape(1);
const int K = top[0]->shape(1);
const int G = M/K;
const int NM = N*M;
hipLaunchKernelGGL(( prod_reduction_backward<Dtype>), dim3(CAFFE_GET_BLOCKS(NM)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N,M,K,G,
bottom_data,
top_data,
top_diff_gpu,
bottom_diff
);
CUDA_POST_KERNEL_CHECK;
return;
}
for (int i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*top_diff) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_set(dim_, bottom_coeff, bottom_diff);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_sign(dim_, bottom_data, bottom_diff);
caffe_gpu_scal(dim_, bottom_coeff, bottom_diff);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data += dim_;
bottom_diff += dim_;
++top_diff;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReductionLayer);
} // namespace caffe
| 44c33cfbe05df8e28a62529364bfd3898c437bf6.cu | #include <vector>
#include "caffe/layers/reduction_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
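// Forward pass of the PROD reduction: each output Y(i, j) is the product of
// the group of G consecutive inputs X(i, j*G) ... X(i, j*G + G - 1).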
template <typename Dtype>
__global__ void prod_reduction_forward(
const int N, const int K, const int M, const int G,
const Dtype* const X,
Dtype* const Y)
{
const int NK = N*K;
CUDA_KERNEL_LOOP(ij, NK) //for each entry of the matrix Y
{
const int i = ij/K;
const int j = ij%K;
Y[ij]=(Dtype)1.0;
for(int g=0; g<G; ++g) {
Y[ij] *= X[i*M+j*G+g];
}
}//for ij
}
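// Backward pass of the PROD reduction: dX(i, j) = dY * Y / X for the group
// the element belongs to, or 0 when the group product Y is 0.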
template <typename Dtype>
__global__ void prod_reduction_backward(
const int N, const int M, const int K, const int G,
const Dtype* const X,
const Dtype* const Y,
const Dtype* const dY,
Dtype* const dX)
{
const int NM = N*M;
CUDA_KERNEL_LOOP(ij, NM) //for each entry of the matrix dX
{
const int i = ij/M;
const int j = ij%M;
const int dst = i*K+j/G;
dX[ij]=
Y[dst]==0 ?
(Dtype)0.0 :
dY[dst]*Y[dst]/X[ij];
}//for ij
}
template <typename Dtype>
void ReductionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* mult_data = NULL;
if (sum_multiplier_.count() > 0) {
mult_data = sum_multiplier_.gpu_data();
}
Dtype* top_data = top[0]->mutable_cpu_data();
if (op_ == ReductionParameter_ReductionOp_PROD) {
const int N = bottom[0]->shape(0);
const int M = bottom[0]->shape(1);
const int K = top[0]->shape(1);
const int G = M/K;
Dtype* top_data_gpu = top[0]->mutable_gpu_data();
const int NK = N*K;
prod_reduction_forward<Dtype><<<CAFFE_GET_BLOCKS(NK), CAFFE_CUDA_NUM_THREADS>>>(
N,K,M,G,
bottom_data,
top_data_gpu
);
CUDA_POST_KERNEL_CHECK;
return;
}
for (int i = 0; i < num_; ++i) {
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_dot(dim_, mult_data, bottom_data, top_data);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_asum(dim_, bottom_data, top_data);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_dot(dim_, bottom_data, bottom_data, top_data);
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data += dim_;
++top_data;
}
if (coeff_ != Dtype(1)) {
// Reset the top_data pointer.
top_data = top[0]->mutable_gpu_data();
caffe_gpu_scal(num_, coeff_, top_data);
}
}
template <typename Dtype>
void ReductionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
// Get bottom_data, if needed.
const Dtype* bottom_data = NULL;
switch (op_) {
// Operations that don't need bottom_data
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
break;
// Operations that need bottom_data
case ReductionParameter_ReductionOp_ASUM:
case ReductionParameter_ReductionOp_SUMSQ:
case ReductionParameter_ReductionOp_PROD:
bottom_data = bottom[0]->gpu_data();
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
const Dtype* top_diff = top[0]->cpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (op_ == ReductionParameter_ReductionOp_PROD) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff_gpu = top[0]->gpu_diff();
const int N = bottom[0]->shape(0);
const int M = bottom[0]->shape(1);
const int K = top[0]->shape(1);
const int G = M/K;
const int NM = N*M;
prod_reduction_backward<Dtype><<<CAFFE_GET_BLOCKS(NM), CAFFE_CUDA_NUM_THREADS>>>(
N,M,K,G,
bottom_data,
top_data,
top_diff_gpu,
bottom_diff
);
CUDA_POST_KERNEL_CHECK;
return;
}
for (int i = 0; i < num_; ++i) {
const Dtype bottom_coeff = (*top_diff) * coeff_;
switch (op_) {
case ReductionParameter_ReductionOp_SUM:
case ReductionParameter_ReductionOp_MEAN:
caffe_gpu_set(dim_, bottom_coeff, bottom_diff);
break;
case ReductionParameter_ReductionOp_ASUM:
caffe_gpu_sign(dim_, bottom_data, bottom_diff);
caffe_gpu_scal(dim_, bottom_coeff, bottom_diff);
break;
case ReductionParameter_ReductionOp_SUMSQ:
caffe_gpu_scale(dim_, 2 * bottom_coeff, bottom_data, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown reduction op: "
<< ReductionParameter_ReductionOp_Name(op_);
}
bottom_data += dim_;
bottom_diff += dim_;
++top_diff;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReductionLayer);
} // namespace caffe
|
4cf18045276949bc74dc11e814f0c3812eaaf92b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void k_Exposure( float* p_Input, int p_Width, int p_Height, float p_Exposure) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4;
p_Input[index] = p_Input[index] * exp2(p_Exposure);
p_Input[index + 1] = p_Input[index + 1] * exp2(p_Exposure);
p_Input[index + 2] = p_Input[index + 2] * exp2(p_Exposure);
}} | 4cf18045276949bc74dc11e814f0c3812eaaf92b.cu | #include "includes.h"
__global__ void k_Exposure( float* p_Input, int p_Width, int p_Height, float p_Exposure) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < p_Width && y < p_Height) {
const int index = (y * p_Width + x) * 4;
p_Input[index] = p_Input[index] * exp2(p_Exposure);
p_Input[index + 1] = p_Input[index + 1] * exp2(p_Exposure);
p_Input[index + 2] = p_Input[index + 2] * exp2(p_Exposure);
}} |
37c69c56418994d9fe9b33c702db5feb0b55594f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
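// Device-to-device copy kernel: each 256-thread block moves one contiguous 8 KiB chunk,
// with every thread copying 8 floats spaced 1 KiB apart.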
extern "C" __global__
void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){
output += (blockIdx.x<<13)|(threadIdx.x<<2);
input += (blockIdx.x<<13)|(threadIdx.x<<2);
*((float* )&output[0]) = *((float* )&input[0]);
*((float* )&output[0x400]) = *((float* )&input[0x400]);
*((float* )&output[0x800]) = *((float* )&input[0x800]);
*((float* )&output[0xc00]) = *((float* )&input[0xc00]);
*((float* )&output[0x1000]) = *((float* )&input[0x1000]);
*((float* )&output[0x1400]) = *((float* )&input[0x1400]);
*((float* )&output[0x1800]) = *((float* )&input[0x1800]);
*((float* )&output[0x1c00]) = *((float* )&input[0x1c00]);
}
#define CALL(cmd) \
do {\
hipError_t cuda_error = cmd;\
if (cuda_error != hipSuccess) { \
std::cout<<"'"<<hipGetErrorString(cuda_error)<<"'("<<cuda_error<<")"<<" at "<<__FILE__<<":"<<__LINE__<<std::endl;\
exit(EXIT_FAILURE);\
}\
} while(0)
#define WARMUP 20
#define LOOP 100
static inline void b2s(size_t bytes, char * str){
if(bytes<1024){
sprintf(str, "%luB", bytes);
}else if(bytes<(1024*1024)){
double b= (double)bytes/1024.0;
sprintf(str, "%.2fKB", b);
}else if(bytes<(1024*1024*1024)){
double b= (double)bytes/(1024.0*1024);
sprintf(str, "%.2fMB", b);
}else{
double b= (double)bytes/(1024.0*1024*1024);
sprintf(str, "%.2fGB", b);
}
}
static inline int env_get_int(const char * var_name, int def_v)
{
char * v = getenv(var_name);
int r = def_v;
if(v)
r = atoi(v);
return r;
}
static inline float get_rand(){
static int inited = 0;
float v;
if(!inited){ srand(time(NULL)); inited = 1; }
v = rand() % 1000 + 1;
return v / 1000.0f;
}
static inline int valid_vec(const float * vec_a, const float * vec_b, int num)
{
int err_cnt = 0;
for(int i=0;i<num;i++){
if(vec_a[i] != vec_b[i])
err_cnt++;
}
return err_cnt;
}
int main() {
hipSetDevice(0);
unsigned char *A, *B;
const int dwords = env_get_int("DWORDS",64*3*224*224);
float * h_A = (float*)malloc(dwords*sizeof(float));
float * h_B = (float*)malloc(dwords*sizeof(float));
for (int i = 0; i < dwords; ++i) h_A[i] = get_rand();
CALL(hipMalloc(&A, dwords * sizeof(float)));
CALL(hipMalloc(&B, dwords * sizeof(float)));
CALL(hipMemcpy(A, h_A, dwords * sizeof(float), hipMemcpyHostToDevice));
// benchmark kernel
int bx = 256;
int gx = (dwords+255)>>11;
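    // Each block copies bx*8 = 2048 floats, so this grid size only covers the whole buffer
    // when dwords is (within 255 of) a multiple of 2048 -- true for the default 64*3*224*224.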
assert(dwords/(bx*8*4));
hipEvent_t start_ev, stop_ev;
CALL(hipEventCreate(&start_ev));
CALL(hipEventCreate(&stop_ev));
for(int i=0;i<WARMUP;i++)
hipLaunchKernelGGL(( memcpy_kernel), dim3(gx), dim3(bx), 0, 0, B, A);
CALL(hipEventRecord(start_ev, 0));
for(int i=0;i<LOOP;i++)
hipLaunchKernelGGL(( memcpy_kernel), dim3(gx), dim3(bx), 0, 0, B, A);
CALL(hipEventRecord( stop_ev, 0 ));
CALL(hipEventSynchronize(stop_ev));
float ms;
CALL(hipEventElapsedTime(&ms,start_ev, stop_ev));
ms/=LOOP;
CALL(hipMemcpy(h_B, B, dwords * sizeof(float), hipMemcpyDeviceToHost));
//if(valid_vec(h_A, h_B, dwords) != 0) printf("not valid copy!\n");
sleep(1);
// benchmark memcpy api
for(int i=0;i<WARMUP;i++)
CALL(hipMemcpy(B, A, dwords * sizeof(float), hipMemcpyDeviceToDevice));
CALL(hipEventRecord( start_ev, 0));
for(int i=0;i<LOOP;i++)
CALL(hipMemcpy(B, A, dwords * sizeof(float), hipMemcpyDeviceToDevice));
CALL(hipEventRecord( stop_ev, 0 ));
CALL(hipEventSynchronize(stop_ev));
float ms_api;
CALL(hipEventElapsedTime(&ms_api,start_ev, stop_ev));
ms_api/=LOOP;
char str[64];
b2s(dwords*sizeof(float), str);
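    // The factor of 2 below accounts for each float being read once and written once.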
printf("%s, bandwidth_kernel:%.3f(GB/s), bandwidth_api:%.3f(GB/s)\n", str, ((double)dwords*sizeof(float)*2)/((double)ms/1000)/1000000000.0,
((double)dwords*sizeof(float)*2)/((double)ms_api/1000)/1000000000.0 );
free(h_A);
free(h_B);
CALL(hipFree(A));
CALL(hipFree(B));
}
| 37c69c56418994d9fe9b33c702db5feb0b55594f.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
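// Device-to-device copy kernel: each 256-thread block moves one contiguous 8 KiB chunk,
// with every thread copying 8 floats spaced 1 KiB apart.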
extern "C" __global__
void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){
output += (blockIdx.x<<13)|(threadIdx.x<<2);
input += (blockIdx.x<<13)|(threadIdx.x<<2);
*((float* )&output[0]) = *((float* )&input[0]);
*((float* )&output[0x400]) = *((float* )&input[0x400]);
*((float* )&output[0x800]) = *((float* )&input[0x800]);
*((float* )&output[0xc00]) = *((float* )&input[0xc00]);
*((float* )&output[0x1000]) = *((float* )&input[0x1000]);
*((float* )&output[0x1400]) = *((float* )&input[0x1400]);
*((float* )&output[0x1800]) = *((float* )&input[0x1800]);
*((float* )&output[0x1c00]) = *((float* )&input[0x1c00]);
}
#define CALL(cmd) \
do {\
cudaError_t cuda_error = cmd;\
if (cuda_error != cudaSuccess) { \
std::cout<<"'"<<cudaGetErrorString(cuda_error)<<"'("<<cuda_error<<")"<<" at "<<__FILE__<<":"<<__LINE__<<std::endl;\
exit(EXIT_FAILURE);\
}\
} while(0)
#define WARMUP 20
#define LOOP 100
static inline void b2s(size_t bytes, char * str){
if(bytes<1024){
sprintf(str, "%luB", bytes);
}else if(bytes<(1024*1024)){
double b= (double)bytes/1024.0;
sprintf(str, "%.2fKB", b);
}else if(bytes<(1024*1024*1024)){
double b= (double)bytes/(1024.0*1024);
sprintf(str, "%.2fMB", b);
}else{
double b= (double)bytes/(1024.0*1024*1024);
sprintf(str, "%.2fGB", b);
}
}
static inline int env_get_int(const char * var_name, int def_v)
{
char * v = getenv(var_name);
int r = def_v;
if(v)
r = atoi(v);
return r;
}
static inline float get_rand(){
static int inited = 0;
float v;
if(!inited){ srand(time(NULL)); inited = 1; }
v = rand() % 1000 + 1;
return v / 1000.0f;
}
static inline int valid_vec(const float * vec_a, const float * vec_b, int num)
{
int err_cnt = 0;
for(int i=0;i<num;i++){
if(vec_a[i] != vec_b[i])
err_cnt++;
}
return err_cnt;
}
int main() {
cudaSetDevice(0);
unsigned char *A, *B;
const int dwords = env_get_int("DWORDS",64*3*224*224);
float * h_A = (float*)malloc(dwords*sizeof(float));
float * h_B = (float*)malloc(dwords*sizeof(float));
for (int i = 0; i < dwords; ++i) h_A[i] = get_rand();
CALL(cudaMalloc(&A, dwords * sizeof(float)));
CALL(cudaMalloc(&B, dwords * sizeof(float)));
CALL(cudaMemcpy(A, h_A, dwords * sizeof(float), cudaMemcpyHostToDevice));
// benchmark kernel
int bx = 256;
int gx = (dwords+255)>>11;
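    // Each block copies bx*8 = 2048 floats, so this grid size only covers the whole buffer
    // when dwords is (within 255 of) a multiple of 2048 -- true for the default 64*3*224*224.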
assert(dwords/(bx*8*4));
cudaEvent_t start_ev, stop_ev;
CALL(cudaEventCreate(&start_ev));
CALL(cudaEventCreate(&stop_ev));
for(int i=0;i<WARMUP;i++)
memcpy_kernel<<<gx, bx>>>(B, A);
CALL(cudaEventRecord(start_ev, 0));
for(int i=0;i<LOOP;i++)
memcpy_kernel<<<gx, bx>>>(B, A);
CALL(cudaEventRecord( stop_ev, 0 ));
CALL(cudaEventSynchronize(stop_ev));
float ms;
CALL(cudaEventElapsedTime(&ms,start_ev, stop_ev));
ms/=LOOP;
CALL(cudaMemcpy(h_B, B, dwords * sizeof(float), cudaMemcpyDeviceToHost));
//if(valid_vec(h_A, h_B, dwords) != 0) printf("not valid copy!\n");
sleep(1);
// benchmark memcpy api
for(int i=0;i<WARMUP;i++)
CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice));
CALL(cudaEventRecord( start_ev, 0));
for(int i=0;i<LOOP;i++)
CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice));
CALL(cudaEventRecord( stop_ev, 0 ));
CALL(cudaEventSynchronize(stop_ev));
float ms_api;
CALL(cudaEventElapsedTime(&ms_api,start_ev, stop_ev));
ms_api/=LOOP;
char str[64];
b2s(dwords*sizeof(float), str);
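    // The factor of 2 below accounts for each float being read once and written once.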
printf("%s, bandwidth_kernel:%.3f(GB/s), bandwidth_api:%.3f(GB/s)\n", str, ((double)dwords*sizeof(float)*2)/((double)ms/1000)/1000000000.0,
((double)dwords*sizeof(float)*2)/((double)ms_api/1000)/1000000000.0 );
free(h_A);
free(h_B);
CALL(cudaFree(A));
CALL(cudaFree(B));
}
|
fc57eac17889f2c29d28ced2eb71eba9b010d610.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 3
#define TW 5
#define TC 32
#define C 64
#define N 32
#define H 112
#define W 112
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
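// Tiling parameters for the hand-written conv2d kernel below: each thread block handles
// TC input channels and a TH-row band of the output, and WPAD is the image width rounded
// up to a multiple of TW plus a one-pixel halo column on each side for the 3x3 window.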
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
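// Machine-generated (TVM-style) direct 3x3 convolution kernel: input and filter tiles are
// staged in shared memory and every thread accumulates 8 output values in registers.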
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[6];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.x) * 9) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + ((((int)threadIdx.x) * 9) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1920) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 480) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)))) && (((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + (((((int)threadIdx.x) * 9) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + ((((((int)threadIdx.x) * 9) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + ((((int)threadIdx.x) * 9) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 1) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 1) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1919) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 479) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 1) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 1) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 1) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 2) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 2) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1918) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 478) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 2) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 2) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 2) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 3) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 3) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1917) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 477) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 3) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 3) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 3) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 4) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 4) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1916) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 476) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 4) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 4) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 4) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 5) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 5) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1915) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 475) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 5) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 5) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 5) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 6) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 6) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1914) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 474) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 6) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 6) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 6) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 7) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 7) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1913) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 473) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 7) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 7) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 7) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 8) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 8) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1912) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 472) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 8) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 8) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 8) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((int)threadIdx.x) / 12)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 768) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 192) {
if (((int)threadIdx.x) < 24) {
kernel_shared[((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((int)threadIdx.x) / 12) * 576)) + (rc_outer * 144)) + ((((((int)threadIdx.x) % 12) * 4) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 1) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 1) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 767) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 191) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 1) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 1) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 2) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 2) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 766) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 190) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 2) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 2) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 3) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 255) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 765) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 189) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 3) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 60))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 61))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 62))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 1))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 2))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 48))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 49))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 50))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 96))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 97))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 98))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 144))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 145))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 146))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(6)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(9)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(7)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(10)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(5)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(8)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(11)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
}
}
}
compute[(((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 224))] = compute_local[(4)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12544))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12768))] = compute_local[(5)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25088))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25312))] = compute_local[(6)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37632))] = compute_local[(3)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37856))] = compute_local[(7)];
}
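// The three classes below wrap cuDNN forward convolution with different algorithms
// (implicit GEMM, non-fused Winograd, FFT) and serve as baselines for the custom kernels.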
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S per input/output channel pair)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
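// Loads a horizontal band of the input (rows h_start..h_end-1) for TC channels into the
// padded shared-memory slab (zeroed by the caller); h_offset == 1 shifts the rows down by
// one when the tile starts at the top image border.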
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
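// Writes one thread's TH x TW accumulator tile back to global memory with atomicAdd so that
// contributions from different input-channel tiles are summed; the written height/width are
// clamped so border tiles never write outside the image.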
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 3:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
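// Hand-written 3x3 convolution: each thread owns one output channel (its lane id) and one
// TH x TW output tile, loads the filter weights for the current channel into registers, and
// accumulates over the TC channels staged in the shared slab before the atomic write-back.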
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[8];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 6]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
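// Sum of absolute element-wise differences between two host buffers (OpenMP reduction); used to sanity-check kernel outputs against each other.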
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
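// Benchmark driver: runs cuDNN GEMM/Winograd/FFT, the TVM-generated kernel, and the tiled conv2d kernel on the same random input and compares their times.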
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
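// Time the three cuDNN-backed paths (implicit GEMM, Winograd non-fused, FFT) with HIP events.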
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
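// Launch and time the TVM-generated kernel with its fixed grid/block configuration.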
dim3 grid(4,28,2);
dim3 block(28,2,4);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
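// Raise the dynamic shared-memory limit so the tiled kernel can stage a TC x (TH+2) x WPAD float input tile, then launch and time it.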
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
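// Append one CSV row (problem size, per-implementation times, speedups of the tiled kernel) and echo the same values to stdout.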
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| fc57eac17889f2c29d28ced2eb71eba9b010d610.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 3
#define TW 5
#define TC 32
#define C 64
#define N 32
#define H 112
#define W 112
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
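// TVM-generated direct convolution kernel (3x3, stride 1, pad 1) used as a baseline in main().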
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[8];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[6];
float kernel_shared_local[12];
compute_local[(0)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + ((((int)threadIdx.x) * 9) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + ((((int)threadIdx.x) * 9) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1920) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 480) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 9) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)))) && (((((int)blockIdx.x) * 28) + ((((int)threadIdx.x) * 9) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + (((((int)threadIdx.x) * 9) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + ((((((int)threadIdx.x) * 9) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + ((((int)threadIdx.x) * 9) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 1) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 1) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1919) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 479) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 1) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 1) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 1) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 1) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 1) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 2) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 2) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1918) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 478) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 2) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 2) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 2) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 2) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 2) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 3) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 3) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1917) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 477) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 3) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 3) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 3) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 3) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 3) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 4) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 4) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1916) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 476) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 4) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 4) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 4) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 4) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 4) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 5) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 5) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1915) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 475) {
if (((int)threadIdx.x) < 27) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 5) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 5) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 5) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 5) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 5) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 6) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 6) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1914) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 474) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 6) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 6) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 6) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 6) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 6) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 7) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 7) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1913) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 473) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 7) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 7) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 7) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 7) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 7) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 9) + 8) / 120)) < 16) {
if ((((((int)threadIdx.z) * 16) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 9) + 8) / 30)) < 64) {
if ((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) < 1912) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 9)) < 472) {
if (((int)threadIdx.x) < 26) {
pad_temp_shared[(((((((int)threadIdx.z) * 480) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 9)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 9) + 8) % 120) / 30)) + ry_outer) < 113)) && (1 <= ((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)))) && (((((int)blockIdx.x) * 28) + (((((int)threadIdx.x) * 9) + 8) % 30)) < 113)) ? data[(((((((((((rc_outer * 200704) + (((int)threadIdx.z) * 50176)) + (((int)threadIdx.y) * 25088)) + ((((((int)threadIdx.x) * 9) + 8) / 120) * 12544)) + (((int)blockIdx.y) * 448)) + (((((((int)threadIdx.x) * 9) + 8) % 120) / 30) * 112)) + (ry_outer * 112)) + (((int)blockIdx.x) * 28)) + (((((int)threadIdx.x) * 9) + 8) % 30)) - 113))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((int)threadIdx.x) / 12)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 768) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 192) {
if (((int)threadIdx.x) < 24) {
kernel_shared[((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((int)threadIdx.x) / 12) * 576)) + (rc_outer * 144)) + ((((((int)threadIdx.x) % 12) * 4) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 1) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 1) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 767) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 191) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 1) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 1) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 2) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 4) + 2) / 3)) < 256) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 766) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 190) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 2) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 4) + 2) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 4) + 3) / 48)) < 16) {
if ((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 4) / 3)) < 255) {
if ((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) < 765) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 4)) < 189) {
if (((int)threadIdx.x) < 24) {
kernel_shared[(((((((int)threadIdx.z) * 192) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 4)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + (((int)threadIdx.y) * 1152)) + ((((((int)threadIdx.x) * 4) + 3) / 48) * 576)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 4) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 4) % 3)))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 16; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 60))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 1))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 61))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 2))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 120) + (((int)threadIdx.y) * 30)) + ((int)threadIdx.x)) + 62))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 1))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 2))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 48))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 49))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 50))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 96))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 97))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 98))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 144))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 145))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 192) + (rc_inner_outer * 3)) + 146))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(3)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(6)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(9)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(9)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(1)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(4)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(7)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(10)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(10)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(2)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(5)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(8)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(11)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(11)]));
}
}
}
compute[(((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 224))] = compute_local[(4)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12544))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 12768))] = compute_local[(5)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25088))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 25312))] = compute_local[(6)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37632))] = compute_local[(3)];
compute[((((((((((int)blockIdx.z) * 200704) + (((int)threadIdx.z) * 50176)) + (((int)blockIdx.y) * 448)) + (((int)threadIdx.y) * 112)) + (((int)blockIdx.x) * 28)) + ((int)threadIdx.x)) + 37856))] = compute_local[(7)];
}
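// cuDNN wrapper classes (implicit GEMM, Winograd non-fused, FFT): each owns its descriptors and workspace, fills the filter with ones, and runs one forward convolution per call with a fixed algorithm.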
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
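// Stage one TC-channel tile of the padded input into shared memory; h_offset == 1 keeps the top padding row zeroed when the tile starts at the image border.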
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
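// Write the per-thread TH x TW register tile back to global memory, clipped to the image boundary; atomicAdd combines partial sums contributed by different channel tiles.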
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 3:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 5:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 5; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
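// Tiled direct convolution: blockIdx.x selects a (channel-tile, output-row-tile) pair, tw_id the output-column tile, and lane_id the output channel; each thread accumulates a TH x TW tile in registers.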
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[0];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[1];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[2];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 6]*data_array[8];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[3];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[4];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[8] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[5];
temp_result[9] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 6]*data_array[8];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[10] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[11] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[6];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[12] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 5]*data_array[7];
temp_result[13] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 5]*data_array[8];
temp_result[14] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 6]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(4,28,2);
dim3 block(28,2,4);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
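// CSV row layout: N,C,H,W, then the FFT / Winograd / GEMM / TVM / tuned-kernel
// times in milliseconds, followed by each baseline's time divided by the tuned
// kernel's time (i.e. the speedup of the tuned kernel over that baseline).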
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
a48382de51e69ac3fc31ce7543bd5ad682df97f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <rocblas.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <strsm_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int size);
////////////////////////////////////////////////////////////////////////////////
// export C interface
extern "C"
void computeGold( float* a, float* b, const unsigned int len, float* result);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest(INPUT_WIDTH);
CUT_EXIT(argc, argv);
}
int checkarray(float* reference, float* o_data, int num_elements) {
{
int error = 0;
for (int i=0; i<num_elements; i++) {
for (int j=0; j<num_elements; j++) {
float t = reference[j*num_elements+i]-o_data[j*num_elements+i];
if (t<0) t = -t;
float ref = reference[j*num_elements+i];
if (ref<0) ref = -ref;
if (t/ref>1e-3) {
if (error<4)
printf("%d, %d, %f, %f\n", i, j, reference[j*num_elements+i], o_data[j*num_elements+i]);
error++;
}
}
}
return error;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int size)
{
cublasStatus status;
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
unsigned int num_elements = size;
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int in_mem_size = sizeof( float) * (num_elements*num_elements);
const unsigned int out_mem_size = sizeof( float) * (num_elements*num_elements);
// allocate host memory to store the input data
float* a_data = (float*) malloc( in_mem_size);
float* b_data = (float*) malloc( in_mem_size);
float* o_data = (float*) malloc( out_mem_size);
float* reference = (float*) malloc( out_mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
for( unsigned int j = 0; j < num_elements; ++j) {
a_data[i*num_elements+j] = ((rand()/(float)RAND_MAX));
if (i>j) a_data[i*num_elements+j]=0.0f;
b_data[i*num_elements+j] = ((rand()/(float)RAND_MAX));
}
}
// printf("\n");
// compute reference solution
computeGold(a_data, b_data, num_elements, reference);
// allocate device memory input and output arrays
float* d_a_data;
float* d_odata;
cutilSafeCall( hipMalloc( (void**) &d_a_data, in_mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, out_mem_size));
// copy host memory to device input array
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running %d elements\n", num_elements);
float epsilon = 1e-4;
// execute the kernels
unsigned int numIterations = 1;
{
cutilSafeCall( hipMemcpy( d_a_data, a_data, in_mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_odata, b_data, in_mem_size, hipMemcpyHostToDevice) );
hipDeviceSynchronize();
cutStartTimer(timer);
hipblasStrsm('L', 'L', 'N', 'N', num_elements, num_elements, 1.0, d_a_data, num_elements, d_odata, num_elements);
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
// copy result from device to host
cutilSafeCall(hipMemcpy( reference, d_odata, out_mem_size,
hipMemcpyDeviceToHost));
// unsigned int result_regtest = cutComparefe( reference, o_data, num_elements*num_elements, epsilon);
// printf( "cublas: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
}
// // we do the transpose
// for (int i=0; i<num_elements; i++) {
// for (int j=0; j<num_elements; j++) {
// if (i<j) {
// float t = b_data[j*num_elements+i];
// b_data[j*num_elements+i] = b_data[i*num_elements+j];
// b_data[i*num_elements+j] = t;
// t = a_data[j*num_elements+i];
// a_data[j*num_elements+i] = a_data[i*num_elements+j];
// a_data[i*num_elements+j] = t;
// }
// }
// }
{
cutilSafeCall( hipMemcpy( d_a_data, a_data, in_mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_odata, b_data, in_mem_size, hipMemcpyHostToDevice) );
int block_width = 256;
hipDeviceSynchronize();
cutStartTimer(timer);
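// Blocked lower-triangular solve: each iteration solves one block_width-row
// diagonal block with strsm, then subtracts that block's contribution from the
// remaining rows of the right-hand side with a custom update kernel. The three
// timed variants below differ only in that update kernel (matmul_opt,
// matmul_coalesced, matrix_naive) and its launch configuration.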
for (int i=0; i<num_elements; i+=block_width) {
hipblasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(block_width, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y / 16);
hipLaunchKernelGGL(( matmul_opt), dim3(grid), dim3(threads), 0, 0, d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(hipMemcpy( o_data, d_odata, out_mem_size,
hipMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
{
cutilSafeCall( hipMemcpy( d_a_data, a_data, in_mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_odata, b_data, in_mem_size, hipMemcpyHostToDevice) );
int block_width = 256;
hipDeviceSynchronize();
cutStartTimer(timer);
for (int i=0; i<num_elements; i+=block_width) {
hipblasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(32, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y / 1);
hipLaunchKernelGGL(( matmul_coalesced), dim3(grid), dim3(threads), 0, 0, d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("matmul_coalesced Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(hipMemcpy( o_data, d_odata, out_mem_size,
hipMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
{
cutilSafeCall( hipMemcpy( d_a_data, a_data, in_mem_size, hipMemcpyHostToDevice) );
cutilSafeCall( hipMemcpy( d_odata, b_data, in_mem_size, hipMemcpyHostToDevice) );
int block_width = 256;
hipDeviceSynchronize();
cutStartTimer(timer);
for (int i=0; i<num_elements; i+=block_width) {
hipblasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(block_width, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y);
hipLaunchKernelGGL(( matrix_naive), dim3(grid), dim3(threads), 0, 0, d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("naive Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(hipMemcpy( o_data, d_odata, out_mem_size,
hipMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
// cleanup memory
free( a_data);
free( b_data);
free( o_data);
free( reference);
cutilSafeCall(hipFree(d_a_data));
cutilSafeCall(hipFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
status = hipblasShutdown();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
hipDeviceReset();
}
| a48382de51e69ac3fc31ce7543bd5ad682df97f2.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cublas.h>
// includes, project
#include <cutil_inline.h>
// includes, kernels
#include <strsm_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int size);
////////////////////////////////////////////////////////////////////////////////
// export C interface
extern "C"
void computeGold( float* a, float* b, const unsigned int len, float* result);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
CUT_DEVICE_INIT(argc, argv);
runTest(INPUT_WIDTH);
CUT_EXIT(argc, argv);
}
int checkarray(float* reference, float* o_data, int num_elements) {
{
int error = 0;
for (int i=0; i<num_elements; i++) {
for (int j=0; j<num_elements; j++) {
float t = reference[j*num_elements+i]-o_data[j*num_elements+i];
if (t<0) t = -t;
float ref = reference[j*num_elements+i];
if (ref<0) ref = -ref;
if (t/ref>1e-3) {
if (error<4)
printf("%d, %d, %f, %f\n", i, j, reference[j*num_elements+i], o_data[j*num_elements+i]);
error++;
}
}
}
return error;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int size)
{
cublasStatus status;
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit (1);
}
unsigned int num_elements = size;
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int in_mem_size = sizeof( float) * (num_elements*num_elements);
const unsigned int out_mem_size = sizeof( float) * (num_elements*num_elements);
// allocate host memory to store the input data
float* a_data = (float*) malloc( in_mem_size);
float* b_data = (float*) malloc( in_mem_size);
float* o_data = (float*) malloc( out_mem_size);
float* reference = (float*) malloc( out_mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
{
for( unsigned int j = 0; j < num_elements; ++j) {
a_data[i*num_elements+j] = ((rand()/(float)RAND_MAX));
if (i>j) a_data[i*num_elements+j]=0.0f;
b_data[i*num_elements+j] = ((rand()/(float)RAND_MAX));
}
}
// printf("\n");
// compute reference solution
computeGold(a_data, b_data, num_elements, reference);
// allocate device memory input and output arrays
float* d_a_data;
float* d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_a_data, in_mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, out_mem_size));
// copy host memory to device input array
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
// make sure there are no CUDA errors before we start
cutilCheckMsg("Kernel execution failed");
printf("Running %d elements\n", num_elements);
float epsilon = 1e-4;
// execute the kernels
unsigned int numIterations = 1;
{
cutilSafeCall( cudaMemcpy( d_a_data, a_data, in_mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_odata, b_data, in_mem_size, cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
cutStartTimer(timer);
cublasStrsm('L', 'L', 'N', 'N', num_elements, num_elements, 1.0, d_a_data, num_elements, d_odata, num_elements);
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
// copy result from device to host
cutilSafeCall(cudaMemcpy( reference, d_odata, out_mem_size,
cudaMemcpyDeviceToHost));
// unsigned int result_regtest = cutComparefe( reference, o_data, num_elements*num_elements, epsilon);
// printf( "cublas: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
}
// // we do the transpose
// for (int i=0; i<num_elements; i++) {
// for (int j=0; j<num_elements; j++) {
// if (i<j) {
// float t = b_data[j*num_elements+i];
// b_data[j*num_elements+i] = b_data[i*num_elements+j];
// b_data[i*num_elements+j] = t;
// t = a_data[j*num_elements+i];
// a_data[j*num_elements+i] = a_data[i*num_elements+j];
// a_data[i*num_elements+j] = t;
// }
// }
// }
{
cutilSafeCall( cudaMemcpy( d_a_data, a_data, in_mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_odata, b_data, in_mem_size, cudaMemcpyHostToDevice) );
int block_width = 256;
cudaThreadSynchronize();
cutStartTimer(timer);
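// Blocked lower-triangular solve: each iteration solves one block_width-row
// diagonal block with strsm, then subtracts that block's contribution from the
// remaining rows of the right-hand side with a custom update kernel. The three
// timed variants below differ only in that update kernel (matmul_opt,
// matmul_coalesced, matrix_naive) and its launch configuration.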
for (int i=0; i<num_elements; i+=block_width) {
cublasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(block_width, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y / 16);
matmul_opt<<<grid, threads>>>(d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(cudaMemcpy( o_data, d_odata, out_mem_size,
cudaMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
{
cutilSafeCall( cudaMemcpy( d_a_data, a_data, in_mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_odata, b_data, in_mem_size, cudaMemcpyHostToDevice) );
int block_width = 256;
cudaThreadSynchronize();
cutStartTimer(timer);
for (int i=0; i<num_elements; i+=block_width) {
cublasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(32, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y / 1);
matmul_coalesced<<<grid, threads>>>(d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("matmul_coalesced Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(cudaMemcpy( o_data, d_odata, out_mem_size,
cudaMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
{
cutilSafeCall( cudaMemcpy( d_a_data, a_data, in_mem_size, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpy( d_odata, b_data, in_mem_size, cudaMemcpyHostToDevice) );
int block_width = 256;
cudaThreadSynchronize();
cutStartTimer(timer);
for (int i=0; i<num_elements; i+=block_width) {
cublasStrsm('L', 'L', 'N', 'N', block_width, num_elements, 1.0, d_a_data+i*num_elements+i, num_elements, d_odata+i, num_elements);
// left matrix (i,i) (i+block_width, i+block_width), right matrix (0,i) (0, i+block_width)
// strsm to get the result matrix (0,i) (0, i+block_width)
// result(0, i+block_width) (0, h) - left matrix (i, i+block_width) (i+block_width,h) * result matrix (0,i) (0, i+block_width)
dim3 threads(block_width, 1);
int WC = num_elements - i - block_width;
if (WC==0) break;
int HC = num_elements;
dim3 grid(WC / threads.x, HC / threads.y);
matrix_naive<<<grid, threads>>>(d_odata+i, d_a_data+(i+block_width)+i*num_elements, d_odata+i+block_width, block_width, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("naive Average time: %f ms\n", cutGetTimerValue(timer));
cutResetTimer(timer);
cutilSafeCall(cudaMemcpy( o_data, d_odata, out_mem_size,
cudaMemcpyDeviceToHost));
int res = checkarray(reference, o_data, num_elements);
printf("Test %s %d\n", (0 == res) ? "PASSED" : "FAILED", res);
}
// cleanup memory
free( a_data);
free( b_data);
free( o_data);
free( reference);
cutilSafeCall(cudaFree(d_a_data));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
status = cublasShutdown();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error\n");
}
cudaThreadExit();
}
|
d826999b066c91294774126ff258f31351d24e1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
} | d826999b066c91294774126ff258f31351d24e1e.cu | #include "includes.h"
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
} |
881a8d0e7632d3e24077016ed35fc0e29944c524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../gpu_utils/runtime.h"
#include "GTJ.h"
__device__ void reset_tj_neuron(GTJNeurons *d_neurons, int *shared_buf, volatile int size, int start_id)
{
for (int idx=threadIdx.x; idx<size; idx+=blockDim.x) {
int nid = shared_buf[idx] - start_id;
d_neurons->p_refrac_step[nid] = d_neurons->p_refrac_time[nid] - 1;
d_neurons->p_vm[nid] = d_neurons->p_v_reset[nid];
}
}
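// Integrate-and-fire update, grid-stride loop over the group's neurons: each
// active neuron integrates its summed input current; neurons crossing
// p_v_thresh push their global id into a shared-memory fire table via
// atomicAdd, and the table is flushed to the global fired table (resetting
// those neurons to p_v_reset and starting their refractory period) whenever it
// fills up or at the end of the pass. Refractory neurons only count down
// p_refrac_step.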
__global__ void update_tj_neuron(GTJNeurons *d_neurons, int num, int start_id)
{
__shared__ int fire_table_t[MAXBLOCKSIZE];
__shared__ volatile int fire_cnt;
if (threadIdx.x == 0) {
fire_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < num; idx +=blockDim.x*gridDim.x) {
bool fired = false;
int test_loc = 0;
int gnid = start_id + idx;
bool actived = d_neurons->p_refrac_step[idx] <= 0;
//real DT = 0.001;
if (actived) {
real I = gNeuronInput[gnid] + gNeuronInput_I[gnid] + d_neurons->p_i_tmp[idx];
d_neurons->p_vm[idx] = d_neurons->p_vm[idx] + DT * I/d_neurons->p_cm[idx];
gXInput[gnid] += gNeuronInput[gnid] + gNeuronInput_I[gnid];
fired = d_neurons->p_vm[idx] >= d_neurons->p_v_thresh[idx];
gFireCount[gnid] += fired;
for (int i=0; i<2; i++) {
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
}
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
reset_tj_neuron(d_neurons, fire_table_t, MAXBLOCKSIZE, start_id);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
}
__syncthreads();
if (fire_cnt > 0) {
commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
reset_tj_neuron(d_neurons, fire_table_t, fire_cnt, start_id);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
} else {
d_neurons->p_refrac_step[idx] = d_neurons->p_refrac_step[idx] - 1;
}
gNeuronInput[start_id + idx] = 0;
gNeuronInput_I[start_id + idx] = 0;
}
__syncthreads();
}
| 881a8d0e7632d3e24077016ed35fc0e29944c524.cu |
#include "../../gpu_utils/runtime.h"
#include "GTJ.h"
__device__ void reset_tj_neuron(GTJNeurons *d_neurons, int *shared_buf, volatile int size, int start_id)
{
for (int idx=threadIdx.x; idx<size; idx+=blockDim.x) {
int nid = shared_buf[idx] - start_id;
d_neurons->p_refrac_step[nid] = d_neurons->p_refrac_time[nid] - 1;
d_neurons->p_vm[nid] = d_neurons->p_v_reset[nid];
}
}
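// Integrate-and-fire update, grid-stride loop over the group's neurons: each
// active neuron integrates its summed input current; neurons crossing
// p_v_thresh push their global id into a shared-memory fire table via
// atomicAdd, and the table is flushed to the global fired table (resetting
// those neurons to p_v_reset and starting their refractory period) whenever it
// fills up or at the end of the pass. Refractory neurons only count down
// p_refrac_step.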
__global__ void update_tj_neuron(GTJNeurons *d_neurons, int num, int start_id)
{
__shared__ int fire_table_t[MAXBLOCKSIZE];
__shared__ volatile int fire_cnt;
if (threadIdx.x == 0) {
fire_cnt = 0;
}
__syncthreads();
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int idx = tid; idx < num; idx +=blockDim.x*gridDim.x) {
bool fired = false;
int test_loc = 0;
int gnid = start_id + idx;
bool actived = d_neurons->p_refrac_step[idx] <= 0;
//real DT = 0.001;
if (actived) {
real I = gNeuronInput[gnid] + gNeuronInput_I[gnid] + d_neurons->p_i_tmp[idx];
d_neurons->p_vm[idx] = d_neurons->p_vm[idx] + DT * I/d_neurons->p_cm[idx];
gXInput[gnid] += gNeuronInput[gnid] + gNeuronInput_I[gnid];
fired = d_neurons->p_vm[idx] >= d_neurons->p_v_thresh[idx];
gFireCount[gnid] += fired;
for (int i=0; i<2; i++) {
if (fired) {
test_loc = atomicAdd((int*)&fire_cnt, 1);
if (test_loc < MAXBLOCKSIZE) {
fire_table_t[test_loc] = gnid;
fired = false;
}
}
__syncthreads();
if (fire_cnt >= MAXBLOCKSIZE) {
commit2globalTable(fire_table_t, MAXBLOCKSIZE, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
reset_tj_neuron(d_neurons, fire_table_t, MAXBLOCKSIZE, start_id);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
__syncthreads();
}
__syncthreads();
if (fire_cnt > 0) {
commit2globalTable(fire_table_t, fire_cnt, gFiredTable, &gFiredTableSizes[gCurrentIdx], gFiredTableCap*gCurrentIdx);
reset_tj_neuron(d_neurons, fire_table_t, fire_cnt, start_id);
if (threadIdx.x == 0) {
fire_cnt = 0;
}
}
} else {
d_neurons->p_refrac_step[idx] = d_neurons->p_refrac_step[idx] - 1;
}
gNeuronInput[start_id + idx] = 0;
gNeuronInput_I[start_id + idx] = 0;
}
__syncthreads();
}
|
dab5c72b2c3e6943d3828c047d200117955cd506.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/sort.h>
#include <thrust/system/hip/detail/detail/stable_radix_sort.h>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
using namespace unittest;
typedef unittest::type_list<
#if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1))
// XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason
unsigned char,
#endif
unsigned short,
unsigned int,
unsigned long,
unsigned long long> UnsignedIntegerTypes;
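// Regression test for radix sort on misaligned input: keys (with a sequential
// value payload) are copied into buffers at element offsets 1..3 from an
// aligned allocation, sorted in place with stable_radix_sort_by_key, and
// compared against the result of thrust::sort_by_key on the aligned data.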
template <typename T>
struct TestRadixSortByKeyUnaligned
{
void operator()(const size_t n)
{
typedef thrust::device_vector<T> Vector1;
typedef thrust::device_vector<int> Vector2;
Vector1 unsorted_keys = unittest::random_integers<T>(n);
Vector1 sorted_keys = unsorted_keys;
Vector2 unsorted_values(n); thrust::sequence(unsorted_values.begin(), unsorted_values.end());
Vector2 sorted_values = unsorted_values;
thrust::sort_by_key(sorted_keys.begin(), sorted_keys.end(), sorted_values.begin());
for(int offset = 1; offset < 4; offset++)
{
Vector1 unaligned_unsorted_keys(n + offset, 0);
Vector1 unaligned_sorted_keys(n + offset, 0);
Vector2 unaligned_unsorted_values(n + offset, 0);
Vector2 unaligned_sorted_values(n + offset, 0);
thrust::copy( unsorted_keys.begin(), unsorted_keys.end(), unaligned_unsorted_keys.begin() + offset);
thrust::copy( sorted_keys.begin(), sorted_keys.end(), unaligned_sorted_keys.begin() + offset);
thrust::copy(unsorted_values.begin(), unsorted_values.end(), unaligned_unsorted_values.begin() + offset);
thrust::copy( sorted_values.begin(), sorted_values.end(), unaligned_sorted_values.begin() + offset);
thrust::system::cuda::detail::detail::stable_radix_sort_by_key(unaligned_unsorted_keys.begin() + offset, unaligned_unsorted_keys.end(), unaligned_unsorted_values.begin() + offset);
ASSERT_EQUAL( unaligned_unsorted_keys, unaligned_sorted_keys);
ASSERT_EQUAL(unaligned_unsorted_values, unaligned_sorted_values);
}
}
};
VariableUnitTest<TestRadixSortByKeyUnaligned, UnsignedIntegerTypes> TestRadixSortByKeyUnalignedInstance;
#endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
| dab5c72b2c3e6943d3828c047d200117955cd506.cu | #include <unittest/unittest.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/detail/detail/stable_radix_sort.h>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
using namespace unittest;
typedef unittest::type_list<
#if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1))
// XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason
unsigned char,
#endif
unsigned short,
unsigned int,
unsigned long,
unsigned long long> UnsignedIntegerTypes;
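// Regression test for radix sort on misaligned input: keys (with a sequential
// value payload) are copied into buffers at element offsets 1..3 from an
// aligned allocation, sorted in place with stable_radix_sort_by_key, and
// compared against the result of thrust::sort_by_key on the aligned data.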
template <typename T>
struct TestRadixSortByKeyUnaligned
{
void operator()(const size_t n)
{
typedef thrust::device_vector<T> Vector1;
typedef thrust::device_vector<int> Vector2;
Vector1 unsorted_keys = unittest::random_integers<T>(n);
Vector1 sorted_keys = unsorted_keys;
Vector2 unsorted_values(n); thrust::sequence(unsorted_values.begin(), unsorted_values.end());
Vector2 sorted_values = unsorted_values;
thrust::sort_by_key(sorted_keys.begin(), sorted_keys.end(), sorted_values.begin());
for(int offset = 1; offset < 4; offset++)
{
Vector1 unaligned_unsorted_keys(n + offset, 0);
Vector1 unaligned_sorted_keys(n + offset, 0);
Vector2 unaligned_unsorted_values(n + offset, 0);
Vector2 unaligned_sorted_values(n + offset, 0);
thrust::copy( unsorted_keys.begin(), unsorted_keys.end(), unaligned_unsorted_keys.begin() + offset);
thrust::copy( sorted_keys.begin(), sorted_keys.end(), unaligned_sorted_keys.begin() + offset);
thrust::copy(unsorted_values.begin(), unsorted_values.end(), unaligned_unsorted_values.begin() + offset);
thrust::copy( sorted_values.begin(), sorted_values.end(), unaligned_sorted_values.begin() + offset);
thrust::system::cuda::detail::detail::stable_radix_sort_by_key(unaligned_unsorted_keys.begin() + offset, unaligned_unsorted_keys.end(), unaligned_unsorted_values.begin() + offset);
ASSERT_EQUAL( unaligned_unsorted_keys, unaligned_sorted_keys);
ASSERT_EQUAL(unaligned_unsorted_values, unaligned_sorted_values);
}
}
};
VariableUnitTest<TestRadixSortByKeyUnaligned, UnsignedIntegerTypes> TestRadixSortByKeyUnalignedInstance;
#endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
|
be057b7adda15f4afa36728f4419e9d82fa5efe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/mv_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/sparse/sparse_blas.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
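// Backward pass of sparse mat-vec (out = x * vec): for every stored entry
// (i, j) of x, d x_{ij} = dout_i * vec_j, so the kernels below only write the
// existing sparsity pattern. For the COO layout the row indices occupy
// dx_indices[0, nnz) and the column indices dx_indices[nnz, 2*nnz). The vector
// gradient dvec = x^T * dout is delegated to the sparse BLAS SpMV, which is
// only available from CUDA 11 onwards.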
template <typename T, typename IntT>
__global__ void MvCooGradGpuKernel(const T *dout,
const T *vec,
const IntT *dx_indices,
T *dx_values,
int nnz) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < nnz; idx += blockDim.x * gridDim.x) {
int i = dx_indices[idx];
int j = dx_indices[idx + nnz];
dx_values[idx] = dout[i] * vec[j];
}
}
template <typename T, typename IntT>
__global__ void MvCsrGradGpuKernel(const T *dout,
const T *vec,
const IntT *dx_crows,
const IntT *dx_cols,
T *dx_values,
int row_number) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (; i < row_number; i += gridDim.x * blockDim.x) {
int row_first = static_cast<int>(dx_crows[i]);
int row_nnz = static_cast<int>(dx_crows[i + 1] - dx_crows[i]);
int non_zero_idx = blockIdx.y * blockDim.y + threadIdx.y;
for (; non_zero_idx < row_nnz; non_zero_idx += gridDim.y * blockDim.y) {
int j = dx_cols[row_first + non_zero_idx];
dx_values[row_first + non_zero_idx] = dout[i] * vec[j];
}
}
}
template <typename T, typename Context>
void MvCooGradKernel(const Context &dev_ctx,
const SparseCooTensor &x,
const DenseTensor &vec,
const DenseTensor &dout,
SparseCooTensor *dx,
DenseTensor *dvec) {
// dx{SparseCoo} = dout{Dense} * vec'{Dense}
if (dx) {
// InferMeta of SparseCooTensor 'dx', CreateLikeInferMeta
EmptyLikeCooKernel<T, Context>(dev_ctx, x, dx);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, dx->nnz());
PD_VISIT_BASE_INTEGRAL_TYPES(
dx->non_zero_indices().dtype(), "MvCooGradKernel", ([&] {
hipLaunchKernelGGL(( MvCooGradGpuKernel<T>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), dout.data<T>(),
vec.data<T>(),
dx->non_zero_indices().data<data_t>(),
dx->mutable_non_zero_elements()->data<T>(),
dx->nnz());
}));
}
// dvec{Dense} = x'{SparseCoo} * dout{Dense}
if (dvec) {
#if TORCH_HIP_VERSION >= 11000
// InferMeta of DenseTensor 'dvec'
dvec->Resize(vec.dims());
dev_ctx.template Alloc<T>(dvec);
auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx);
sparse_blas.SPMV(true, static_cast<T>(1), x, dout, static_cast<T>(0), dvec);
#else
PADDLE_THROW(
phi::errors::Unimplemented(" vec.grad of 'sparse.mv' use hipsparseSpMV, "
"which is supported from CUDA 11.0"));
#endif
}
}
template <typename T, typename Context>
void MvCsrGradKernel(const Context &dev_ctx,
const SparseCsrTensor &x,
const DenseTensor &vec,
const DenseTensor &dout,
SparseCsrTensor *dx,
DenseTensor *dvec) {
// dx{SparseCsr} = dout{Dense} * vec'{Dense}
if (dx) {
// InferMeta of SparseCsrTensor 'dx', CreateLikeInferMeta
EmptyLikeCsrKernel<T, Context>(dev_ctx, x, dx);
int row_number = dx->dims()[0];
int col_number = dx->dims()[1];
auto config = phi::backends::gpu::GetGpuLaunchConfig2D(
dev_ctx, col_number, row_number);
PD_VISIT_BASE_INTEGRAL_TYPES(
dx->non_zero_crows().dtype(), "MvCsrGradKernel", ([&] {
hipLaunchKernelGGL(( MvCsrGradGpuKernel<T>)
, dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
dev_ctx.stream(), dout.data<T>(),
vec.data<T>(),
dx->non_zero_crows().data<data_t>(),
dx->non_zero_cols().data<data_t>(),
dx->mutable_non_zero_elements()->data<T>(),
row_number);
}));
}
// dvec{Dense} = x'{SparseCsr} * dout{Dense}
if (dvec) {
#if TORCH_HIP_VERSION >= 11000
// InferMeta of DenseTensor 'dvec'
dvec->Resize(vec.dims());
dev_ctx.template Alloc<T>(dvec);
auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx);
sparse_blas.SPMV(true, static_cast<T>(1), x, dout, static_cast<T>(0), dvec);
#else
PADDLE_THROW(
phi::errors::Unimplemented(" vec.grad of 'sparse.mv' use hipsparseSpMV, "
"which is supported from CUDA 11.0"));
#endif
}
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(
mv_coo_grad, GPU, ALL_LAYOUT, phi::sparse::MvCooGradKernel, float, double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(
mv_csr_grad, GPU, ALL_LAYOUT, phi::sparse::MvCsrGradKernel, float, double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
| be057b7adda15f4afa36728f4419e9d82fa5efe7.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/sparse/mv_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/funcs/sparse/sparse_blas.h"
#include "paddle/phi/kernels/sparse/empty_kernel.h"
namespace phi {
namespace sparse {
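// Backward pass of sparse mat-vec (out = x * vec): for every stored entry
// (i, j) of x, d x_{ij} = dout_i * vec_j, so the kernels below only write the
// existing sparsity pattern. For the COO layout the row indices occupy
// dx_indices[0, nnz) and the column indices dx_indices[nnz, 2*nnz). The vector
// gradient dvec = x^T * dout is delegated to the sparse BLAS SpMV, which is
// only available from CUDA 11 onwards.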
template <typename T, typename IntT>
__global__ void MvCooGradGpuKernel(const T *dout,
const T *vec,
const IntT *dx_indices,
T *dx_values,
int nnz) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < nnz; idx += blockDim.x * gridDim.x) {
int i = dx_indices[idx];
int j = dx_indices[idx + nnz];
dx_values[idx] = dout[i] * vec[j];
}
}
template <typename T, typename IntT>
__global__ void MvCsrGradGpuKernel(const T *dout,
const T *vec,
const IntT *dx_crows,
const IntT *dx_cols,
T *dx_values,
int row_number) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (; i < row_number; i += gridDim.x * blockDim.x) {
int row_first = static_cast<int>(dx_crows[i]);
int row_nnz = static_cast<int>(dx_crows[i + 1] - dx_crows[i]);
int non_zero_idx = blockIdx.y * blockDim.y + threadIdx.y;
for (; non_zero_idx < row_nnz; non_zero_idx += gridDim.y * blockDim.y) {
int j = dx_cols[row_first + non_zero_idx];
dx_values[row_first + non_zero_idx] = dout[i] * vec[j];
}
}
}
template <typename T, typename Context>
void MvCooGradKernel(const Context &dev_ctx,
const SparseCooTensor &x,
const DenseTensor &vec,
const DenseTensor &dout,
SparseCooTensor *dx,
DenseTensor *dvec) {
// dx{SparseCoo} = dout{Dense} * vec'{Dense}
if (dx) {
// InferMeta of SparseCooTensor 'dx', CreateLikeInferMeta
EmptyLikeCooKernel<T, Context>(dev_ctx, x, dx);
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, dx->nnz());
PD_VISIT_BASE_INTEGRAL_TYPES(
dx->non_zero_indices().dtype(), "MvCooGradKernel", ([&] {
MvCooGradGpuKernel<T>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(dout.data<T>(),
vec.data<T>(),
dx->non_zero_indices().data<data_t>(),
dx->mutable_non_zero_elements()->data<T>(),
dx->nnz());
}));
}
// dvec{Dense} = x'{SparseCoo} * dout{Dense}
if (dvec) {
#if CUDA_VERSION >= 11000
// InferMeta of DenseTensor 'dvec'
dvec->Resize(vec.dims());
dev_ctx.template Alloc<T>(dvec);
auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx);
sparse_blas.SPMV(true, static_cast<T>(1), x, dout, static_cast<T>(0), dvec);
#else
PADDLE_THROW(
phi::errors::Unimplemented(" vec.grad of 'sparse.mv' use cusparseSpMV, "
"which is supported from CUDA 11.0"));
#endif
}
}
template <typename T, typename Context>
void MvCsrGradKernel(const Context &dev_ctx,
const SparseCsrTensor &x,
const DenseTensor &vec,
const DenseTensor &dout,
SparseCsrTensor *dx,
DenseTensor *dvec) {
// dx{SparseCsr} = dout{Dense} * vec'{Dense}
if (dx) {
// InferMeta of SparseCsrTensor 'dx', CreateLikeInferMeta
EmptyLikeCsrKernel<T, Context>(dev_ctx, x, dx);
int row_number = dx->dims()[0];
int col_number = dx->dims()[1];
auto config = phi::backends::gpu::GetGpuLaunchConfig2D(
dev_ctx, col_number, row_number);
PD_VISIT_BASE_INTEGRAL_TYPES(
dx->non_zero_crows().dtype(), "MvCsrGradKernel", ([&] {
MvCsrGradGpuKernel<T>
<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
dev_ctx.stream()>>>(dout.data<T>(),
vec.data<T>(),
dx->non_zero_crows().data<data_t>(),
dx->non_zero_cols().data<data_t>(),
dx->mutable_non_zero_elements()->data<T>(),
row_number);
}));
}
// dvec{Dense} = x'{SparseCsr} * dout{Dense}
if (dvec) {
#if CUDA_VERSION >= 11000
// InferMeta of DenseTensor 'dvec'
dvec->Resize(vec.dims());
dev_ctx.template Alloc<T>(dvec);
auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx);
sparse_blas.SPMV(true, static_cast<T>(1), x, dout, static_cast<T>(0), dvec);
#else
PADDLE_THROW(
phi::errors::Unimplemented(" vec.grad of 'sparse.mv' use cusparseSpMV, "
"which is supported from CUDA 11.0"));
#endif
}
}
} // namespace sparse
} // namespace phi
PD_REGISTER_KERNEL(
mv_coo_grad, GPU, ALL_LAYOUT, phi::sparse::MvCooGradKernel, float, double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
PD_REGISTER_KERNEL(
mv_csr_grad, GPU, ALL_LAYOUT, phi::sparse::MvCsrGradKernel, float, double) {
kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
|
ec0c6cfa14b283505894920da70cd4a97734326b.hip | // !!! This is a file automatically generated by hipify!!!
#include "common_hip.cuh"
#include "app.cuh"
#include <gflags/gflags.h>
bool SSSP_single_gpu();
bool SSSP_multi_gpu();
namespace sssp
{
struct App
{
static bool Single() { return SSSP_single_gpu(); }
static bool Multi() { return SSSP_multi_gpu(); }
};
} // namespace sssp
int main(int argc, char **argv)
{
Skeleton<sssp::App> app;
int exit = app(argc, argv);
return 0;
} | ec0c6cfa14b283505894920da70cd4a97734326b.cu |
#include "common.cuh"
#include "app.cuh"
#include <gflags/gflags.h>
bool SSSP_single_gpu();
bool SSSP_multi_gpu();
namespace sssp
{
struct App
{
static bool Single() { return SSSP_single_gpu(); }
static bool Multi() { return SSSP_multi_gpu(); }
};
} // namespace sssp
int main(int argc, char **argv)
{
Skeleton<sssp::App> app;
int exit = app(argc, argv);
return 0;
} |
5e2fafdb8b16f6259b8eb159fb64a4d3375948c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
#include "util.h"
#include "mapper.cuh"
#include "reducer.cuh"
#include "wtime.h"
#include "barrier.cuh"
#include "gpu_graph.cuh"
#include "meta_data.cuh"
#include "mapper_enactor.cuh"
#include "reducer_enactor.cuh"
#include "cpu_sssp_route.hpp"
void bellman_ford_outbound_cpu(
graph<long, long, long,vertex_t, index_t, weight_t> *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
printf("CPU data init started\n");
for(feature_t fr = 0; fr < vert_count; fr++) {
for(feature_t to = 0; to < vert_count; to++) {
if(fr == to) {
vert_status[to * vert_count + fr] = 0;
vert_data_out[to * vert_count + fr] = -1;
} else {
vert_status[to * vert_count + fr] = INFTY;
vert_data_out[to * vert_count + fr] = -1;
}
}
}
printf("CPU data init ok\n");
int changed = true;
int iter = 0;
while(changed) {
changed = false;
for(vertex_t node_at = 0; node_at < vert_count; node_at++) {
vertex_t my_beg = ggraph->beg_pos[node_at];
vertex_t my_end = ggraph->beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
vertex_t nebr = ggraph->adj_list[my_beg];
weight_t weit = ggraph->weight[my_beg];
// Now steal their routing table (pull)
for(vertex_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + node_at] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + nebr];
feature_t new_dist = vert_status[row * vert_count + node_at] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + nebr] = new_dist;
vert_data_out[row * vert_count + nebr] = node_at;
changed = true;
} else if (new_dist == my_dist && node_at < vert_data_out[row * vert_count + nebr]){
vert_data_out[row * vert_count + nebr] = node_at;
changed = true;
}
}
}
}
printf("iter %d ok\n", iter);
iter++;
}
printf("BF converged at %d\n", iter);
}
void bellman_ford_inbound_cpu(
graph<long, long, long,vertex_t, index_t, weight_t> *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
// Standard graph is outbound edges.
// RUN THIS ON INVERTED GRAPH
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
printf("CPU data init started\n");
for(feature_t fr = 0; fr < vert_count; fr++) {
for(feature_t to = 0; to < vert_count; to++) {
if(fr == to) {
vert_status[to * vert_count + fr] = 0;
vert_data_out[to * vert_count + fr] = -1;
} else {
vert_status[to * vert_count + fr] = INFTY;
vert_data_out[to * vert_count + fr] = -1;
}
}
}
printf("CPU data init ok\n");
int changed = true;
int iter = 0;
while(changed) {
changed = false;
for(vertex_t node_at = 0; node_at < vert_count; node_at++) {
vertex_t my_beg = ggraph->beg_pos[node_at];
vertex_t my_end = ggraph->beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
vertex_t nebr = ggraph->adj_list[my_beg];
weight_t weit = ggraph->weight[my_beg];
// Now steal their routing table (pull)
for(vertex_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + nebr] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + node_at];
feature_t new_dist = vert_status[row * vert_count + nebr] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + node_at] = new_dist;
vert_data_out[row * vert_count + node_at] = nebr;
changed = true;
} else if (new_dist == my_dist && nebr < vert_data_out[row * vert_count + node_at]){
vert_data_out[row * vert_count + node_at] = nebr;
changed = true;
}
}
}
}
printf("iter %d ok\n", iter);
iter++;
}
printf("BF converged at %d\n", iter);
}
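// GPU path: bf_init_data_kernel initializes the distance and routing tables
// (one thread per vertex), bf_iterate_data_kernel performs one pull-style
// relaxation sweep per launch and sets *changed whenever a distance or route
// entry improves, and the host loop in bellman_ford_inbound_gpu repeats sweeps
// until a pass makes no changes.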
__global__ void bf_init_data_kernel(
vertex_t vert_count,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
ptr_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid < vert_count) {
for(ptr_t to = 0; to < vert_count; to++) {
if(gid == to) {
vert_status[to * vert_count + gid] = 0;
vert_data_out[to * vert_count + gid] = -1;
} else {
vert_status[to * vert_count + gid] = INFTY;
vert_data_out[to * vert_count + gid] = -1;
}
}
}
}
__global__ void bf_iterate_data_kernel(
vertex_t vert_count,
feature_t *vert_status,
data_out_cell_t *vert_data_out,
vertex_t *beg_pos,
vertex_t *adj_list,
feature_t *weight_list,
bool* changed
){
index_t node_at = threadIdx.x + blockIdx.x * blockDim.x;
if(node_at == 0) {
*changed = false;
}
__syncthreads();
// bool* changed will only be written to with `true`, so race cond within iteration is ok
if(node_at < vert_count) {
ptr_t my_beg = beg_pos[node_at];
ptr_t my_end = beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
ptr_t nebr = adj_list[my_beg];
weight_t weit = weight_list[my_beg];
// Now steal their routing table (pull)
for(ptr_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + nebr] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + node_at];
feature_t new_dist = vert_status[row * vert_count + nebr] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + node_at] = new_dist;
vert_data_out[row * vert_count + node_at] = nebr;
*changed = true;
} else if (new_dist == my_dist && nebr < vert_data_out[row * vert_count + node_at]){
vert_data_out[row * vert_count + node_at] = nebr;
*changed = true;
}
}
}
}
}
void bellman_ford_inbound_gpu(
gpu_graph *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out,
feature_t block_size
){
// Standard graph is outbound edges.
// RUN THIS ON INVERTED GRAPH
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
// do data init
feature_t grid_size = (vert_count + block_size - 1)/block_size;
H_ERR(hipDeviceSynchronize());
hipLaunchKernelGGL(( bf_init_data_kernel), dim3(grid_size),dim3(block_size), 0, 0, vert_count, vert_status, vert_data_out);
H_ERR(hipDeviceSynchronize());
bool* g_changed;
H_ERR(hipMallocManaged((void **)&g_changed, sizeof(bool)));
H_ERR(hipDeviceSynchronize());
printf("GPU data init ok\n");
*g_changed = true;
int iter = 0;
while(*g_changed) {
*g_changed = false;
hipLaunchKernelGGL(( bf_iterate_data_kernel), dim3(grid_size),dim3(block_size), 0, 0,
vert_count,
vert_status,
vert_data_out,
ggraph->beg_pos,
ggraph->adj_list,
ggraph->weight_list,
g_changed
);
H_ERR(hipDeviceSynchronize());
//H_ERR(hipMemcpy(&changed, g_changed, sizeof(bool), hipMemcpyDeviceToHost));
printf("iter %d ok\n", iter);
iter++;
}
printf("BF_gpu converged at %d\n", iter);
}
int main(int args, char **argv)
{
// Based on the high-diameter SSSP
std::cout<<"Input: /path/to/exe beg_inv csr_inv wt_inv gpu=1 block_size=?? show_debug=1\n";
if(args<7){
std::cout<<"Wrong input\n";exit(-1);
}
for(int i = 0; i < args; i++) {
std::cout<<argv[i]<<" ";
}
std::cout<<"\n";
double tm_map,tm_red,tm_scan;
char *file_beg_pos = argv[1];
char *file_adj_list = argv[2];
char *file_weight_list = argv[3];
const int ENABLE_GPU = atoi(argv[4]);
const int BLOCK_SIZE = atoi(argv[5]);
const int ENABLE_DEBUG = atoi(argv[6]);
//Read graph to CPU
graph<long, long, long,vertex_t, index_t, weight_t>
*ginst=new graph<long, long, long,vertex_t, index_t, weight_t>
(file_beg_pos, file_adj_list, file_weight_list);
double total_time = 0;
if(ginst->vert_count >= (1<<15)) {
printf("***Vertex count > 2**15 may result in segfault from array address exceeding 2**32.\n");
printf("***Proceed at your own risk!\n");
}
feature_t *vert_status;
data_out_cell_t *vert_data_out;
vertex_t vert_count = ginst->vert_count;
const ptr_t STATUS_SZ = sizeof(feature_t) * vert_count * vert_count;
const ptr_t DATA_SZ = sizeof(data_out_cell_t) * vert_count * vert_count;
//vert_status = (feature_t*) malloc(STATUS_SZ);
//vert_data_out = (data_out_cell_t*) malloc(DATA_SZ);
H_ERR(hipMallocManaged((void **)&vert_status, STATUS_SZ));
H_ERR(hipMallocManaged((void **)&vert_data_out, DATA_SZ));
// vert_data_out is [to_final_node][from_at]
gpu_graph ggraph(ginst);
/*feature_t *g_vert_status;
data_out_cell_t *g_vert_data_out;
if(ENABLE_GPU) {
H_ERR(hipMalloc((void **)&g_vert_status, sizeof(feature_t) * vert_count * vert_count));
H_ERR(hipMalloc((void **)&g_vert_data_out, sizeof(data_out_cell_t) * vert_count * vert_count));
}*/
printf("Init ok\n");
double time = wtime();
//bellman_ford_outbound_cpu(ginst, vert_status, vert_data_out);
if(!ENABLE_GPU){
printf("CPU run started\n");
bellman_ford_inbound_cpu(ginst, vert_status, vert_data_out);
} else {
printf("GPU run started\n");
bellman_ford_inbound_gpu(&ggraph, vert_status, vert_data_out, BLOCK_SIZE);
}
time = wtime() - time;
std::cout<<"Total APSP time: "<<time<<" second(s).\n";
/*if(ENABLE_GPU) {
H_ERR(hipMemcpy(vert_status, g_vert_status, STATUS_SZ, hipMemcpyDeviceToHost));
H_ERR(hipMemcpy(vert_data_out, g_vert_data_out, STATUS_SZ, hipMemcpyDeviceToHost));
}*/
for(vertex_t src_v = 0; src_v < ginst->vert_count; src_v++) {
if(ENABLE_DEBUG) {
printf("\t\t--- At start node %d ---\n", src_v);
feature_t *cpu_dist;
data_out_cell_t *cpu_routes;
cpu_sssp<index_t, vertex_t, weight_t, feature_t>
(cpu_dist, cpu_routes, src_v, ginst->vert_count, ginst->edge_count, ginst->beg_pos,
ginst->adj_list, ginst->weight);
feature_t *gpu_dist = &(vert_status[src_v * vert_count]);
data_out_cell_t *gpu_routes = &(vert_data_out[src_v * vert_count]);
if (memcmp(cpu_dist, gpu_dist, sizeof(feature_t) * ginst->vert_count) == 0) {
printf("Distance result correct\n");
//Now check route
if (memcmp(cpu_routes, gpu_routes, sizeof(data_out_cell_t) * ginst->vert_count) == 0) {
printf("Route result correct\n");
}else{
printf("Route result wrong!\n");
//TODO: "deep inspect" route by traversing back to root and check weight
printf("GPU - CPU\n");
for(vertex_t i = 0; i < ginst->vert_count; i++){
if (gpu_routes[i] != cpu_routes[i]) {
printf("%d: (%d, %d) - (%d, %d): %d\n", i,
gpu_routes[i],
gpu_dist[gpu_routes[i]],
cpu_routes[i],
cpu_dist[cpu_routes[i]],
gpu_dist[gpu_routes[i]] - cpu_dist[cpu_routes[i]]
);
printf("\tG: ");
vertex_t current = i;
while(current != -1) {
printf("%d->", current);
current = gpu_routes[current];
}
printf("\n");
printf("\tC: ");
current = i;
while(current != -1) {
printf("%d->", current);
current = cpu_routes[current];
}
printf("\n");
}
}
break;
}
} else {
printf("Distance result wrong!\n");
for(int i = 0; i < 10; i ++) {
std::cout<<gpu_dist[i]<<" "<<cpu_dist[i]<<"\n";
}
break;
}
}
}
}
| 5e2fafdb8b16f6259b8eb159fb64a4d3375948c9.cu | #include "header.h"
#include "util.h"
#include "mapper.cuh"
#include "reducer.cuh"
#include "wtime.h"
#include "barrier.cuh"
#include "gpu_graph.cuh"
#include "meta_data.cuh"
#include "mapper_enactor.cuh"
#include "reducer_enactor.cuh"
#include "cpu_sssp_route.hpp"
void bellman_ford_outbound_cpu(
graph<long, long, long,vertex_t, index_t, weight_t> *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
printf("CPU data init started\n");
for(feature_t fr = 0; fr < vert_count; fr++) {
for(feature_t to = 0; to < vert_count; to++) {
if(fr == to) {
vert_status[to * vert_count + fr] = 0;
vert_data_out[to * vert_count + fr] = -1;
} else {
vert_status[to * vert_count + fr] = INFTY;
vert_data_out[to * vert_count + fr] = -1;
}
}
}
printf("CPU data init ok\n");
int changed = true;
int iter = 0;
while(changed) {
changed = false;
for(vertex_t node_at = 0; node_at < vert_count; node_at++) {
vertex_t my_beg = ggraph->beg_pos[node_at];
vertex_t my_end = ggraph->beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
vertex_t nebr = ggraph->adj_list[my_beg];
weight_t weit = ggraph->weight[my_beg];
// Now steal their routing table (pull)
for(vertex_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + node_at] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + nebr];
feature_t new_dist = vert_status[row * vert_count + node_at] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + nebr] = new_dist;
vert_data_out[row * vert_count + nebr] = node_at;
changed = true;
} else if (new_dist == my_dist && node_at < vert_data_out[row * vert_count + nebr]){
vert_data_out[row * vert_count + nebr] = node_at;
changed = true;
}
}
}
}
printf("iter %d ok\n", iter);
iter++;
}
printf("BF converged at %d\n", iter);
}
void bellman_ford_inbound_cpu(
graph<long, long, long,vertex_t, index_t, weight_t> *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
// Standard graph is outbound edges.
// RUN THIS ON INVERTED GRAPH
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
printf("CPU data init started\n");
for(feature_t fr = 0; fr < vert_count; fr++) {
for(feature_t to = 0; to < vert_count; to++) {
if(fr == to) {
vert_status[to * vert_count + fr] = 0;
vert_data_out[to * vert_count + fr] = -1;
} else {
vert_status[to * vert_count + fr] = INFTY;
vert_data_out[to * vert_count + fr] = -1;
}
}
}
printf("CPU data init ok\n");
int changed = true;
int iter = 0;
while(changed) {
changed = false;
for(vertex_t node_at = 0; node_at < vert_count; node_at++) {
vertex_t my_beg = ggraph->beg_pos[node_at];
vertex_t my_end = ggraph->beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
vertex_t nebr = ggraph->adj_list[my_beg];
weight_t weit = ggraph->weight[my_beg];
// Now steal their routing table (pull)
for(vertex_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + nebr] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + node_at];
feature_t new_dist = vert_status[row * vert_count + nebr] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + node_at] = new_dist;
vert_data_out[row * vert_count + node_at] = nebr;
changed = true;
} else if (new_dist == my_dist && nebr < vert_data_out[row * vert_count + node_at]){
vert_data_out[row * vert_count + node_at] = nebr;
changed = true;
}
}
}
}
printf("iter %d ok\n", iter);
iter++;
}
printf("BF converged at %d\n", iter);
}
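// GPU path: bf_init_data_kernel initializes the distance and routing tables
// (one thread per vertex), bf_iterate_data_kernel performs one pull-style
// relaxation sweep per launch and sets *changed whenever a distance or route
// entry improves, and the host loop in bellman_ford_inbound_gpu repeats sweeps
// until a pass makes no changes.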
__global__ void bf_init_data_kernel(
vertex_t vert_count,
feature_t *vert_status,
data_out_cell_t *vert_data_out
){
ptr_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if(gid < vert_count) {
for(ptr_t to = 0; to < vert_count; to++) {
if(gid == to) {
vert_status[to * vert_count + gid] = 0;
vert_data_out[to * vert_count + gid] = -1;
} else {
vert_status[to * vert_count + gid] = INFTY;
vert_data_out[to * vert_count + gid] = -1;
}
}
}
}
__global__ void bf_iterate_data_kernel(
vertex_t vert_count,
feature_t *vert_status,
data_out_cell_t *vert_data_out,
vertex_t *beg_pos,
vertex_t *adj_list,
feature_t *weight_list,
bool* changed
){
index_t node_at = threadIdx.x + blockIdx.x * blockDim.x;
if(node_at == 0) {
*changed = false;
}
__syncthreads();
// bool* changed will only be written to with `true`, so race cond within iteration is ok
if(node_at < vert_count) {
ptr_t my_beg = beg_pos[node_at];
ptr_t my_end = beg_pos[node_at+1];
for(;my_beg < my_end; my_beg ++) {
ptr_t nebr = adj_list[my_beg];
weight_t weit = weight_list[my_beg];
// Now steal their routing table (pull)
for(ptr_t row = 0; row < vert_count; row++) {
if(vert_status[row * vert_count + nebr] == INFTY){
continue;
}
feature_t my_dist = vert_status[row * vert_count + node_at];
feature_t new_dist = vert_status[row * vert_count + nebr] + weit;
if(new_dist < my_dist) {
vert_status[row * vert_count + node_at] = new_dist;
vert_data_out[row * vert_count + node_at] = nebr;
*changed = true;
} else if (new_dist == my_dist && nebr < vert_data_out[row * vert_count + node_at]){
vert_data_out[row * vert_count + node_at] = nebr;
*changed = true;
}
}
}
}
}
void bellman_ford_inbound_gpu(
gpu_graph *ggraph,
feature_t *vert_status,
data_out_cell_t *vert_data_out,
feature_t block_size
){
// Standard graph is outbound edges.
// RUN THIS ON INVERTED GRAPH
vertex_t vert_count = ggraph->vert_count;
// vert_data_out is [to_final_node][from_at]
// do data init
feature_t grid_size = (vert_count + block_size - 1)/block_size;
H_ERR(cudaDeviceSynchronize());
bf_init_data_kernel<<<grid_size,block_size>>>(vert_count, vert_status, vert_data_out);
H_ERR(cudaDeviceSynchronize());
bool* g_changed;
H_ERR(cudaMallocManaged((void **)&g_changed, sizeof(bool)));
H_ERR(cudaDeviceSynchronize());
printf("GPU data init ok\n");
*g_changed = true;
int iter = 0;
while(*g_changed) {
*g_changed = false;
bf_iterate_data_kernel<<<grid_size,block_size>>>(
vert_count,
vert_status,
vert_data_out,
ggraph->beg_pos,
ggraph->adj_list,
ggraph->weight_list,
g_changed
);
H_ERR(cudaThreadSynchronize());
//H_ERR(cudaMemcpy(&changed, g_changed, sizeof(bool), cudaMemcpyDeviceToHost));
printf("iter %d ok\n", iter);
iter++;
}
printf("BF_gpu converged at %d\n", iter);
}
int main(int args, char **argv)
{
// Based on the high-diameter SSSP
std::cout<<"Input: /path/to/exe beg_inv csr_inv wt_inv gpu=1 block_size=?? show_debug=1\n";
if(args<7){
std::cout<<"Wrong input\n";exit(-1);
}
for(int i = 0; i < args; i++) {
std::cout<<argv[i]<<" ";
}
std::cout<<"\n";
double tm_map,tm_red,tm_scan;
char *file_beg_pos = argv[1];
char *file_adj_list = argv[2];
char *file_weight_list = argv[3];
const int ENABLE_GPU = atoi(argv[4]);
const int BLOCK_SIZE = atoi(argv[5]);
const int ENABLE_DEBUG = atoi(argv[6]);
//Read graph to CPU
graph<long, long, long,vertex_t, index_t, weight_t>
*ginst=new graph<long, long, long,vertex_t, index_t, weight_t>
(file_beg_pos, file_adj_list, file_weight_list);
double total_time = 0;
if(ginst->vert_count >= (1<<15)) {
printf("***Vertex count > 2**15 may result in segfault from array address exceeding 2**32.\n");
printf("***Proceed at your own risk!\n");
}
feature_t *vert_status;
data_out_cell_t *vert_data_out;
vertex_t vert_count = ginst->vert_count;
const ptr_t STATUS_SZ = sizeof(feature_t) * vert_count * vert_count;
const ptr_t DATA_SZ = sizeof(data_out_cell_t) * vert_count * vert_count;
//vert_status = (feature_t*) malloc(STATUS_SZ);
//vert_data_out = (data_out_cell_t*) malloc(DATA_SZ);
H_ERR(cudaMallocManaged((void **)&vert_status, STATUS_SZ));
H_ERR(cudaMallocManaged((void **)&vert_data_out, DATA_SZ));
// vert_data_out is [to_final_node][from_at]
gpu_graph ggraph(ginst);
/*feature_t *g_vert_status;
data_out_cell_t *g_vert_data_out;
if(ENABLE_GPU) {
H_ERR(cudaMalloc((void **)&g_vert_status, sizeof(feature_t) * vert_count * vert_count));
H_ERR(cudaMalloc((void **)&g_vert_data_out, sizeof(data_out_cell_t) * vert_count * vert_count));
}*/
printf("Init ok\n");
double time = wtime();
//bellman_ford_outbound_cpu(ginst, vert_status, vert_data_out);
if(!ENABLE_GPU){
printf("CPU run started\n");
bellman_ford_inbound_cpu(ginst, vert_status, vert_data_out);
} else {
printf("GPU run started\n");
bellman_ford_inbound_gpu(&ggraph, vert_status, vert_data_out, BLOCK_SIZE);
}
time = wtime() - time;
std::cout<<"Total APSP time: "<<time<<" second(s).\n";
/*if(ENABLE_GPU) {
H_ERR(cudaMemcpy(vert_status, g_vert_status, STATUS_SZ, cudaMemcpyDeviceToHost));
H_ERR(cudaMemcpy(vert_data_out, g_vert_data_out, STATUS_SZ, cudaMemcpyDeviceToHost));
}*/
for(vertex_t src_v = 0; src_v < ginst->vert_count; src_v++) {
if(ENABLE_DEBUG) {
printf("\t\t--- At start node %d ---\n", src_v);
feature_t *cpu_dist;
data_out_cell_t *cpu_routes;
cpu_sssp<index_t, vertex_t, weight_t, feature_t>
(cpu_dist, cpu_routes, src_v, ginst->vert_count, ginst->edge_count, ginst->beg_pos,
ginst->adj_list, ginst->weight);
feature_t *gpu_dist = &(vert_status[src_v * vert_count]);
data_out_cell_t *gpu_routes = &(vert_data_out[src_v * vert_count]);
if (memcmp(cpu_dist, gpu_dist, sizeof(feature_t) * ginst->vert_count) == 0) {
printf("Distance result correct\n");
//Now check route
if (memcmp(cpu_routes, gpu_routes, sizeof(data_out_cell_t) * ginst->vert_count) == 0) {
printf("Route result correct\n");
}else{
printf("Route result wrong!\n");
//TODO: "deep inspect" route by traversing back to root and check weight
printf("GPU - CPU\n");
for(vertex_t i = 0; i < ginst->vert_count; i++){
if (gpu_routes[i] != cpu_routes[i]) {
printf("%d: (%d, %d) - (%d, %d): %d\n", i,
gpu_routes[i],
gpu_dist[gpu_routes[i]],
cpu_routes[i],
cpu_dist[cpu_routes[i]],
gpu_dist[gpu_routes[i]] - cpu_dist[cpu_routes[i]]
);
printf("\tG: ");
vertex_t current = i;
while(current != -1) {
printf("%d->", current);
current = gpu_routes[current];
}
printf("\n");
printf("\tC: ");
current = i;
while(current != -1) {
printf("%d->", current);
current = cpu_routes[current];
}
printf("\n");
}
}
break;
}
} else {
printf("Distance result wrong!\n");
for(int i = 0; i < 10; i ++) {
std::cout<<gpu_dist[i]<<" "<<cpu_dist[i]<<"\n";
}
break;
}
}
}
}
|
bd6b7bcfda943dcaac5eee61f853260fb6b2f3d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hipblas.h>
#include <hipsolver.h>
#include "struct.h"
#include "constants.h"
void assignObjfcnStructMemory(long long &, fcndata &, double *);
void objfcn(double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *wgtGrpVec;
mxGPUArray const *vfdElmVtxMat, *tgtCenPosMat, *tgtUniDirMat, *tgtElmVolVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 4]);
vfdElmVtxMat = mxGPUCreateFromMxArray(prhs[ 5]);
tgtCenPosMat = mxGPUCreateFromMxArray(prhs[ 6]);
tgtUniDirMat = mxGPUCreateFromMxArray(prhs[ 7]);
tgtElmVolVec = mxGPUCreateFromMxArray(prhs[ 8]);
fcnObj.vfd.cenKnlType = mxGetScalar(prhs[ 9]);
fcnObj.vfd.cenKnlWidth = mxGetScalar(prhs[10]);
fcnObj.vfd.dirKnlType = mxGetScalar(prhs[11]);
fcnObj.vfd.dirKnlWidth = mxGetScalar(prhs[12]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[13]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[14]);
fcnObj.prm.knlEps = mxGetScalar(prhs[15]);
fcnObj.prm.timeStp = mxGetScalar(prhs[16]);
fcnObj.prm.timeNum = mxGetScalar(prhs[17]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[18]);
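// Note: the gateway reads 19 right-hand-side inputs (prhs[0] .. prhs[18]);
// nrhs is not validated, so the MATLAB caller must supply all of them.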
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.elm.d_vfdElmVtxMat = (int *) mxGPUGetDataReadOnly(vfdElmVtxMat);
fcnObj.tgt.d_cenPosMat = (double *) mxGPUGetDataReadOnly(tgtCenPosMat);
fcnObj.tgt.d_uniDirMat = (double *) mxGPUGetDataReadOnly(tgtUniDirMat);
fcnObj.tgt.d_elmVolVec = (double *) mxGPUGetDataReadOnly(tgtElmVolVec);
mwSize const *vfdElmDims = mxGPUGetDimensions(vfdElmVtxMat);
mwSize const *tgtElmDims = mxGPUGetDimensions(tgtCenPosMat);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
fcnObj.prm.vfdNdeNum = fcnObj.prm.rgdNdeNum;
fcnObj.prm.vfdElmNum = vfdElmDims[0];
fcnObj.tgt.tgtElmNum = tgtElmDims[0];
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int vfdElmNum = fcnObj.prm.vfdElmNum;
int timeNum = fcnObj.prm.timeNum;
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ vfdElmNum * (DIMNUM * 2 + 2) + fcnObj.tgt.tgtElmNum
+ SUMBLKDIM;
double *gpuDblSpace;
hipError_t error = hipMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != hipSuccess )
mexErrMsgIdAndTxt("objfcn2D:hipMalloc", "Fail to allocate device memory.");
hipMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjfcnStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objfcn2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
hipblasCreate(&(fcnObj.blasHdl));
hipsolverDnCreate(&(fcnObj.solvHdl));
hipsolverDnDpotrf_bufferSize(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
hipMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objfcn(&h_objVal, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(vfdElmVtxMat);
mxGPUDestroyGPUArray(tgtCenPosMat);
mxGPUDestroyGPUArray(tgtUniDirMat);
mxGPUDestroyGPUArray(tgtElmVolVec);
mxFree((void *) vfdElmDims);
mxFree((void *) tgtElmDims);
hipFree(gpuDblSpace);
hipFree(fcnObj.d_status);
hipFree(fcnObj.d_workspace);
hipblasDestroy(fcnObj.blasHdl);
hipsolverDnDestroy(fcnObj.solvHdl);
return;
}
| bd6b7bcfda943dcaac5eee61f853260fb6b2f3d5.cu | #include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjfcnStructMemory(long long &, fcndata &, double *);
void objfcn(double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *wgtGrpVec;
mxGPUArray const *vfdElmVtxMat, *tgtCenPosMat, *tgtUniDirMat, *tgtElmVolVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 4]);
vfdElmVtxMat = mxGPUCreateFromMxArray(prhs[ 5]);
tgtCenPosMat = mxGPUCreateFromMxArray(prhs[ 6]);
tgtUniDirMat = mxGPUCreateFromMxArray(prhs[ 7]);
tgtElmVolVec = mxGPUCreateFromMxArray(prhs[ 8]);
fcnObj.vfd.cenKnlType = mxGetScalar(prhs[ 9]);
fcnObj.vfd.cenKnlWidth = mxGetScalar(prhs[10]);
fcnObj.vfd.dirKnlType = mxGetScalar(prhs[11]);
fcnObj.vfd.dirKnlWidth = mxGetScalar(prhs[12]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[13]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[14]);
fcnObj.prm.knlEps = mxGetScalar(prhs[15]);
fcnObj.prm.timeStp = mxGetScalar(prhs[16]);
fcnObj.prm.timeNum = mxGetScalar(prhs[17]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[18]);
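// Note: the gateway reads 19 right-hand-side inputs (prhs[0] .. prhs[18]);
// nrhs is not validated, so the MATLAB caller must supply all of them.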
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.elm.d_vfdElmVtxMat = (int *) mxGPUGetDataReadOnly(vfdElmVtxMat);
fcnObj.tgt.d_cenPosMat = (double *) mxGPUGetDataReadOnly(tgtCenPosMat);
fcnObj.tgt.d_uniDirMat = (double *) mxGPUGetDataReadOnly(tgtUniDirMat);
fcnObj.tgt.d_elmVolVec = (double *) mxGPUGetDataReadOnly(tgtElmVolVec);
mwSize const *vfdElmDims = mxGPUGetDimensions(vfdElmVtxMat);
mwSize const *tgtElmDims = mxGPUGetDimensions(tgtCenPosMat);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
fcnObj.prm.vfdNdeNum = fcnObj.prm.rgdNdeNum;
fcnObj.prm.vfdElmNum = vfdElmDims[0];
fcnObj.tgt.tgtElmNum = tgtElmDims[0];
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int vfdElmNum = fcnObj.prm.vfdElmNum;
int timeNum = fcnObj.prm.timeNum;
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ vfdElmNum * (DIMNUM * 2 + 2) + fcnObj.tgt.tgtElmNum
+ SUMBLKDIM;
double *gpuDblSpace;
cudaError_t error = cudaMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != cudaSuccess )
mexErrMsgIdAndTxt("objfcn2D:cudaMalloc", "Fail to allocate device memory.");
cudaMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjfcnStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objfcn2D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
cublasCreate(&(fcnObj.blasHdl));
cusolverDnCreate(&(fcnObj.solvHdl));
cusolverDnDpotrf_bufferSize(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
cudaMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objfcn(&h_objVal, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(vfdElmVtxMat);
mxGPUDestroyGPUArray(tgtCenPosMat);
mxGPUDestroyGPUArray(tgtUniDirMat);
mxGPUDestroyGPUArray(tgtElmVolVec);
mxFree((void *) vfdElmDims);
mxFree((void *) tgtElmDims);
cudaFree(gpuDblSpace);
cudaFree(fcnObj.d_status);
cudaFree(fcnObj.d_workspace);
cublasDestroy(fcnObj.blasHdl);
cusolverDnDestroy(fcnObj.solvHdl);
return;
}
|
4a75baa0df8adbdbee767d0e5586fe10ed4d3bee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int index)
{
{
return (index % 25000);
}
}
#endif
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int index)
{
{
return (((index + 101)) % 25000);
}
}
#endif
// TODO: There should be a better way to check if _block_k_5_ is already defined
#ifndef _block_k_5__func
#define _block_k_5__func
__device__ int _block_k_5_(environment_t *_env_, int p1, int p2)
{
{
return (p1 * p2);
}
}
#endif
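// kernel_1 composes the blocks above: each thread effectively computes
// (_tid_ % 25000) * ((_tid_ + 101) % 25000) and stores it in _result_.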
__global__ void kernel_1(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_5_(_env_, _block_k_2_(_env_, _tid_), _block_k_4_(_env_, _tid_));
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
hipError_t error = hipGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, hipGetErrorString(error));\
hipDeviceReset();\
return result_var;\
}
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, hipFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, hipMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(dev_env, host_env, sizeof(environment_t), hipMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Launch all kernels */
timeStartMeasure();
int * _kernel_result_2;
checkErrorReturn(program_result, hipMalloc(&_kernel_result_2, (sizeof(int) * 30000000)));
program_result->device_allocations->push_back(_kernel_result_2);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
hipLaunchKernelGGL(( kernel_1), dim3(29297), dim3(1024), 0, 0, dev_env, 30000000, _kernel_result_2);
checkErrorReturn(program_result, hipPeekAtLastError());
checkErrorReturn(program_result, hipDeviceSynchronize());
timeReportMeasure(program_result, kernel);
/* Copy over result to the host */
program_result->result = ({
variable_size_array_t device_array = variable_size_array_t((void *) _kernel_result_2, 30000000);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, hipMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, hipMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
checkErrorReturn(program_result, hipFree(_kernel_result_2));
timeReportMeasure(program_result, free_memory);
delete program_result->device_allocations;
return program_result;
}
| 4a75baa0df8adbdbee767d0e5586fe10ed4d3bee.cu | #include <stdio.h>
#include <assert.h>
#include <chrono>
#include <vector>
#include <algorithm>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
using namespace std;
/* ----- BEGIN Shared Library Export ----- */
// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux
#if defined(_MSC_VER)
// Microsoft
#define EXPORT __declspec(dllexport)
#define IMPORT __declspec(dllimport)
#elif defined(_GCC)
// GCC
#define EXPORT __attribute__((visibility("default")))
#define IMPORT
#else
// do nothing and hope for the best?
#define EXPORT
#define IMPORT
#pragma warning Unknown dynamic link import/export semantics.
#endif
/* ----- END Shared Library Export ----- */
/* ----- BEGIN Class Type ----- */
typedef int obj_id_t;
typedef int class_id_t;
/* ----- END Class Type ----- */
/* ----- BEGIN Environment (lexical variables) ----- */
// environment_struct must be defined later
typedef struct environment_struct environment_t;
/* ----- END Environment (lexical variables) ----- */
/* ----- BEGIN Forward declarations ----- */
typedef struct result_t result_t;
/* ----- END Forward declarations ----- */
// Define program result variable. Also contains benchmark numbers.
result_t *program_result;
// Variables for measuring time
chrono::high_resolution_clock::time_point start_time;
chrono::high_resolution_clock::time_point end_time;
/* ----- BEGIN Macros ----- */
#define timeStartMeasure() start_time = chrono::high_resolution_clock::now();
#define timeReportMeasure(result_var, variable_name) \
end_time = chrono::high_resolution_clock::now(); \
result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
/* ----- END Macros ----- */
/* ----- BEGIN Structs ----- */
struct variable_size_array_t {
void *content;
int size;
variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { };
static const variable_size_array_t error_return_value;
};
// error_return_value is used in case a host section terminates abnormally
const variable_size_array_t variable_size_array_t::error_return_value =
variable_size_array_t(NULL, 0);
/* ----- BEGIN Union Type ----- */
typedef union union_type_value {
obj_id_t object_id;
int int_;
float float_;
bool bool_;
void *pointer;
variable_size_array_t variable_size_array;
__host__ __device__ union_type_value(int value) : int_(value) { };
__host__ __device__ union_type_value(float value) : float_(value) { };
__host__ __device__ union_type_value(bool value) : bool_(value) { };
__host__ __device__ union_type_value(void *value) : pointer(value) { };
__host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { };
__host__ __device__ static union_type_value from_object_id(obj_id_t value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_int(int value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_float(float value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_bool(bool value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_pointer(void *value)
{
return union_type_value(value);
}
__host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value)
{
return union_type_value(value);
}
} union_v_t;
typedef struct union_type_struct
{
class_id_t class_id;
union_v_t value;
__host__ __device__ union_type_struct(
class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0))
: class_id(class_id_), value(value_) { };
static const union_type_struct error_return_value;
} union_t;
// error_return_value is used in case a host section terminates abnormally
const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0));
/* ----- END Union Type ----- */
typedef struct result_t {
variable_size_array_t result;
int last_error;
uint64_t time_setup_cuda;
uint64_t time_prepare_env;
uint64_t time_kernel;
uint64_t time_free_memory;
uint64_t time_transfer_memory;
uint64_t time_allocate_memory;
// Memory management
vector<void*> *device_allocations;
} result_t;
/* ----- END Structs ----- */
struct environment_struct
{
};
// TODO: There should be a better way to check if _block_k_2_ is already defined
#ifndef _block_k_2__func
#define _block_k_2__func
__device__ int _block_k_2_(environment_t *_env_, int index)
{
{
return (index % 25000);
}
}
#endif
// TODO: There should be a better way to check if _block_k_4_ is already defined
#ifndef _block_k_4__func
#define _block_k_4__func
__device__ int _block_k_4_(environment_t *_env_, int index)
{
{
return (((index + 101)) % 25000);
}
}
#endif
// TODO: There should be a better way to check if _block_k_5_ is already defined
#ifndef _block_k_5__func
#define _block_k_5__func
__device__ int _block_k_5_(environment_t *_env_, int p1, int p2)
{
{
return (p1 * p2);
}
}
#endif
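// kernel_1 composes the blocks above: each thread effectively computes
// (_tid_ % 25000) * ((_tid_ + 101) % 25000) and stores it in _result_.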
__global__ void kernel_1(environment_t *_env_, int _num_threads_, int *_result_)
{
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ < _num_threads_)
{
_result_[_tid_] = _block_k_5_(_env_, _block_k_2_(_env_, _tid_), _block_k_4_(_env_, _tid_));
}
}
#undef checkErrorReturn
#define checkErrorReturn(result_var, expr) \
if (result_var->last_error = expr) \
{\
cudaError_t error = cudaGetLastError();\
printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\
cudaDeviceReset();\
return result_var;\
}
extern "C" EXPORT result_t *launch_kernel(environment_t *host_env)
{
// CUDA Initialization
program_result = new result_t();
program_result->device_allocations = new vector<void*>();
timeStartMeasure();
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
program_result->last_error = -1;
return program_result;
}
checkErrorReturn(program_result, cudaFree(0));
timeReportMeasure(program_result, setup_cuda);
/* Prepare environment */
/* Allocate device environment and copy over struct */
environment_t *dev_env;
timeStartMeasure();
checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t)));
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice));
timeReportMeasure(program_result, transfer_memory);
/* Launch all kernels */
timeStartMeasure();
int * _kernel_result_2;
checkErrorReturn(program_result, cudaMalloc(&_kernel_result_2, (sizeof(int) * 30000000)));
program_result->device_allocations->push_back(_kernel_result_2);
timeReportMeasure(program_result, allocate_memory);
timeStartMeasure();
kernel_1<<<29297, 1024>>>(dev_env, 30000000, _kernel_result_2);
checkErrorReturn(program_result, cudaPeekAtLastError());
checkErrorReturn(program_result, cudaDeviceSynchronize());
timeReportMeasure(program_result, kernel);
/* Copy over result to the host */
program_result->result = ({
variable_size_array_t device_array = variable_size_array_t((void *) _kernel_result_2, 30000000);
int * tmp_result = (int *) malloc(sizeof(int) * device_array.size);
timeStartMeasure();
checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost));
timeReportMeasure(program_result, transfer_memory);
variable_size_array_t((void *) tmp_result, device_array.size);
});
/* Free device memory */
timeStartMeasure();
checkErrorReturn(program_result, cudaFree(_kernel_result_2));
timeReportMeasure(program_result, free_memory);
delete program_result->device_allocations;
return program_result;
}
|
c139266bdf722b59efea72b04179e0e635859cfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_upper.cu normal z -> d, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "dtrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
dtrtri_diag_upper_device(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ double sB[IB*IB];
double y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_D_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_D_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_D_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_D_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_D_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_D_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
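// ----------------------------------------------------------------------
// Illustrative host-side sketch (not part of MAGMA): the same column-by-
// column recurrence as the device routine above, for a plain n x n
// upper-triangular matrix stored column-major. It assumes a non-unit,
// non-singular diagonal (the device code also handles MagmaUnit) and is
// included only to document the algorithm.
static inline void dtrtri_upper_reference( int n, const double *A, int lda, double *B, int ldb )
{
for( int j=0; j < n; j++ ) {
// invert the diagonal entry
B[j + j*ldb] = 1.0 / A[j + j*lda];
// y = B(0:j-1, 0:j-1) * A(0:j-1, j), then B(0:j-1, j) = -B(j,j) * y
for( int i=0; i < j; i++ ) {
double y = 0.0;
for( int k=i; k < j; k++ ) {
y += B[i + k*ldb] * A[k + j*lda];
}
B[i + j*ldb] = -B[j + j*ldb] * y;
}
// zero below the diagonal so B holds the full upper-triangular inverse
for( int i=j+1; i < n; i++ ) {
B[i + j*ldb] = 0.0;
}
}
}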
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
[ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
[ 0 A22 ] [ 0 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
dtrtri_diag_kernel inverts A11 and A22.
triple_dgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 B12 ]
which contains [ 0 B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
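/*
Worked 2x2 example of the identity above (for illustration): with
A11 = 2, A12 = 3, A22 = 4 we get B11 = 1/2, B22 = 1/4 and
B12 = -B11*A12*B22 = -(1/2)*3*(1/4) = -3/8, and indeed
[ 2 3 ] * [ 1/2 -3/8 ] = [ 1 0 ]
[ 0 4 ] [ 0 1/4 ] [ 0 1 ].
*/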
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm16_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
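// Implementation note for the triple_dgemm*_part* routines in this file:
// each thread keeps one 1x16 row of C in registers (rC), the current
// 16x16 tile of B is staged in shared memory (sB), and A is streamed from
// global memory four elements per thread at a time. daxpy16 (defined in
// dtrtri.h) is assumed to perform rC[i] += rA * sB_row[i] for i = 0..15,
// which is what the "axpy" comments inside the loops describe.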
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm16_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm32_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm32_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm64_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm64_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm_above64_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B12 temporarily in the previously unused B21 location
// (i.e., below the diagonal), then in part 3, zero out that B21 scratch.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm_above64_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
static __device__ void
triple_dgemm_above64_part3_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
double *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_D_ZERO;
}
}
}
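// Summary of the jb > 64 path: part 1 writes A12 * B22 into the otherwise
// unused B21 area of d_dinvA (C may not overwrite B when several row
// sections each need all of B), part 2 reads that scratch copy and writes
// -B11 * (A12 * B22) into B12, and part 3 zeroes the scratch so the stored
// inverse stays upper triangular.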
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
dtrtri_diag_upper_kernel(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
dtrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part3_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
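// Batched variants: blockIdx.z selects which matrix of the batch to process; otherwise these mirror the single-matrix kernels above.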
__global__ void
dtrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array)
{
int batchid = blockIdx.z;
dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part3_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
| c139266bdf722b59efea72b04179e0e635859cfb.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from ztrtri_upper.cu normal z -> d, Fri Jan 30 19:00:10 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
This file implements upper case, and is called by dtrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "dtrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
dtrtri_diag_upper_device(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ double sB[IB*IB];
double y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_D_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_D_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_D_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_D_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_D_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_D_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
[ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
[ 0 A22 ] [ 0 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
dtrtri_diag_kernel inverts A11 and A22.
triple_dgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 B12 ]
which contains [ 0 B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm16_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm16_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm32_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm32_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm64_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm64_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
static __device__ void
triple_dgemm_above64_part1_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const double *A, *B;
double *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B21 temporarily in the previously unused B12 matrix
// (i.e., above diagonal), then in part 3, zero out B12.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
daxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
daxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
daxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
daxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
daxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
daxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
daxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
daxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
daxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
daxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
daxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
static __device__ void
triple_dgemm_above64_part2_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ double sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const double *A, *B;
double *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const double *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
double rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
double rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
daxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
daxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
daxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
daxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
daxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
daxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
daxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
daxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
daxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
daxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
daxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
daxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
daxpy16( rA[0], &sB[12][0], rC );
daxpy16( rA[1], &sB[13][0], rC );
daxpy16( rA[2], &sB[14][0], rC );
daxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
static __device__ void
triple_dgemm_above64_part3_upper_device(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
double *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_D_ZERO;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
dtrtri_diag_upper_kernel(
magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA)
{
dtrtri_diag_upper_device(diag, n, A, lda, d_dinvA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part1_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part2_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part3_upper_kernel(
int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages)
{
triple_dgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
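// Batched variants: blockIdx.z selects which matrix of the batch to process; otherwise these mirror the single-matrix kernels above.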
__global__ void
dtrtri_diag_upper_kernel_batched(
magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array)
{
int batchid = blockIdx.z;
dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm16_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm32_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part1_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part2_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_dgemm_above64_part3_upper_kernel_batched(
int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
|
01ae597686403f658fcf3c532a8b20e77be3f5d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Tools.hpp"
__global__ static void juliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, DomaineMaths domainNew, CalibreurCudas calibreur);
__device__ static float julia(float x, float y, int N);
void launchJuliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, const DomaineMaths& domainNew){
dim3 blockPerGrid = dim3(32, 32, 1);
dim3 threadPerBlock = dim3(16, 16, 1);
//TODO Check the value 0.7f
CalibreurCudas calibreur(0, N, 0.0f, 0.7f);
hipLaunchKernelGGL(( juliaAnimation), dim3(blockPerGrid),dim3(threadPerBlock), 0, 0, ptrDevPixels, w, h, N, domainNew, calibreur);
}
__global__ static void juliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, DomaineMaths domainNew, CalibreurCudas calibreur){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
float dx = (float) (domainNew.dx / (float) w);
float dy = (float) (domainNew.dy / (float) h);
unsigned char r, g, b;
int tid = j + (i * nbThreadX);
float x, y;
while(tid < (w * h)){
int pixelI = tid / w;
int pixelJ = tid - w * pixelI;
x = domainNew.x0 + pixelJ * dx;
y = domainNew.y0 + pixelI * dy;
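// hue value returned by julia(); note that this float h shadows the image-height parameter h within the loop body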
float h = julia(x, y, N);
if(h == 0){
r = 0;
g = 0;
b = 0;
} else {
h = calibreur.calibrate(h);
HSB_TO_RVB(h, 1.0, 1.0, r, g, b);
}
ptrDevPixels[tid].x = r;
ptrDevPixels[tid].y = g;
ptrDevPixels[tid].z = b;
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
#define CREAL -0.745
#define CIMAG 0.1
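// Iterate z <- z^2 + c with c = (CREAL, CIMAG), starting from z = (x, y);
// returns the iteration count at escape (|z| > 2), or 0 if the point does not escape within N iterations.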
__device__ static float julia(float x, float y, int N){
float real = x;
float imag = y;
float n = 0;
float norm;
do{
float tmpReal = real;
real = real * real - imag * imag + CREAL;
imag = tmpReal * imag + imag * tmpReal + CIMAG;
++n;
norm = sqrt(real * real + imag * imag);
} while (norm <= 2.0 && n < N);
return n == N ? 0 : n;
}
| 01ae597686403f658fcf3c532a8b20e77be3f5d8.cu | #include "Tools.hpp"
__global__ static void juliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, DomaineMaths domainNew, CalibreurCudas calibreur);
__device__ static float julia(float x, float y, int N);
void launchJuliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, const DomaineMaths& domainNew){
dim3 blockPerGrid = dim3(32, 32, 1);
dim3 threadPerBlock = dim3(16, 16, 1);
//TODO Check the value 0.7f
CalibreurCudas calibreur(0, N, 0.0f, 0.7f);
juliaAnimation<<<blockPerGrid,threadPerBlock>>>(ptrDevPixels, w, h, N, domainNew, calibreur);
}
__global__ static void juliaAnimation(uchar4* ptrDevPixels, int w, int h, int N, DomaineMaths domainNew, CalibreurCudas calibreur){
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
int nbThreadY = gridDim.y * blockDim.y;
int nbThreadX = gridDim.x * blockDim.x;
int nbThreadCuda = nbThreadY * nbThreadX;
float dx = (float) (domainNew.dx / (float) w);
float dy = (float) (domainNew.dy / (float) h);
unsigned char r, g, b;
int tid = j + (i * nbThreadX);
float x, y;
while(tid < (w * h)){
int pixelI = tid / w;
int pixelJ = tid - w * pixelI;
x = domainNew.x0 + pixelJ * dx;
y = domainNew.y0 + pixelI * dy;
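// hue value returned by julia(); note that this float h shadows the image-height parameter h within the loop body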
float h = julia(x, y, N);
if(h == 0){
r = 0;
g = 0;
b = 0;
} else {
h = calibreur.calibrate(h);
HSB_TO_RVB(h, 1.0, 1.0, r, g, b);
}
ptrDevPixels[tid].x = r;
ptrDevPixels[tid].y = g;
ptrDevPixels[tid].z = b;
ptrDevPixels[tid].w = 255;
tid += nbThreadCuda;
}
}
#define CREAL -0.745
#define CIMAG 0.1
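// Iterate z <- z^2 + c with c = (CREAL, CIMAG), starting from z = (x, y);
// returns the iteration count at escape (|z| > 2), or 0 if the point does not escape within N iterations.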
__device__ static float julia(float x, float y, int N){
float real = x;
float imag = y;
float n = 0;
float norm;
do{
float tmpReal = real;
real = real * real - imag * imag + CREAL;
imag = tmpReal * imag + imag * tmpReal + CIMAG;
++n;
norm = sqrt(real * real + imag * imag);
} while (norm <= 2.0 && n < N);
return n == N ? 0 : n;
}
|
0c82fa8e9a93ff8fa5835ca7152e57f9a37faa2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <cstdio>
#include <cmath>
#include <sstream>
#include <algorithm>
#include <omp.h>
#ifdef HAVE_CUB
#include <hipcub/hipcub.hpp>
#endif //HAVE_CUB
#ifdef USE_NVTX
#include <roctracer/roctx.h>
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE roctxRangePop();
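// NOTE: the generated PUSH_RANGE above still uses the NVTX event API while POP_RANGE calls roctx; a ROCm build would presumably need roctxRangePushA()/roctxRangePop() (or an NVTX shim) here instead.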
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
#define CUDA_RT_CALL( call ) \
{ \
hipError_t cudaStatus = call; \
if ( hipSuccess != cudaStatus ) \
fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \
}
constexpr int MAX_NUM_DEVICES=32;
typedef float real;
constexpr real tol = 1.0e-8;
const real PI = 2.0 * std::asin(1.0);
__global__ void initialize_boundaries(
real* __restrict__ const a_new,
real* __restrict__ const a,
const real pi,
const int offset,
const int nx, const int my_ny, const int ny )
{
for (int iy = blockIdx.x * blockDim.x + threadIdx.x;
iy < my_ny;
iy += blockDim.x * gridDim.x) {
const real y0 = sin( 2.0 * pi * (offset+iy) / (ny-1) );
a[ iy*nx + 0 ] = y0;
a[ iy*nx + (nx-1) ] = y0;
a_new[ iy*nx + 0 ] = y0;
a_new[ iy*nx + (nx-1) ] = y0;
}
}
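// Jacobi update for this device's slab: each thread updates one interior point of a_new and
// accumulates its squared residual into l2_norm (CUB block reduction when available, otherwise a
// per-thread atomicAdd). Threads on the first/last interior row also push the freshly computed
// value directly into the neighbouring devices' halo rows via P2P (a_new_top / a_new_bottom).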
template<int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(
real* __restrict__ const a_new,
const real* __restrict__ const a,
real* __restrict__ const l2_norm,
const int iy_start, const int iy_end,
const int nx,
real* __restrict__ const a_new_top,
const int top_iy,
real* __restrict__ const a_new_bottom,
const int bottom_iy
)
{
#ifdef HAVE_CUB
typedef hipcub::BlockReduce<real,BLOCK_DIM_X,hipcub::BLOCK_REDUCE_WARP_REDUCTIONS,BLOCK_DIM_Y> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif //HAVE_CUB
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if(iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * ( a[ iy * nx + ix + 1 ] + a[ iy * nx + ix - 1 ]
+ a[ (iy+1) * nx + ix ] + a[ (iy-1) * nx + ix ] );
a_new[ iy * nx + ix ] = new_val;
if ( iy_start == iy )
{
a_new_top[ top_iy*nx + ix ] = new_val;
}
if ( (iy_end - 1) == iy )
{
a_new_bottom[ bottom_iy*nx + ix ] = new_val;
}
real residue = new_val - a[ iy * nx + ix ];
local_l2_norm += residue * residue;
}
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if ( 0 == threadIdx.y && 0 == threadIdx.x )
atomicAdd( l2_norm, block_l2_norm );
#else
atomicAdd( l2_norm, local_l2_norm );
#endif //HAVE_CUB
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print );
template<typename T>
T get_argval(char ** begin, char ** end, const std::string& arg, const T default_val) {
T argval = default_val;
char ** itr = std::find(begin, end, arg);
if (itr != end && ++itr != end) {
std::istringstream inbuf(*itr);
inbuf >> argval;
}
return argval;
}
bool get_arg(char ** begin, char ** end, const std::string& arg) {
char ** itr = std::find(begin, end, arg);
if (itr != end) {
return true;
}
return false;
}
int main(int argc, char * argv[])
{
const int iter_max = get_argval<int>(argv, argv+argc,"-niter", 1000);
const int nccheck = get_argval<int>(argv, argv+argc,"-nccheck", 1);
const int nx = get_argval<int>(argv, argv+argc,"-nx", 7168);
const int ny = get_argval<int>(argv, argv+argc,"-ny", 7168);
const bool csv = get_arg(argv, argv+argc,"-csv");
real* a_new[MAX_NUM_DEVICES];
real* a_ref_h;
real* a_h;
double runtime_serial = 0.0;
int iy_end[MAX_NUM_DEVICES];
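// Two event sets per device, selected by iteration parity, so a device can wait on its neighbours'
// previous-iteration kernels without racing the events recorded for the current iteration.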
hipEvent_t compute_done[2][MAX_NUM_DEVICES];
bool result_correct = true;
bool p2p_works = true;
int num_devices = 0;
CUDA_RT_CALL( hipGetDeviceCount( &num_devices ) );
real l2_norm = 1.0;
#pragma omp parallel num_threads( num_devices ) shared( l2_norm )
{
real* a;
hipStream_t compute_stream;
hipStream_t push_top_stream;
hipStream_t push_bottom_stream;
hipEvent_t push_top_done;
hipEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
// Ensure correctness if ny%size != 0
int chunk_size = ::ceil( (1.0*(ny-2))/num_devices );
int dev_id = omp_get_thread_num();
CUDA_RT_CALL( hipSetDevice( dev_id ) );
CUDA_RT_CALL( hipFree( 0 ) );
if ( 0 == dev_id ) {
CUDA_RT_CALL( hipHostMalloc( &a_ref_h, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( hipHostMalloc( &a_h, nx*ny*sizeof(real) ) );
runtime_serial = single_gpu(nx,ny,iter_max,a_ref_h,nccheck,!csv);
}
#pragma omp barrier
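// Neighbour devices form a ring: device 0 wraps to the last device on top and vice versa, matching the periodic boundary in y.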
const int top = dev_id > 0 ? dev_id - 1 : (num_devices-1);
const int bottom = (dev_id+1)%num_devices;
if ( top != dev_id )
{
int canAccessPeer = 0;
CUDA_RT_CALL( hipDeviceCanAccessPeer ( &canAccessPeer, dev_id, top ) );
if ( canAccessPeer )
{
CUDA_RT_CALL( hipDeviceEnablePeerAccess ( top, 0 ) );
}
else
{
std::cerr<<"P2P access required from "<<dev_id<<" to "<<top<<std::endl;
#pragma omp critical
{
if (p2p_works) p2p_works = false;
}
}
if ( top != bottom )
{
canAccessPeer = 0;
CUDA_RT_CALL( hipDeviceCanAccessPeer ( &canAccessPeer, dev_id, bottom ) );
if ( canAccessPeer )
{
CUDA_RT_CALL( hipDeviceEnablePeerAccess ( bottom, 0 ) );
}
else
{
std::cerr<<"P2P access required from "<<dev_id<<" to "<<bottom<<std::endl;
#pragma omp critical
{
if (p2p_works) p2p_works = false;
}
}
}
}
#pragma omp barrier
if ( p2p_works )
{
CUDA_RT_CALL( hipMalloc( &a, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( hipMalloc( a_new+dev_id, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( hipMemset( a, 0, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( hipMemset( a_new[dev_id], 0, nx*(chunk_size+2)*sizeof(real) ) );
//Calculate local domain boundaries
int iy_start_global = dev_id * chunk_size + 1;
int iy_end_global = iy_start_global + chunk_size - 1;
// Do not process boundaries
iy_end_global = ::min( iy_end_global, ny - 2 );
int iy_start = 1;
iy_end[dev_id] = (iy_end_global-iy_start_global + 1)+iy_start;
//Set Dirichlet boundary conditions on the left and right border
hipLaunchKernelGGL(( initialize_boundaries), dim3((ny/num_devices)/128+1),dim3(128), 0, 0, a, a_new[dev_id], PI, iy_start_global-1, nx, (chunk_size+2), ny );
CUDA_RT_CALL( hipGetLastError() );
CUDA_RT_CALL( hipDeviceSynchronize() );
CUDA_RT_CALL( hipStreamCreate(&compute_stream) );
CUDA_RT_CALL( hipStreamCreate(&push_top_stream) );
CUDA_RT_CALL( hipStreamCreate(&push_bottom_stream) );
CUDA_RT_CALL( hipEventCreateWithFlags ( compute_done[0]+dev_id, hipEventDisableTiming ) );
CUDA_RT_CALL( hipEventCreateWithFlags ( compute_done[1]+dev_id, hipEventDisableTiming ) );
CUDA_RT_CALL( hipEventCreateWithFlags ( &push_top_done, hipEventDisableTiming ) );
CUDA_RT_CALL( hipEventCreateWithFlags ( &push_bottom_done, hipEventDisableTiming ) );
CUDA_RT_CALL( hipMalloc( &l2_norm_d, sizeof(real) ) );
CUDA_RT_CALL( hipHostMalloc( &l2_norm_h, sizeof(real) ) );
CUDA_RT_CALL( hipDeviceSynchronize() );
#pragma omp master
{
if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh with norm check every %d iterations\n", iter_max, ny, nx, nccheck);
}
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 4;
dim3 dim_grid((nx - 1)/ dim_block_x + 1, (ny - 1) / (num_devices * dim_block_y) + 1, 1);
int iter = 0;
#pragma omp master
{
l2_norm = 1.0;
}
CUDA_RT_CALL( hipDeviceSynchronize() );
#pragma omp barrier
double start = omp_get_wtime();
PUSH_RANGE("Jacobi solve",0)
while ( l2_norm > tol && iter < iter_max )
{
CUDA_RT_CALL( hipMemsetAsync(l2_norm_d, 0 , sizeof(real), compute_stream ) );
//need to wait for other threads due to sharing of a_new and compute_done between threads
#pragma omp barrier
CUDA_RT_CALL( hipStreamWaitEvent( compute_stream, compute_done[iter%2][top], 0 ) );
CUDA_RT_CALL( hipStreamWaitEvent( compute_stream, compute_done[iter%2][bottom], 0 ) );
hipLaunchKernelGGL(( jacobi_kernel<dim_block_x,dim_block_y>), dim3(dim_grid), dim3(dim_block_x,dim_block_y,1), 0, compute_stream, a_new[dev_id], a, l2_norm_d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0 );
CUDA_RT_CALL( hipGetLastError() );
CUDA_RT_CALL( hipEventRecord( compute_done[(iter+1)%2][dev_id], compute_stream ) );
if ( (iter % nccheck) == 0 || (!csv && (iter % 100) == 0) ) {
CUDA_RT_CALL( hipMemcpyAsync( l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost, compute_stream ) );
#pragma omp barrier
#pragma omp single
{
l2_norm = 0.0;
}
#pragma omp barrier
CUDA_RT_CALL( hipStreamSynchronize( compute_stream ) );
#pragma omp atomic
l2_norm += *(l2_norm_h);
#pragma omp barrier
#pragma omp single
{
l2_norm = std::sqrt( l2_norm );
}
#pragma omp barrier
if(!csv && (iter % 100) == 0)
{
#pragma omp master
printf("%5d, %0.6f\n", iter, l2_norm);
}
}
#pragma omp barrier
std::swap(a_new[dev_id],a);
iter++;
}
CUDA_RT_CALL( hipDeviceSynchronize() );
#pragma omp barrier
double stop = omp_get_wtime();
POP_RANGE
CUDA_RT_CALL( hipMemcpy( a_h+iy_start_global*nx, a+nx, ::min((ny-iy_start_global)*nx,chunk_size*nx)*sizeof(real), hipMemcpyDeviceToHost ) );
#pragma omp barrier
#pragma omp master
{
result_correct = true;
for (int iy = 1; result_correct && (iy < (ny-1)); ++iy) {
for (int ix = 1; result_correct && (ix < (nx-1)); ++ix) {
if ( ::fabs( a_ref_h[ iy * nx + ix ] - a_h[ iy * nx + ix ] ) > tol ) {
fprintf(stderr,"ERROR: a[%d * %d + %d] = %f does not match %f (reference)\n", iy,nx,ix, a_h[ iy * nx + ix ], a_ref_h[ iy * nx + ix ]);
result_correct = false;
}
}}
if (result_correct)
{
if (csv) {
printf( "multi_threaded_p2p, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop-start), runtime_serial );
}
else {
printf( "Num GPUs: %d.\n", num_devices );
printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, efficiency: %8.2f \n", ny,nx, runtime_serial, num_devices, (stop-start), runtime_serial/(stop-start), runtime_serial/(num_devices*(stop-start))*100 );
}
}
}
CUDA_RT_CALL( hipEventDestroy( push_bottom_done ) );
CUDA_RT_CALL( hipEventDestroy( push_top_done ) );
CUDA_RT_CALL( hipEventDestroy( compute_done[1][dev_id] ) );
CUDA_RT_CALL( hipEventDestroy( compute_done[0][dev_id] ) );
CUDA_RT_CALL( hipStreamDestroy( push_bottom_stream ) );
CUDA_RT_CALL( hipStreamDestroy( push_top_stream ) );
CUDA_RT_CALL( hipStreamDestroy( compute_stream ) );
CUDA_RT_CALL( hipHostFree( l2_norm_h ) );
CUDA_RT_CALL( hipFree( l2_norm_d ) );
CUDA_RT_CALL( hipFree( a_new[dev_id] ) );
CUDA_RT_CALL( hipFree( a ) );
if ( 0 == dev_id ) {
CUDA_RT_CALL( hipHostFree( a_h ) );
CUDA_RT_CALL( hipHostFree( a_ref_h ) );
}
}
CUDA_RT_CALL( hipDeviceReset() );
}
return result_correct ? 0 : 1;
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print )
{
real* a;
real* a_new;
hipStream_t compute_stream;
hipStream_t push_top_stream;
hipStream_t push_bottom_stream;
hipEvent_t compute_done;
hipEvent_t push_top_done;
hipEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
int iy_start = 1;
int iy_end = (ny-1);
CUDA_RT_CALL( hipMalloc( &a, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( hipMalloc( &a_new, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( hipMemset( a, 0, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( hipMemset( a_new, 0, nx*ny*sizeof(real) ) );
//Set Dirichlet boundary conditions on the left and right border
hipLaunchKernelGGL(( initialize_boundaries), dim3(ny/128+1),dim3(128), 0, 0, a, a_new, PI, 0, nx, ny, ny );
CUDA_RT_CALL( hipGetLastError() );
CUDA_RT_CALL( hipDeviceSynchronize() );
CUDA_RT_CALL( hipStreamCreate(&compute_stream) );
CUDA_RT_CALL( hipStreamCreate(&push_top_stream) );
CUDA_RT_CALL( hipStreamCreate(&push_bottom_stream) );
CUDA_RT_CALL( hipEventCreateWithFlags ( &compute_done, hipEventDisableTiming ) );
CUDA_RT_CALL( hipEventCreateWithFlags ( &push_top_done, hipEventDisableTiming ) );
CUDA_RT_CALL( hipEventCreateWithFlags ( &push_bottom_done, hipEventDisableTiming ) );
CUDA_RT_CALL( hipMalloc( &l2_norm_d, sizeof(real) ) );
CUDA_RT_CALL( hipHostMalloc( &l2_norm_h, sizeof(real) ) );
CUDA_RT_CALL( hipDeviceSynchronize() );
if (print) printf("Single GPU jacobi relaxation: %d iterations on %d x %d mesh with norm check every %d iterations\n", iter_max, ny, nx, nccheck);
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 4;
dim3 dim_grid((nx - 1) / dim_block_x + 1, (ny - 1) / dim_block_y + 1, 1);
int iter = 0;
real l2_norm = 1.0;
CUDA_RT_CALL( hipDeviceSynchronize() );
double start = omp_get_wtime();
PUSH_RANGE("Jacobi solve",0)
while ( l2_norm > tol && iter < iter_max )
{
CUDA_RT_CALL( hipMemsetAsync(l2_norm_d, 0 , sizeof(real), compute_stream ) );
CUDA_RT_CALL( hipStreamWaitEvent( compute_stream, push_top_done, 0 ) );
CUDA_RT_CALL( hipStreamWaitEvent( compute_stream, push_bottom_done, 0 ) );
        hipLaunchKernelGGL(( jacobi_kernel<dim_block_x,dim_block_y>), dim_grid, dim3(dim_block_x, dim_block_y, 1), 0, compute_stream, a_new, a, l2_norm_d, iy_start, iy_end, nx, a_new, iy_start, a_new, (iy_end - 1) );
CUDA_RT_CALL( hipGetLastError() );
CUDA_RT_CALL( hipEventRecord( compute_done, compute_stream ) );
if ( (iter % nccheck) == 0 || ( print && ( (iter % 100) == 0 ) ) ) {
CUDA_RT_CALL( hipMemcpyAsync( l2_norm_h, l2_norm_d, sizeof(real), hipMemcpyDeviceToHost, compute_stream ) );
}
//Apply periodic boundary conditions
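        // Row 0 receives row iy_end-1 and row iy_end receives row iy_start, so the top and
        // bottom boundaries wrap around once this iteration's compute kernel has finished.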
CUDA_RT_CALL( hipStreamWaitEvent( push_top_stream, compute_done, 0 ) );
CUDA_RT_CALL( hipMemcpyAsync(
a_new,
a_new+(iy_end-1)*nx,
nx*sizeof(real), hipMemcpyDeviceToDevice, push_top_stream ) );
CUDA_RT_CALL( hipEventRecord( push_top_done, push_top_stream ) );
CUDA_RT_CALL( hipStreamWaitEvent( push_bottom_stream, compute_done, 0 ) );
CUDA_RT_CALL( hipMemcpyAsync(
a_new+iy_end*nx,
a_new+iy_start*nx,
nx*sizeof(real), hipMemcpyDeviceToDevice, compute_stream ) );
CUDA_RT_CALL( hipEventRecord( push_bottom_done, push_bottom_stream ) );
if ( (iter % nccheck) == 0 || ( print && ( (iter % 100) == 0 ) ) ) {
CUDA_RT_CALL( hipStreamSynchronize( compute_stream ) );
l2_norm = *l2_norm_h;
l2_norm = std::sqrt( l2_norm );
if( print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
}
std::swap(a_new,a);
iter++;
}
CUDA_RT_CALL( hipDeviceSynchronize() );
POP_RANGE
double stop = omp_get_wtime();
CUDA_RT_CALL( hipMemcpy( a_ref_h, a, nx*ny*sizeof(real), hipMemcpyDeviceToHost ) );
CUDA_RT_CALL( hipEventDestroy( push_bottom_done ) );
CUDA_RT_CALL( hipEventDestroy( push_top_done ) );
CUDA_RT_CALL( hipEventDestroy( compute_done ) );
CUDA_RT_CALL( hipStreamDestroy( push_bottom_stream ) );
CUDA_RT_CALL( hipStreamDestroy( push_top_stream ) );
CUDA_RT_CALL( hipStreamDestroy( compute_stream ) );
CUDA_RT_CALL( hipHostFree( l2_norm_h ) );
CUDA_RT_CALL( hipFree( l2_norm_d ) );
CUDA_RT_CALL( hipFree( a_new ) );
CUDA_RT_CALL( hipFree( a ) );
return (stop-start);
}
| 0c82fa8e9a93ff8fa5835ca7152e57f9a37faa2f.cu | /* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
#include <cstdio>
#include <cmath>
#include <sstream>
#include <algorithm>
#include <omp.h>
#ifdef HAVE_CUB
#include <cub/block/block_reduce.cuh>
#endif //HAVE_CUB
#ifdef USE_NVTX
#include <nvToolsExt.h>
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
#define CUDA_RT_CALL( call ) \
{ \
cudaError_t cudaStatus = call; \
if ( cudaSuccess != cudaStatus ) \
fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
}
constexpr int MAX_NUM_DEVICES=32;
typedef float real;
constexpr real tol = 1.0e-8;
const real PI = 2.0 * std::asin(1.0);
__global__ void initialize_boundaries(
real* __restrict__ const a_new,
real* __restrict__ const a,
const real pi,
const int offset,
const int nx, const int my_ny, const int ny )
{
for (int iy = blockIdx.x * blockDim.x + threadIdx.x;
iy < my_ny;
iy += blockDim.x * gridDim.x) {
const real y0 = sin( 2.0 * pi * (offset+iy) / (ny-1) );
a[ iy*nx + 0 ] = y0;
a[ iy*nx + (nx-1) ] = y0;
a_new[ iy*nx + 0 ] = y0;
a_new[ iy*nx + (nx-1) ] = y0;
}
}
template<int BLOCK_DIM_X, int BLOCK_DIM_Y>
__global__ void jacobi_kernel(
real* __restrict__ const a_new,
const real* __restrict__ const a,
real* __restrict__ const l2_norm,
const int iy_start, const int iy_end,
const int nx,
real* __restrict__ const a_new_top,
const int top_iy,
real* __restrict__ const a_new_bottom,
const int bottom_iy
)
{
#ifdef HAVE_CUB
typedef cub::BlockReduce<real,BLOCK_DIM_X,cub::BLOCK_REDUCE_WARP_REDUCTIONS,BLOCK_DIM_Y> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
#endif //HAVE_CUB
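    // Each thread updates one interior point with the 5-point stencil; threads on the
    // first and last row of this rank's slab also write their result into a_new_top /
    // a_new_bottom at the given rows (a neighbouring device's halo in the multi-GPU path).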
int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start;
int ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
real local_l2_norm = 0.0;
if(iy < iy_end && ix < (nx - 1)) {
const real new_val = 0.25 * ( a[ iy * nx + ix + 1 ] + a[ iy * nx + ix - 1 ]
+ a[ (iy+1) * nx + ix ] + a[ (iy-1) * nx + ix ] );
a_new[ iy * nx + ix ] = new_val;
if ( iy_start == iy )
{
a_new_top[ top_iy*nx + ix ] = new_val;
}
if ( (iy_end - 1) == iy )
{
a_new_bottom[ bottom_iy*nx + ix ] = new_val;
}
real residue = new_val - a[ iy * nx + ix ];
local_l2_norm += residue * residue;
}
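    // Accumulate the squared residual: with CUB the sum is reduced within the block and
    // one thread issues the atomicAdd; otherwise every thread adds atomically.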
#ifdef HAVE_CUB
real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm);
if ( 0 == threadIdx.y && 0 == threadIdx.x )
atomicAdd( l2_norm, block_l2_norm );
#else
atomicAdd( l2_norm, local_l2_norm );
#endif //HAVE_CUB
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print );
template<typename T>
T get_argval(char ** begin, char ** end, const std::string& arg, const T default_val) {
T argval = default_val;
char ** itr = std::find(begin, end, arg);
if (itr != end && ++itr != end) {
std::istringstream inbuf(*itr);
inbuf >> argval;
}
return argval;
}
bool get_arg(char ** begin, char ** end, const std::string& arg) {
char ** itr = std::find(begin, end, arg);
if (itr != end) {
return true;
}
return false;
}
int main(int argc, char * argv[])
{
const int iter_max = get_argval<int>(argv, argv+argc,"-niter", 1000);
const int nccheck = get_argval<int>(argv, argv+argc,"-nccheck", 1);
const int nx = get_argval<int>(argv, argv+argc,"-nx", 7168);
const int ny = get_argval<int>(argv, argv+argc,"-ny", 7168);
const bool csv = get_arg(argv, argv+argc,"-csv");
real* a_new[MAX_NUM_DEVICES];
real* a_ref_h;
real* a_h;
double runtime_serial = 0.0;
int iy_end[MAX_NUM_DEVICES];
cudaEvent_t compute_done[2][MAX_NUM_DEVICES];
bool result_correct = true;
bool p2p_works = true;
int num_devices = 0;
CUDA_RT_CALL( cudaGetDeviceCount( &num_devices ) );
real l2_norm = 1.0;
#pragma omp parallel num_threads( num_devices ) shared( l2_norm )
{
real* a;
cudaStream_t compute_stream;
cudaStream_t push_top_stream;
cudaStream_t push_bottom_stream;
cudaEvent_t push_top_done;
cudaEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
// Ensure correctness if ny%size != 0
int chunk_size = std::ceil( (1.0*(ny-2))/num_devices );
int dev_id = omp_get_thread_num();
CUDA_RT_CALL( cudaSetDevice( dev_id ) );
CUDA_RT_CALL( cudaFree( 0 ) );
if ( 0 == dev_id ) {
CUDA_RT_CALL( cudaMallocHost( &a_ref_h, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( cudaMallocHost( &a_h, nx*ny*sizeof(real) ) );
runtime_serial = single_gpu(nx,ny,iter_max,a_ref_h,nccheck,!csv);
}
#pragma omp barrier
const int top = dev_id > 0 ? dev_id - 1 : (num_devices-1);
const int bottom = (dev_id+1)%num_devices;
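    // Devices form a ring: dev_id-1 is "top" and dev_id+1 is "bottom", wrapping around so
    // the outermost rows are exchanged periodically between the first and last device.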
if ( top != dev_id )
{
int canAccessPeer = 0;
CUDA_RT_CALL( cudaDeviceCanAccessPeer ( &canAccessPeer, dev_id, top ) );
if ( canAccessPeer )
{
CUDA_RT_CALL( cudaDeviceEnablePeerAccess ( top, 0 ) );
}
else
{
std::cerr<<"P2P access required from "<<dev_id<<" to "<<top<<std::endl;
#pragma omp critical
{
if (p2p_works) p2p_works = false;
}
}
if ( top != bottom )
{
canAccessPeer = 0;
CUDA_RT_CALL( cudaDeviceCanAccessPeer ( &canAccessPeer, dev_id, bottom ) );
if ( canAccessPeer )
{
CUDA_RT_CALL( cudaDeviceEnablePeerAccess ( bottom, 0 ) );
}
else
{
std::cerr<<"P2P access required from "<<dev_id<<" to "<<bottom<<std::endl;
#pragma omp critical
{
if (p2p_works) p2p_works = false;
}
}
}
}
#pragma omp barrier
if ( p2p_works )
{
CUDA_RT_CALL( cudaMalloc( &a, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( cudaMalloc( a_new+dev_id, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( cudaMemset( a, 0, nx*(chunk_size+2)*sizeof(real) ) );
CUDA_RT_CALL( cudaMemset( a_new[dev_id], 0, nx*(chunk_size+2)*sizeof(real) ) );
//Calculate local domain boundaries
int iy_start_global = dev_id * chunk_size + 1;
int iy_end_global = iy_start_global + chunk_size - 1;
// Do not process boundaries
iy_end_global = std::min( iy_end_global, ny - 2 );
int iy_start = 1;
iy_end[dev_id] = (iy_end_global-iy_start_global + 1)+iy_start;
    //Set Dirichlet boundary conditions on the left and right border
initialize_boundaries<<<(ny/num_devices)/128+1,128>>>( a, a_new[dev_id], PI, iy_start_global-1, nx, (chunk_size+2), ny );
CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL( cudaDeviceSynchronize() );
CUDA_RT_CALL( cudaStreamCreate(&compute_stream) );
CUDA_RT_CALL( cudaStreamCreate(&push_top_stream) );
CUDA_RT_CALL( cudaStreamCreate(&push_bottom_stream) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( compute_done[0]+dev_id, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( compute_done[1]+dev_id, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( &push_top_done, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( &push_bottom_done, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaMalloc( &l2_norm_d, sizeof(real) ) );
CUDA_RT_CALL( cudaMallocHost( &l2_norm_h, sizeof(real) ) );
CUDA_RT_CALL( cudaDeviceSynchronize() );
#pragma omp master
{
if (!csv) printf("Jacobi relaxation: %d iterations on %d x %d mesh with norm check every %d iterations\n", iter_max, ny, nx, nccheck);
}
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 4;
dim3 dim_grid((nx - 1)/ dim_block_x + 1, (ny - 1) / (num_devices * dim_block_y) + 1, 1);
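    // Each device launches a grid that covers only its own slab of roughly ny/num_devices rows.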
int iter = 0;
#pragma omp master
{
l2_norm = 1.0;
}
CUDA_RT_CALL( cudaDeviceSynchronize() );
#pragma omp barrier
double start = omp_get_wtime();
PUSH_RANGE("Jacobi solve",0)
while ( l2_norm > tol && iter < iter_max )
{
CUDA_RT_CALL( cudaMemsetAsync(l2_norm_d, 0 , sizeof(real), compute_stream ) );
//need to wait for other threads due to sharing of a_new and compute_done between threads
#pragma omp barrier
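            // Wait until both neighbours have finished the previous iteration's kernel, which
            // filled this device's halo rows (now in a after the swap) via peer access.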
CUDA_RT_CALL( cudaStreamWaitEvent( compute_stream, compute_done[iter%2][top], 0 ) );
CUDA_RT_CALL( cudaStreamWaitEvent( compute_stream, compute_done[iter%2][bottom], 0 ) );
jacobi_kernel<dim_block_x,dim_block_y><<<dim_grid,{dim_block_x,dim_block_y,1},0,compute_stream>>>( a_new[dev_id], a, l2_norm_d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0 );
CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL( cudaEventRecord( compute_done[(iter+1)%2][dev_id], compute_stream ) );
if ( (iter % nccheck) == 0 || (!csv && (iter % 100) == 0) ) {
CUDA_RT_CALL( cudaMemcpyAsync( l2_norm_h, l2_norm_d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream ) );
#pragma omp barrier
#pragma omp single
{
l2_norm = 0.0;
}
#pragma omp barrier
CUDA_RT_CALL( cudaStreamSynchronize( compute_stream ) );
#pragma omp atomic
l2_norm += *(l2_norm_h);
#pragma omp barrier
#pragma omp single
{
l2_norm = std::sqrt( l2_norm );
}
#pragma omp barrier
if(!csv && (iter % 100) == 0)
{
#pragma omp master
printf("%5d, %0.6f\n", iter, l2_norm);
}
}
#pragma omp barrier
std::swap(a_new[dev_id],a);
iter++;
}
CUDA_RT_CALL( cudaDeviceSynchronize() );
#pragma omp barrier
double stop = omp_get_wtime();
POP_RANGE
CUDA_RT_CALL( cudaMemcpy( a_h+iy_start_global*nx, a+nx, std::min((ny-iy_start_global)*nx,chunk_size*nx)*sizeof(real), cudaMemcpyDeviceToHost ) );
#pragma omp barrier
#pragma omp master
{
result_correct = true;
for (int iy = 1; result_correct && (iy < (ny-1)); ++iy) {
for (int ix = 1; result_correct && (ix < (nx-1)); ++ix) {
if ( std::fabs( a_ref_h[ iy * nx + ix ] - a_h[ iy * nx + ix ] ) > tol ) {
fprintf(stderr,"ERROR: a[%d * %d + %d] = %f does not match %f (reference)\n", iy,nx,ix, a_h[ iy * nx + ix ], a_ref_h[ iy * nx + ix ]);
result_correct = false;
}
}}
if (result_correct)
{
if (csv) {
printf( "multi_threaded_p2p, %d, %d, %d, %d, %d, 1, %f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop-start), runtime_serial );
}
else {
printf( "Num GPUs: %d.\n", num_devices );
printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: %8.2f, efficiency: %8.2f \n", ny,nx, runtime_serial, num_devices, (stop-start), runtime_serial/(stop-start), runtime_serial/(num_devices*(stop-start))*100 );
}
}
}
CUDA_RT_CALL( cudaEventDestroy( push_bottom_done ) );
CUDA_RT_CALL( cudaEventDestroy( push_top_done ) );
CUDA_RT_CALL( cudaEventDestroy( compute_done[1][dev_id] ) );
CUDA_RT_CALL( cudaEventDestroy( compute_done[0][dev_id] ) );
CUDA_RT_CALL( cudaStreamDestroy( push_bottom_stream ) );
CUDA_RT_CALL( cudaStreamDestroy( push_top_stream ) );
CUDA_RT_CALL( cudaStreamDestroy( compute_stream ) );
CUDA_RT_CALL( cudaFreeHost( l2_norm_h ) );
CUDA_RT_CALL( cudaFree( l2_norm_d ) );
CUDA_RT_CALL( cudaFree( a_new[dev_id] ) );
CUDA_RT_CALL( cudaFree( a ) );
if ( 0 == dev_id ) {
CUDA_RT_CALL( cudaFreeHost( a_h ) );
CUDA_RT_CALL( cudaFreeHost( a_ref_h ) );
}
}
CUDA_RT_CALL( cudaDeviceReset() );
}
return result_correct ? 0 : 1;
}
double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print )
{
real* a;
real* a_new;
cudaStream_t compute_stream;
cudaStream_t push_top_stream;
cudaStream_t push_bottom_stream;
cudaEvent_t compute_done;
cudaEvent_t push_top_done;
cudaEvent_t push_bottom_done;
real* l2_norm_d;
real* l2_norm_h;
int iy_start = 1;
int iy_end = (ny-1);
CUDA_RT_CALL( cudaMalloc( &a, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( cudaMalloc( &a_new, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( cudaMemset( a, 0, nx*ny*sizeof(real) ) );
CUDA_RT_CALL( cudaMemset( a_new, 0, nx*ny*sizeof(real) ) );
    //Set Dirichlet boundary conditions on the left and right border
initialize_boundaries<<<ny/128+1,128>>>( a, a_new, PI, 0, nx, ny, ny );
CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL( cudaDeviceSynchronize() );
CUDA_RT_CALL( cudaStreamCreate(&compute_stream) );
CUDA_RT_CALL( cudaStreamCreate(&push_top_stream) );
CUDA_RT_CALL( cudaStreamCreate(&push_bottom_stream) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( &compute_done, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( &push_top_done, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaEventCreateWithFlags ( &push_bottom_done, cudaEventDisableTiming ) );
CUDA_RT_CALL( cudaMalloc( &l2_norm_d, sizeof(real) ) );
CUDA_RT_CALL( cudaMallocHost( &l2_norm_h, sizeof(real) ) );
CUDA_RT_CALL( cudaDeviceSynchronize() );
if (print) printf("Single GPU jacobi relaxation: %d iterations on %d x %d mesh with norm check every %d iterations\n", iter_max, ny, nx, nccheck);
constexpr int dim_block_x = 32;
constexpr int dim_block_y = 4;
dim3 dim_grid((nx - 1) / dim_block_x + 1, (ny - 1) / dim_block_y + 1, 1);
int iter = 0;
real l2_norm = 1.0;
CUDA_RT_CALL( cudaDeviceSynchronize() );
double start = omp_get_wtime();
PUSH_RANGE("Jacobi solve",0)
while ( l2_norm > tol && iter < iter_max )
{
CUDA_RT_CALL( cudaMemsetAsync(l2_norm_d, 0 , sizeof(real), compute_stream ) );
CUDA_RT_CALL( cudaStreamWaitEvent( compute_stream, push_top_done, 0 ) );
CUDA_RT_CALL( cudaStreamWaitEvent( compute_stream, push_bottom_done, 0 ) );
jacobi_kernel<dim_block_x,dim_block_y><<<dim_grid,{dim_block_x,dim_block_y,1},0,compute_stream>>>( a_new, a, l2_norm_d, iy_start, iy_end, nx, a_new, iy_start, a_new, (iy_end - 1) );
CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL( cudaEventRecord( compute_done, compute_stream ) );
if ( (iter % nccheck) == 0 || ( print && ( (iter % 100) == 0 ) ) ) {
CUDA_RT_CALL( cudaMemcpyAsync( l2_norm_h, l2_norm_d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream ) );
}
//Apply periodic boundary conditions
CUDA_RT_CALL( cudaStreamWaitEvent( push_top_stream, compute_done, 0 ) );
CUDA_RT_CALL( cudaMemcpyAsync(
a_new,
a_new+(iy_end-1)*nx,
nx*sizeof(real), cudaMemcpyDeviceToDevice, push_top_stream ) );
CUDA_RT_CALL( cudaEventRecord( push_top_done, push_top_stream ) );
CUDA_RT_CALL( cudaStreamWaitEvent( push_bottom_stream, compute_done, 0 ) );
CUDA_RT_CALL( cudaMemcpyAsync(
a_new+iy_end*nx,
a_new+iy_start*nx,
nx*sizeof(real), cudaMemcpyDeviceToDevice, compute_stream ) );
CUDA_RT_CALL( cudaEventRecord( push_bottom_done, push_bottom_stream ) );
if ( (iter % nccheck) == 0 || ( print && ( (iter % 100) == 0 ) ) ) {
CUDA_RT_CALL( cudaStreamSynchronize( compute_stream ) );
l2_norm = *l2_norm_h;
l2_norm = std::sqrt( l2_norm );
if( print && (iter % 100) == 0) printf("%5d, %0.6f\n", iter, l2_norm);
}
std::swap(a_new,a);
iter++;
}
CUDA_RT_CALL( cudaDeviceSynchronize() );
POP_RANGE
double stop = omp_get_wtime();
CUDA_RT_CALL( cudaMemcpy( a_ref_h, a, nx*ny*sizeof(real), cudaMemcpyDeviceToHost ) );
CUDA_RT_CALL( cudaEventDestroy( push_bottom_done ) );
CUDA_RT_CALL( cudaEventDestroy( push_top_done ) );
CUDA_RT_CALL( cudaEventDestroy( compute_done ) );
CUDA_RT_CALL( cudaStreamDestroy( push_bottom_stream ) );
CUDA_RT_CALL( cudaStreamDestroy( push_top_stream ) );
CUDA_RT_CALL( cudaStreamDestroy( compute_stream ) );
CUDA_RT_CALL( cudaFreeHost( l2_norm_h ) );
CUDA_RT_CALL( cudaFree( l2_norm_d ) );
CUDA_RT_CALL( cudaFree( a_new ) );
CUDA_RT_CALL( cudaFree( a ) );
return (stop-start);
}
|
a74c6432652b04a6e9c27da025f41e8927dba7bb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/tokenize.hpp>
#include <nvtext/detail/tokenize.hpp>
#include <text/utilities/tokenize_ops.cuh>
#include <thrust/transform.h>
namespace nvtext
{
namespace detail
{
namespace
{
// common pattern for token_count functions
template<typename TokenCounter>
std::unique_ptr<cudf::column> token_count_fn( cudf::size_type strings_count, TokenCounter tokenizer,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
// create output column
auto token_counts = cudf::make_numeric_column( cudf::data_type{cudf::INT32}, strings_count,
cudf::mask_state::UNALLOCATED, stream, mr);
auto d_token_counts = token_counts->mutable_view().data<int32_t>();
// add the counts to the column
thrust::transform( rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_token_counts, tokenizer );
return token_counts;
}
// common pattern for tokenize functions
template<typename Tokenizer>
std::unique_ptr<cudf::column> tokenize_fn( cudf::size_type strings_count, Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
auto execpol = rmm::exec_policy(stream);
// get the number of tokens in each string
auto const token_counts = token_count_fn( strings_count, tokenizer, mr, stream );
auto d_token_counts = token_counts->view();
// create token-index offsets from the counts
rmm::device_vector<int32_t> token_offsets(strings_count+1);
thrust::inclusive_scan( execpol->on(stream),
d_token_counts.template begin<int32_t>(),
d_token_counts.template end<int32_t>(),
token_offsets.begin()+1 );
CUDA_TRY(hipMemsetAsync( token_offsets.data().get(), 0, sizeof(int32_t), stream ));
auto const total_tokens = token_offsets.back();
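    // token_offsets[i] is now the position of string i's first token in the flat token
    // list; the final element is the total number of tokens across all strings.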
// build a list of pointers to each token
rmm::device_vector<string_index_pair> tokens(total_tokens);
// now go get the tokens
tokenizer.d_offsets = token_offsets.data().get();
tokenizer.d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0), strings_count, tokenizer );
// create the strings column using the tokens pointers
return cudf::make_strings_column(tokens,stream,mr);
}
} // namespace
// detail APIs
// zero or more character tokenizer
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid");
cudf::string_view d_delimiter( delimiter.data(), delimiter.size() );
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
return tokenize_fn( strings.size(), strings_tokenizer{*strings_column,d_delimiter}, mr, stream );
}
// zero or more character token counter
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid");
cudf::string_view d_delimiter( delimiter.data(), delimiter.size() );
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
return token_count_fn( strings.size(), strings_tokenizer{*strings_column,d_delimiter}, mr, stream );
}
// one or more string delimiter tokenizer
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
CUDF_EXPECTS( delimiters.size()>0, "Parameter delimiters must not be empty");
CUDF_EXPECTS( !delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(),stream);
return tokenize_fn( strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
mr, stream );
}
// one or more string delimiter token counter
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr,
hipStream_t stream )
{
CUDF_EXPECTS( delimiters.size()>0, "Parameter delimiters must not be empty");
CUDF_EXPECTS( !delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(),stream);
return token_count_fn( strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
mr, stream );
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr )
{
return detail::tokenize( strings, delimiter, mr );
}
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr )
{
return detail::tokenize( strings, delimiters, mr );
}
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
return detail::count_tokens( strings, delimiter, mr );
}
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr)
{
return detail::count_tokens( strings, delimiters, mr );
}
} // namespace nvtext
| a74c6432652b04a6e9c27da025f41e8927dba7bb.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/error.hpp>
#include <nvtext/tokenize.hpp>
#include <nvtext/detail/tokenize.hpp>
#include <text/utilities/tokenize_ops.cuh>
#include <thrust/transform.h>
namespace nvtext
{
namespace detail
{
namespace
{
// common pattern for token_count functions
template<typename TokenCounter>
std::unique_ptr<cudf::column> token_count_fn( cudf::size_type strings_count, TokenCounter tokenizer,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
// create output column
auto token_counts = cudf::make_numeric_column( cudf::data_type{cudf::INT32}, strings_count,
cudf::mask_state::UNALLOCATED, stream, mr);
auto d_token_counts = token_counts->mutable_view().data<int32_t>();
// add the counts to the column
thrust::transform( rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_token_counts, tokenizer );
return token_counts;
}
// common pattern for tokenize functions
template<typename Tokenizer>
std::unique_ptr<cudf::column> tokenize_fn( cudf::size_type strings_count, Tokenizer tokenizer,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
auto execpol = rmm::exec_policy(stream);
// get the number of tokens in each string
auto const token_counts = token_count_fn( strings_count, tokenizer, mr, stream );
auto d_token_counts = token_counts->view();
// create token-index offsets from the counts
rmm::device_vector<int32_t> token_offsets(strings_count+1);
thrust::inclusive_scan( execpol->on(stream),
d_token_counts.template begin<int32_t>(),
d_token_counts.template end<int32_t>(),
token_offsets.begin()+1 );
CUDA_TRY(cudaMemsetAsync( token_offsets.data().get(), 0, sizeof(int32_t), stream ));
auto const total_tokens = token_offsets.back();
// build a list of pointers to each token
rmm::device_vector<string_index_pair> tokens(total_tokens);
// now go get the tokens
tokenizer.d_offsets = token_offsets.data().get();
tokenizer.d_tokens = tokens.data().get();
thrust::for_each_n(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0), strings_count, tokenizer );
// create the strings column using the tokens pointers
return cudf::make_strings_column(tokens,stream,mr);
}
} // namespace
// detail APIs
// zero or more character tokenizer
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid");
cudf::string_view d_delimiter( delimiter.data(), delimiter.size() );
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
return tokenize_fn( strings.size(), strings_tokenizer{*strings_column,d_delimiter}, mr, stream );
}
// zero or more character token counter
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
CUDF_EXPECTS( delimiter.is_valid(), "Parameter delimiter must be valid");
cudf::string_view d_delimiter( delimiter.data(), delimiter.size() );
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
return token_count_fn( strings.size(), strings_tokenizer{*strings_column,d_delimiter}, mr, stream );
}
// one or more string delimiter tokenizer
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
CUDF_EXPECTS( delimiters.size()>0, "Parameter delimiters must not be empty");
CUDF_EXPECTS( !delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(),stream);
return tokenize_fn( strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
mr, stream );
}
// one or more string delimiter token counter
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream )
{
CUDF_EXPECTS( delimiters.size()>0, "Parameter delimiters must not be empty");
CUDF_EXPECTS( !delimiters.has_nulls(), "Parameter delimiters must not have nulls");
auto strings_column = cudf::column_device_view::create(strings.parent(),stream);
auto delimiters_column = cudf::column_device_view::create(delimiters.parent(),stream);
return token_count_fn( strings.size(),
multi_delimiter_strings_tokenizer{*strings_column,
delimiters_column->begin<cudf::string_view>(),
delimiters_column->end<cudf::string_view>()},
mr, stream );
}
} // namespace detail
// external APIs
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr )
{
return detail::tokenize( strings, delimiter, mr );
}
std::unique_ptr<cudf::column> tokenize( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr )
{
return detail::tokenize( strings, delimiters, mr );
}
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::string_scalar const& delimiter,
rmm::mr::device_memory_resource* mr)
{
return detail::count_tokens( strings, delimiter, mr );
}
std::unique_ptr<cudf::column> count_tokens( cudf::strings_column_view const& strings,
cudf::strings_column_view const& delimiters,
rmm::mr::device_memory_resource* mr)
{
return detail::count_tokens( strings, delimiters, mr );
}
} // namespace nvtext
|
62116b66a8b79c66186fe2d8d0a4294137ac11c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
typedef struct Matrix {
int width;
int height;
float *elements;
} Mat;
#define BLOCK_SIZE 16
#define w 4096
#define h 4096
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
int main() {
Mat h_A;
h_A.width = w;
h_A.height = h;
h_A.elements = (float *)malloc(sizeof(float) * h_A.width * h_A.height);
for (int i = 0; i < h_A.height; ++i) {
for (int j = 0; j < h_A.width; ++j) {
h_A.elements[i * h_A.width + j] = 1;
}
}
Mat h_B;
h_B.width = w;
h_B.height = h;
h_B.elements = (float *)malloc(sizeof(float) * h_B.width * h_B.height);
for (int i = 0; i < h_B.height; ++i) {
for (int j = 0; j < h_B.width; ++j) {
h_B.elements[i * h_B.width + j] = 1;
}
}
Mat h_C;
h_C.width = w;
h_C.height = h;
h_C.elements = (float *)malloc(sizeof(float) * h_C.width * h_C.height);
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
h_C.elements[i * h_C.width + j] = 0;
}
}
MatMul(h_A, h_B, h_C);
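  // A and B are filled with ones, so every element of C should equal w; accumulate the
  // absolute deviation from w as a simple correctness check.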
float tmp_value = w;
float sum_error = 0;
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
sum_error += fabs(tmp_value - h_C.elements[i * h_C.width + j]);
}
}
cout << "sum error : " << sum_error << endl;
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
return 0;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
Mat d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc((void **)&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Mat d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc((void **)&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
Mat d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc((void **)&d_C.elements, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(2, 2);
cout << dimGrid.x << " " << dimGrid.y << endl;
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < C.height;
row += gridDim.y * blockDim.y) {
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < C.width;
col += gridDim.x * blockDim.x) {
float CValue = 0;
for (int e = 0; e < A.width; ++e) {
CValue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = CValue;
}
}
}
| 62116b66a8b79c66186fe2d8d0a4294137ac11c0.cu | #include <iostream>
using namespace std;
typedef struct Matrix {
int width;
int height;
float *elements;
} Mat;
#define BLOCK_SIZE 16
#define w 4096
#define h 4096
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
int main() {
Mat h_A;
h_A.width = w;
h_A.height = h;
h_A.elements = (float *)malloc(sizeof(float) * h_A.width * h_A.height);
for (int i = 0; i < h_A.height; ++i) {
for (int j = 0; j < h_A.width; ++j) {
h_A.elements[i * h_A.width + j] = 1;
}
}
Mat h_B;
h_B.width = w;
h_B.height = h;
h_B.elements = (float *)malloc(sizeof(float) * h_B.width * h_B.height);
for (int i = 0; i < h_B.height; ++i) {
for (int j = 0; j < h_B.width; ++j) {
h_B.elements[i * h_B.width + j] = 1;
}
}
Mat h_C;
h_C.width = w;
h_C.height = h;
h_C.elements = (float *)malloc(sizeof(float) * h_C.width * h_C.height);
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
h_C.elements[i * h_C.width + j] = 0;
}
}
MatMul(h_A, h_B, h_C);
float tmp_value = w;
float sum_error = 0;
for (int i = 0; i < h_C.height; ++i) {
for (int j = 0; j < h_C.width; ++j) {
sum_error += fabs(tmp_value - h_C.elements[i * h_C.width + j]);
}
}
cout << "sum error : " << sum_error << endl;
free(h_A.elements);
free(h_B.elements);
free(h_C.elements);
return 0;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
Mat d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void **)&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Mat d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void **)&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
Mat d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc((void **)&d_C.elements, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(2, 2);
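  // The launch grid is deliberately tiny (2x2 blocks of 16x16 threads); the kernel uses
  // grid-stride loops, so each thread computes many elements of the 4096x4096 output.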
cout << dimGrid.x << " " << dimGrid.y << endl;
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
for (int row = blockIdx.y * blockDim.y + threadIdx.y; row < C.height;
row += gridDim.y * blockDim.y) {
for (int col = blockIdx.x * blockDim.x + threadIdx.x; col < C.width;
col += gridDim.x * blockDim.x) {
float CValue = 0;
for (int e = 0; e < A.width; ++e) {
CValue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
}
C.elements[row * C.width + col] = CValue;
}
}
}
|
bab15215c1d0a6ed389739318d8bacbdf512d54c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <util.cuh>
#include <worker.cuh>
using namespace std;
/*
* ====================
* WorkResult
* ====================
*/
WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) {
}
WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) {
}
WorkResult::~WorkResult() {
delete _results; // delete NULL is ok
}
Cost& WorkResult::getResults() const {
return *_results;
}
WorkResult::RESULTS WorkResult::getResultType() const {
return _resultType;
}
/*
* ====================
* Worker
* ====================
*/
Worker::Worker(ConvNet& convNet) : _convNet(&convNet) {
}
/*
* ====================
* DataWorker
* ====================
*/
DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) {
_dp = &convNet.getDataProvider();
}
DataWorker::~DataWorker() {
_dp->clearData();
}
/*
* ====================
* TrainingWorker
* ====================
*/
TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test)
: DataWorker(convNet, data), _test(test) {
}
// Need to setData here (as opposed to the constructor) because the constructor executes in
// the original CPU thread, which is not the one with GPU access.
void TrainingWorker::run() {
_dp->setData(*_data);
Cost& batchCost = *new Cost(0);
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
_convNet->fprop(i, _test ? PASS_TEST : PASS_TRAIN);
_convNet->getCost(batchCost);
if (!_test) {
_convNet->bprop(PASS_TRAIN);
_convNet->updateWeights();
}
}
    hipDeviceSynchronize(); // Blocks until the device finishes all queued work.
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* SyncWorker
* ====================
*/
SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) {
}
void SyncWorker::run() {
_convNet->copyToCPU();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE));
}
/*
* ====================
* GradCheckWorker
* ====================
*/
GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data)
: DataWorker(convNet, data) {
}
void GradCheckWorker::run() {
_dp->setData(*_data);
_convNet->checkGradients();
exit(0);
}
/*
* ====================
* MultiviewTestWorker
* ====================
*/
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx)
: DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) {
assert(_data->getNumCases() % _numViews == 0);
}
void MultiviewTestWorker::run() {
_dp->setData(*_data);
Layer& logregLayer = _convNet->getLayer(_logregIdx);
int numCasesReal = _dp->getNumCases() / _numViews;
int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize());
Cost& batchCost = *new Cost(0);
for (int i = 0; i < numMiniReal; i++) {
NVMatrix softmaxActs;
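        // Average the softmax outputs over all views of each case, then run the logistic
        // regression layer once on the averaged activations to get the multiview cost.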
for (int v = 0; v < _numViews; v++) {
GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(),
min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize()));
_convNet->fprop(mini, PASS_TEST);
if (v == 0) {
logregLayer.getPrev()[1]->getActs().copy(softmaxActs);
} else {
softmaxActs.add(logregLayer.getPrev()[1]->getActs());
}
}
softmaxActs.scale(1.0 / _numViews);
NVMatrixV logregInput;
logregInput.push_back(&logregLayer.getPrev()[0]->getActs());
logregInput.push_back(&softmaxActs);
logregLayer.fprop(logregInput, PASS_TEST);
_convNet->getCost(batchCost);
}
hipDeviceSynchronize();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* FeatureWorker
* ====================
*/
FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx)
: DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) {
assert(ftrs.getNumRows() == data.getNumCases());
assert(!ftrs.isTrans());
}
FeatureWorker::~FeatureWorker() {
delete _ftrs;
}
void FeatureWorker::run() {
_dp->setData(*_data);
Layer& ftrLayer = _convNet->getLayer(_layerIdx);
Cost& batchCost = *new Cost(0);
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
_convNet->fprop(i, PASS_TEST);
_convNet->getCost(batchCost);
Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(),
min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
NVMatrix& acts = ftrLayer.getActs();
NVMatrix acts_T;
if (acts.isTrans()) {
NVMatrix& soft_T = acts.getTranspose();
soft_T.transpose(acts_T);
delete &soft_T;
} else {
acts.transpose(acts_T);
}
acts_T.copyToHost(miniFtrs);
delete &miniFtrs;
}
hipDeviceSynchronize();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
} | bab15215c1d0a6ed389739318d8bacbdf512d54c.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <util.cuh>
#include <worker.cuh>
using namespace std;
/*
* ====================
* WorkResult
* ====================
*/
WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) {
}
WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) {
}
WorkResult::~WorkResult() {
delete _results; // delete NULL is ok
}
Cost& WorkResult::getResults() const {
return *_results;
}
WorkResult::RESULTS WorkResult::getResultType() const {
return _resultType;
}
/*
* ====================
* Worker
* ====================
*/
Worker::Worker(ConvNet& convNet) : _convNet(&convNet) {
}
/*
* ====================
* DataWorker
* ====================
*/
DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) {
_dp = &convNet.getDataProvider();
}
DataWorker::~DataWorker() {
_dp->clearData();
}
/*
* ====================
* TrainingWorker
* ====================
*/
TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test)
: DataWorker(convNet, data), _test(test) {
}
// Need to setData here (as opposed to the constructor) because the constructor executes in
// the original CPU thread, which is not the one with GPU access.
void TrainingWorker::run() {
_dp->setData(*_data);
Cost& batchCost = *new Cost(0);
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
_convNet->fprop(i, _test ? PASS_TEST : PASS_TRAIN);
_convNet->getCost(batchCost);
if (!_test) {
_convNet->bprop(PASS_TRAIN);
_convNet->updateWeights();
}
}
    cudaThreadSynchronize(); // Blocks until the device finishes all queued work.
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* SyncWorker
* ====================
*/
SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) {
}
void SyncWorker::run() {
_convNet->copyToCPU();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE));
}
/*
* ====================
* GradCheckWorker
* ====================
*/
GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data)
: DataWorker(convNet, data) {
}
void GradCheckWorker::run() {
_dp->setData(*_data);
_convNet->checkGradients();
exit(0);
}
/*
* ====================
* MultiviewTestWorker
* ====================
*/
MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx)
: DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) {
assert(_data->getNumCases() % _numViews == 0);
}
void MultiviewTestWorker::run() {
_dp->setData(*_data);
Layer& logregLayer = _convNet->getLayer(_logregIdx);
int numCasesReal = _dp->getNumCases() / _numViews;
int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize());
Cost& batchCost = *new Cost(0);
for (int i = 0; i < numMiniReal; i++) {
NVMatrix softmaxActs;
for (int v = 0; v < _numViews; v++) {
GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(),
min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize()));
_convNet->fprop(mini, PASS_TEST);
if (v == 0) {
logregLayer.getPrev()[1]->getActs().copy(softmaxActs);
} else {
softmaxActs.add(logregLayer.getPrev()[1]->getActs());
}
}
softmaxActs.scale(1.0 / _numViews);
NVMatrixV logregInput;
logregInput.push_back(&logregLayer.getPrev()[0]->getActs());
logregInput.push_back(&softmaxActs);
logregLayer.fprop(logregInput, PASS_TEST);
_convNet->getCost(batchCost);
}
cudaThreadSynchronize();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
}
/*
* ====================
* FeatureWorker
* ====================
*/
FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx)
: DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) {
assert(ftrs.getNumRows() == data.getNumCases());
assert(!ftrs.isTrans());
}
FeatureWorker::~FeatureWorker() {
delete _ftrs;
}
void FeatureWorker::run() {
_dp->setData(*_data);
Layer& ftrLayer = _convNet->getLayer(_layerIdx);
Cost& batchCost = *new Cost(0);
for (int i = 0; i < _dp->getNumMinibatches(); i++) {
_convNet->fprop(i, PASS_TEST);
_convNet->getCost(batchCost);
Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(),
min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize()));
NVMatrix& acts = ftrLayer.getActs();
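    // Transpose the layer activations so they match the row-per-case layout of the host
    // feature matrix before copying them back to the CPU.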
NVMatrix acts_T;
if (acts.isTrans()) {
NVMatrix& soft_T = acts.getTranspose();
soft_T.transpose(acts_T);
delete &soft_T;
} else {
acts.transpose(acts_T);
}
acts_T.copyToHost(miniFtrs);
delete &miniFtrs;
}
cudaThreadSynchronize();
_convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost));
} |
e7cd81be6068f2a4dee38a06d08523d07cb1eba8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/stochastic_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void StochasticDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
//LOG(INFO) << "layer drop : " << prob_layer_uint_ << "curProb : " << curProb_;
if (prob_layer_uint_){
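    // Inverted dropout: clamp the drop probability at 0.7, scale survivors by
    // 1/(1 - curProb_) (capped at 1.5), and zero units whose random draw does not
    // exceed the threshold UINT_MAX * curProb_.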
if (curProb_ > 0.7) curProb_ = 0.7;
scale_ = 1. / (1. - curProb_);
if (scale_ > 1.5) scale_ = 1.5;
//scale_ = 1;
uint_thres_ = static_cast<unsigned int>(UINT_MAX * curProb_);
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
}
else{
//no dropout performed
caffe_copy(count, bottom_data, top_data);
}
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void StochasticDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
if (prob_layer_uint_){
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
else{
//no dropout
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(StochasticDropoutLayer);
} // namespace caffe
| e7cd81be6068f2a4dee38a06d08523d07cb1eba8.cu | #include <vector>
#include "caffe/layers/stochastic_dropout_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void StochasticDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (this->phase_ == TRAIN) {
//LOG(INFO) << "layer drop : " << prob_layer_uint_ << "curProb : " << curProb_;
if (prob_layer_uint_){
if (curProb_ > 0.7) curProb_ = 0.7;
scale_ = 1. / (1. - curProb_);
if (scale_ > 1.5) scale_ = 1.5;
//scale_ = 1;
uint_thres_ = static_cast<unsigned int>(UINT_MAX * curProb_);
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
}
else{
//no dropout performed
caffe_copy(count, bottom_data, top_data);
}
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void StochasticDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (this->phase_ == TRAIN) {
if (prob_layer_uint_){
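      // Reuse the mask drawn in the forward pass so exactly the same units are zeroed
      // when the gradient is propagated back.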
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
else{
//no dropout
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(StochasticDropoutLayer);
} // namespace caffe
|
099b97ad6ba4d6168d4422b32cb094dc2ad8d626.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 16
#define TY 16
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define PI 3.141592654f
#define TPB 256 // reduction
__device__
float findX(int idx, int w) {
return ((idx - idx/w * w) * SCALEX / (w / 2) - SCALEX);
}
__device__
float findY(int idx, int h) {
return (SCALEY - idx/h * SCALEY / (h / 2));
}
int divUp(int a, int b) { return (a + b - 1) / b; }
__device__
float clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__device__
unsigned char value(float n1, float n2, int hue) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2 - n1) * hue / 60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2 - n1) * (240 - hue) / 60));
return (unsigned char)(255 * n1);
}
__device__
int idxClip(int idx, int idxMax) {
return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx);
}
__device__
int flatten(int col, int row, int width, int height) {
return idxClip(col, width) + idxClip(row, height) * width;
}
__device__
int getBin(float varX, float varY, int w) {
int x = int(varX / (float(SCALEX) / float(w / 2))) + w / 2;
int y = w / 2 - int(varY / (float(SCALEY) / float(w / 2)));
return x + y * w;
}
//////////////////////////////////////////////////////////////////
__global__
void ReductionKernel(float3* d_particals,float * d_doubderv, float* d_dnscell) {
__shared__ float cache1[SQRTNUM];
__shared__ float cache2[SQRTNUM];
for (int j = 0; j < SQRTNUM * SQRTNUM; j++) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
float xi = d_particals[j].x, pi = d_particals[j].y;
float temp1 = 0.0;
float temp2 = 0.0;
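// For particle j, accumulate the Gaussian-KDE phase-space density (temp2) and its
// second derivative with respect to momentum (temp1) over all still-active particles.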
while (index < NUM) {
float x = d_particals[index].x, p = d_particals[index].y, time = d_particals[index].z;
if (time >= 0) {
temp1 += 1 / (2 * PI * HX * HP * NUM) * (pow(p - pi, 2) - HP * HP) / (pow(HP, 4)) *
expf(-pow(x - xi, 2) / (2 * HX * HX) - pow(p - pi, 2) / (2 * HP * HP));
temp2 += 1 / (2 * PI * HX * HP * NUM) *
expf(-pow(x - xi, 2) / (2 * HX * HX) - pow(p - pi, 2) / (2 * HP * HP));
index += stride;
}
else{
temp1 += 0;
temp2 += 0;
index += stride;
}
}
cache1[threadIdx.x] = temp1;
cache2[threadIdx.x] = temp2;
__syncthreads();
// reduction
unsigned int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
cache1[threadIdx.x] += cache1[threadIdx.x + i];
cache2[threadIdx.x] += cache2[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
if (threadIdx.x == 0) {
atomicAdd(&d_doubderv[j], cache1[0]);
atomicAdd(&d_dnscell[j], cache2[0]);
}
}
}
__global__
void clearDensityAndOutArrKernel(uchar4* d_out, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
d_density[idx] = 0;
d_out[idx].x = 0;
d_out[idx].y = 0;
d_out[idx].z = 0;
d_out[idx].w = 255;
}
__global__
void plotKernel(uchar4* d_out, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
if ((col >= w) || (row >= h)) return;
float l = d_density[idx];
float s = 1;
int hc = (180 + (int)(360.0f * d_density[idx] * 1.5 )) % 360;
float m1, m2;
if (l <= 5.0f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
d_out[idx].x = 255 * d_density[idx];
d_out[idx].y = 0;
d_out[idx].z = 0;
d_out[idx].w = 255;
//d_out[idx].x = value(m1, m2, hc + 120);;
//d_out[idx].y = value(m1, m2, hc);
//d_out[idx].z = value(m1, m2, hc - 120);
//d_out[idx].w = 255;
if (col == h / 2 || row == w / 2) {
d_out[idx].x = 255;
d_out[idx].y = 255;
d_out[idx].z = 255;
d_out[idx].w = 255;
}
}
__global__
void makeHistKernel(float3* d_particals, float* d_density, int sqrtnum, int wight) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, sqrtnum, sqrtnum);
if ((col >= sqrtnum) || (row >= sqrtnum)) return;
float x = d_particals[idx].x, p = d_particals[idx].y, time = d_particals[idx].z;
if (time > 0) {
int mapIdx = getBin(x, p, wight);
d_density[mapIdx] = 1;
}
}
__global__
void makeHistKernelColor(float3* d_particals, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
if ((col >= w) || (row >= h)) return;
float sum = 0.0f;
float x = findX(idx, w), y = findY(idx, h);
for (int i = 0; i < NUM; i++) {
float xi = d_particals[i].x, yi = d_particals[i].y;
sum += 1 / (2 * PI * HX * HP) * expf(-(x - xi) * (x - xi) / (2 * HX * HX) - (y - yi) * (y - yi) / (2 * HP * HP));
}
d_density[idx] = sum / float(NUM);
}
__global__
void timeNextKernel(float3 * d_particals, float * d_doubderv, float* d_dnscell, int sqrtnum) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, sqrtnum, sqrtnum);
if ((col >= sqrtnum) || (row >= sqrtnum)) return;
float x = d_particals[idx].x, p = d_particals[idx].y, time = d_particals[idx].z;
float potderv = APOT * x - BPOT * x * x; // potential derivative
float pot3derv = -2 * BPOT; // third derivative of the potential
float doubderv = d_doubderv[idx]; // double dot density
float dnscell = d_dnscell[idx]; // density value in cell
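// Euler step: advance position by p/m and momentum by the classical force -V'(x)
// plus a correction term V'''/24 * (d2rho/dp2)/rho built from the sampled density.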
float deltaX = STEPTIME * (p / MASS);
float deltaP = STEPTIME * (-potderv + pot3derv / (4 * 6) * doubderv / dnscell);
if (deltaX + x > SCALEX || deltaX + x < -SCALEX || deltaP + p > SCALEY || deltaP + p < -SCALEY || time < 0) {
d_particals[idx].x = -SCALEX;
d_particals[idx].y = -SCALEY;
d_particals[idx].z = -1;
}
else {
d_particals[idx].x += deltaX;
d_particals[idx].y += deltaP;
d_particals[idx].z += STEPTIME;
}
}
//////////////////////////////////////////////////////////////////
void initialConditions(uchar4 * d_out, float3 * d_particals, float * d_density, int w, int h) {
float3* initialRandomValues = new float3[NUM];
FILE* infile = fopen("randForCubic2.txt", "r");
for (int i = 0; i < NUM; i++) {
if (fscanf(infile, "%f \t %f\t %f \n", &initialRandomValues[i].x, &initialRandomValues[i].y, &initialRandomValues[i].z) == EOF) break;
}
fclose(infile);
hipMemcpy(d_particals, initialRandomValues, NUM * sizeof(float3), hipMemcpyHostToDevice);
const dim3 blockSizePart(TX, TY);
const dim3 gridSizePart(divUp(SQRTNUM, 4), divUp(SQRTNUM, 4));
hipLaunchKernelGGL(( makeHistKernel) , dim3(gridSizePart), dim3(blockSizePart) , 0, 0, d_particals, d_density, SQRTNUM, w);
//const dim3 blockSize(TX, TY);
//const dim3 gridSize(divUp(w, TX), divUp(h, TY));
//makeHistKernelColor << <gridSize, blockSize >> > (d_particals, d_density, w, h);
}
void kernelLauncher(uchar4 * d_out, float3 * d_particals, float * d_density, float * d_doubderv, float * d_dnscell, int w, int h) {
const dim3 blockSize(TX, TY);
const dim3 gridSize(divUp(w, TX), divUp(h, TY));
const dim3 blockSizePart(TX, TY);
const dim3 gridSizePart(divUp(SQRTNUM, TX), divUp(SQRTNUM, TX));
//Reduction
dim3 gridSizeReduction = 16;
dim3 blockSizeReduction = 4 * 16;
hipMemset(d_doubderv, 0.0, NUM * sizeof(float));
hipMemset(d_dnscell, 0.0, NUM * sizeof(float));
hipLaunchKernelGGL(( ReductionKernel), dim3(gridSizeReduction), dim3(blockSizeReduction) , 0, 0, d_particals, d_doubderv, d_dnscell);
hipLaunchKernelGGL(( timeNextKernel), dim3(gridSizePart), dim3(blockSizePart), 0, 0, d_particals,d_doubderv, d_dnscell, SQRTNUM);
hipLaunchKernelGGL(( clearDensityAndOutArrKernel) , dim3(gridSize), dim3(blockSize) , 0, 0, d_out, d_density, w, h);
hipLaunchKernelGGL(( makeHistKernel), dim3(gridSizePart), dim3(blockSizePart) , 0, 0, d_particals, d_density, SQRTNUM, w);
//makeHistKernelColor << <gridSize, blockSize >> > (d_particals, d_density, w, h);
hipLaunchKernelGGL(( plotKernel) , dim3(gridSize), dim3(blockSize) , 0, 0, d_out, d_density, w, h);
}
| 099b97ad6ba4d6168d4422b32cb094dc2ad8d626.cu | #include "kernel.h"
#define TX 16
#define TY 16
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.141592654f
#define TPB 256 // reduction
__device__
float findX(int idx, int w) {
return ((idx - idx/w * w) * SCALEX / (w / 2) - SCALEX);
}
__device__
float findY(int idx, int h) {
return (SCALEY - idx/h * SCALEY / (h / 2));
}
int divUp(int a, int b) { return (a + b - 1) / b; }
__device__
float clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__device__
unsigned char value(float n1, float n2, int hue) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2 - n1) * hue / 60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2 - n1) * (240 - hue) / 60));
return (unsigned char)(255 * n1);
}
__device__
int idxClip(int idx, int idxMax) {
return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx);
}
__device__
int flatten(int col, int row, int width, int height) {
return idxClip(col, width) + idxClip(row, height) * width;
}
__device__
int getBin(float varX, float varY, int w) {
int x = int(varX / (float(SCALEX) / float(w / 2))) + w / 2;
int y = w / 2 - int(varY / (float(SCALEY) / float(w / 2)));
return x + y * w;
}
//////////////////////////////////////////////////////////////////
__global__
void ReductionKernel(float3* d_particals,float * d_doubderv, float* d_dnscell) {
__shared__ float cache1[SQRTNUM];
__shared__ float cache2[SQRTNUM];
for (int j = 0; j < SQRTNUM * SQRTNUM; j++) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
float xi = d_particals[j].x, pi = d_particals[j].y;
float temp1 = 0.0;
float temp2 = 0.0;
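// For particle j, accumulate the Gaussian-KDE phase-space density (temp2) and its
// second derivative with respect to momentum (temp1) over all still-active particles.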
while (index < NUM) {
float x = d_particals[index].x, p = d_particals[index].y, time = d_particals[index].z;
if (time >= 0) {
temp1 += 1 / (2 * PI * HX * HP * NUM) * (pow(p - pi, 2) - HP * HP) / (pow(HP, 4)) *
expf(-pow(x - xi, 2) / (2 * HX * HX) - pow(p - pi, 2) / (2 * HP * HP));
temp2 += 1 / (2 * PI * HX * HP * NUM) *
expf(-pow(x - xi, 2) / (2 * HX * HX) - pow(p - pi, 2) / (2 * HP * HP));
index += stride;
}
else{
temp1 += 0;
temp2 += 0;
index += stride;
}
}
cache1[threadIdx.x] = temp1;
cache2[threadIdx.x] = temp2;
__syncthreads();
// reduction
unsigned int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
cache1[threadIdx.x] += cache1[threadIdx.x + i];
cache2[threadIdx.x] += cache2[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
if (threadIdx.x == 0) {
atomicAdd(&d_doubderv[j], cache1[0]);
atomicAdd(&d_dnscell[j], cache2[0]);
}
}
}
__global__
void clearDensityAndOutArrKernel(uchar4* d_out, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
d_density[idx] = 0;
d_out[idx].x = 0;
d_out[idx].y = 0;
d_out[idx].z = 0;
d_out[idx].w = 255;
}
__global__
void plotKernel(uchar4* d_out, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
if ((col >= w) || (row >= h)) return;
float l = d_density[idx];
float s = 1;
int hc = (180 + (int)(360.0f * d_density[idx] * 1.5 )) % 360;
float m1, m2;
if (l <= 5.0f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
d_out[idx].x = 255 * d_density[idx];
d_out[idx].y = 0;
d_out[idx].z = 0;
d_out[idx].w = 255;
//d_out[idx].x = value(m1, m2, hc + 120);;
//d_out[idx].y = value(m1, m2, hc);
//d_out[idx].z = value(m1, m2, hc - 120);
//d_out[idx].w = 255;
if (col == h / 2 || row == w / 2) {
d_out[idx].x = 255;
d_out[idx].y = 255;
d_out[idx].z = 255;
d_out[idx].w = 255;
}
}
__global__
void makeHistKernel(float3* d_particals, float* d_density, int sqrtnum, int wight) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, sqrtnum, sqrtnum);
if ((col >= sqrtnum) || (row >= sqrtnum)) return;
float x = d_particals[idx].x, p = d_particals[idx].y, time = d_particals[idx].z;
if (time > 0) {
int mapIdx = getBin(x, p, wight);
d_density[mapIdx] = 1;
}
}
__global__
void makeHistKernelColor(float3* d_particals, float* d_density, int w, int h) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, w, h);
if ((col >= w) || (row >= h)) return;
float sum = 0.0f;
float x = findX(idx, w), y = findY(idx, h);
for (int i = 0; i < NUM; i++) {
float xi = d_particals[i].x, yi = d_particals[i].y;
sum += 1 / (2 * PI * HX * HP) * expf(-(x - xi) * (x - xi) / (2 * HX * HX) - (y - yi) * (y - yi) / (2 * HP * HP));
}
d_density[idx] = sum / float(NUM);
}
__global__
void timeNextKernel(float3 * d_particals, float * d_doubderv, float* d_dnscell, int sqrtnum) {
const int col = threadIdx.x + blockDim.x * blockIdx.x;
const int row = threadIdx.y + blockDim.y * blockIdx.y;
const int idx = flatten(col, row, sqrtnum, sqrtnum);
if ((col >= sqrtnum) || (row >= sqrtnum)) return;
float x = d_particals[idx].x, p = d_particals[idx].y, time = d_particals[idx].z;
float potderv = APOT * x - BPOT * x * x; // potential derivative
float pot3derv = -2 * BPOT; // third derivative of the potential
float doubderv = d_doubderv[idx]; // double dot density
float dnscell = d_dnscell[idx]; // density value in cell
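// Euler step: advance position by p/m and momentum by the classical force -V'(x)
// plus a correction term V'''/24 * (d2rho/dp2)/rho built from the sampled density.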
float deltaX = STEPTIME * (p / MASS);
float deltaP = STEPTIME * (-potderv + pot3derv / (4 * 6) * doubderv / dnscell);
if (deltaX + x > SCALEX || deltaX + x < -SCALEX || deltaP + p > SCALEY || deltaP + p < -SCALEY || time < 0) {
d_particals[idx].x = -SCALEX;
d_particals[idx].y = -SCALEY;
d_particals[idx].z = -1;
}
else {
d_particals[idx].x += deltaX;
d_particals[idx].y += deltaP;
d_particals[idx].z += STEPTIME;
}
}
//////////////////////////////////////////////////////////////////
void initialConditions(uchar4 * d_out, float3 * d_particals, float * d_density, int w, int h) {
float3* initialRandomValues = new float3[NUM];
FILE* infile = fopen("randForCubic2.txt", "r");
for (int i = 0; i < NUM; i++) {
if (fscanf(infile, "%f \t %f\t %f \n", &initialRandomValues[i].x, &initialRandomValues[i].y, &initialRandomValues[i].z) == EOF) break;
}
fclose(infile);
cudaMemcpy(d_particals, initialRandomValues, NUM * sizeof(float3), cudaMemcpyHostToDevice);
const dim3 blockSizePart(TX, TY);
const dim3 gridSizePart(divUp(SQRTNUM, 4), divUp(SQRTNUM, 4));
makeHistKernel <<<gridSizePart, blockSizePart >>> (d_particals, d_density, SQRTNUM, w);
//const dim3 blockSize(TX, TY);
//const dim3 gridSize(divUp(w, TX), divUp(h, TY));
//makeHistKernelColor << <gridSize, blockSize >> > (d_particals, d_density, w, h);
}
void kernelLauncher(uchar4 * d_out, float3 * d_particals, float * d_density, float * d_doubderv, float * d_dnscell, int w, int h) {
const dim3 blockSize(TX, TY);
const dim3 gridSize(divUp(w, TX), divUp(h, TY));
const dim3 blockSizePart(TX, TY);
const dim3 gridSizePart(divUp(SQRTNUM, TX), divUp(SQRTNUM, TX));
//Reduction
dim3 gridSizeReduction = 16;
dim3 blockSizeReduction = 4 * 16;
cudaMemset(d_doubderv, 0.0, NUM * sizeof(float));
cudaMemset(d_dnscell, 0.0, NUM * sizeof(float));
ReductionKernel<<<gridSizeReduction, blockSizeReduction >>> (d_particals, d_doubderv, d_dnscell);
timeNextKernel<<<gridSizePart, blockSizePart>>>(d_particals,d_doubderv, d_dnscell, SQRTNUM);
clearDensityAndOutArrKernel <<<gridSize, blockSize >>> (d_out, d_density, w, h);
makeHistKernel<<<gridSizePart, blockSizePart >>> (d_particals, d_density, SQRTNUM, w);
//makeHistKernelColor << <gridSize, blockSize >> > (d_particals, d_density, w, h);
plotKernel <<<gridSize, blockSize >>> (d_out, d_density, w, h);
}
|
3dcbf2f96cb6905cb6a9d99dbcf86bb038de98c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
***/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <gmp.h>
#include "cgbn/cgbn.h"
#include "types.h"
#include "sizes.h"
typedef enum test_enum {
test_set_1, test_swap_1, test_add_1, test_negate_1, test_sub_1,
test_mul_1, test_mul_high_1, test_sqr_1, test_sqr_high_1, test_div_1, test_rem_1,
test_div_rem_1, test_sqrt_1, test_sqrt_rem_1, test_equals_1, test_equals_2, test_equals_3, test_compare_1, test_compare_2,
test_compare_3, test_compare_4, test_extract_bits_1, test_insert_bits_1,
test_get_ui32_set_ui32_1, test_add_ui32_1, test_sub_ui32_1, test_mul_ui32_1, test_div_ui32_1, test_rem_ui32_1,
test_equals_ui32_1, test_equals_ui32_2, test_equals_ui32_3, test_equals_ui32_4, test_compare_ui32_1, test_compare_ui32_2,
test_extract_bits_ui32_1, test_insert_bits_ui32_1, test_binary_inverse_ui32_1, test_gcd_ui32_1,
test_mul_wide_1, test_sqr_wide_1, test_div_wide_1, test_rem_wide_1, test_div_rem_wide_1, test_sqrt_wide_1, test_sqrt_rem_wide_1,
test_bitwise_and_1, test_bitwise_ior_1, test_bitwise_xor_1, test_bitwise_complement_1, test_bitwise_select_1, test_bitwise_mask_copy_1,
test_bitwise_mask_and_1, test_bitwise_mask_ior_1, test_bitwise_mask_xor_1, test_bitwise_mask_select_1, test_shift_left_1,
test_shift_right_1, test_rotate_left_1, test_rotate_right_1, test_pop_count_1, test_clz_1, test_ctz_1,
test_accumulator_1, test_accumulator_2, test_binary_inverse_1, test_gcd_1, test_modular_inverse_1, test_modular_power_1,
test_bn2mont_1, test_mont2bn_1, test_mont_mul_1, test_mont_sqr_1, test_mont_reduce_wide_1, test_barrett_div_1,
test_barrett_rem_1, test_barrett_div_rem_1, test_barrett_div_wide_1, test_barrett_rem_wide_1, test_barrett_div_rem_wide_1
} test_t;
template<test_t test, class params>
struct implementation {
public:
__device__ __forceinline__ static void run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, int32_t instance) {
printf("TEST NOT IMPLEMENTED! FIX ME!\n");
}
};
#include "tests/tests.h"
static gmp_randstate_t _state;
static uint32_t _seed=0;
static uint32_t _bits=0;
static uint32_t _count=0;
static void *_cpu_data=NULL;
static void *_gpu_data=NULL;
#define $GPU(call) if((call)!=0) { printf("\nCall \"" #call "\" failed from %s, line %d\n", __FILE__, __LINE__); exit(1); }
void zero_words(uint32_t *x, uint32_t count) {
int index;
for(index=0;index<count;index++)
x[index]=0;
}
void print_words(uint32_t *x, uint32_t count) {
int index;
for(index=count-1;index>=0;index--)
printf("%08X", x[index]);
printf("\n");
}
void copy_words(uint32_t *from, uint32_t *to, uint32_t count) {
int index;
for(index=0;index<count;index++)
to[index]=from[index];
}
int compare_words(uint32_t *x, uint32_t *y, uint32_t count) {
int index;
for(index=count-1;index>=0;index--) {
if(x[index]!=y[index]) {
if(x[index]>y[index])
return 1;
else
return -1;
}
}
return 0;
}
void random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) {
int32_t index;
for(index=0;index<count;index++)
x[index]=gmp_urandomb_ui(state, 32);
}
void hard_random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) {
uint32_t values[6]={0x0, 0x1, 0x7FFFFFFF, 0x80000000, 0x80000001, 0xFFFFFFFF};
int32_t offset, bit, bits, index;
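// Three generation modes: fully random words, words drawn from a small set of
// boundary values, or random-length runs of set bits separated by runs of zero bits.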
switch(gmp_urandomb_ui(state, 16)%3) {
case 0:
for(index=0;index<count;index++)
x[index]=gmp_urandomb_ui(state, 32);
break;
case 1:
for(index=0;index<count;index++)
x[index]=values[gmp_urandomb_ui(state, 16)%6];
break;
case 2:
zero_words(x, count);
offset=0;
while(offset<count*32) {
bit=gmp_urandomb_ui(state, 16)%2;
bits=gmp_urandomb_ui(state, 32)%(32*count/2)+16;
if(bit==1) {
if(bits>count*32-offset)
bits=count*32-offset;
while(bits>0) {
if(offset%32==0 && bits>=32) {
while(bits>=32) {
x[offset/32]=0xFFFFFFFF;
bits-=32;
offset+=32;
}
}
else {
x[offset/32]=x[offset/32] + (1<<offset%32);
bits--;
offset++;
}
}
}
else
offset+=bits;
}
break;
}
}
template<class params>
static void generate_data(uint32_t count) {
typename types<params>::input_t *inputs;
int32_t instance;
// printf("generating %d\n", params::size);
if(_cpu_data!=NULL) {
free(_cpu_data);
_cpu_data=NULL;
}
if(_gpu_data!=NULL) {
$GPU(hipFree(_gpu_data));
_gpu_data=NULL;
}
_cpu_data=malloc(sizeof(typename types<params>::input_t)*count);
inputs=(typename types<params>::input_t *)_cpu_data;
gmp_randseed_ui(_state, _seed);
for(instance=0;instance<count;instance++) {
hard_random_words(inputs[instance].h1._limbs, params::size/32, _state);
hard_random_words(inputs[instance].h2._limbs, params::size/32, _state);
random_words(inputs[instance].x1._limbs, params::size/32, _state);
random_words(inputs[instance].x2._limbs, params::size/32, _state);
random_words(inputs[instance].x3._limbs, params::size/32, _state);
random_words(inputs[instance].u, 32, _state);
}
$GPU(hipMalloc((void **)&_gpu_data, sizeof(typename types<params>::input_t)*count));
$GPU(hipMemcpy(_gpu_data, _cpu_data, sizeof(typename types<params>::input_t)*count, hipMemcpyHostToDevice));
}
template<class params>
static typename types<params>::input_t *cpu_data(uint32_t count) {
if(params::size!=_bits || count>_count || _gpu_data==NULL) {
if(_seed==0) {
_seed=time(NULL);
gmp_randinit_default(_state);
}
generate_data<params>(count);
_bits=params::size;
_count=count;
}
return (typename types<params>::input_t *)_cpu_data;
}
template<class params>
static typename types<params>::input_t *gpu_data(uint32_t count) {
if(params::size!=_bits || count>_count || _gpu_data==NULL) {
if(_seed==0) {
_seed=time(NULL);
gmp_randinit_default(_state);
}
generate_data<params>(count);
_bits=params::size;
_count=count;
}
return (typename types<params>::input_t *)_gpu_data;
}
template<test_t TEST, class params>
__global__ void gpu_kernel(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
implementation<TEST, params> impl;
int32_t instance=(blockIdx.x * blockDim.x + threadIdx.x)/params::TPI;
if(instance>=count)
return;
impl.run(inputs, outputs, instance);
}
template<test_t TEST, class params>
void gpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
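// Each CGBN instance is handled by TPI cooperating threads, so a block of TPB threads
// covers IPB instances; enough blocks are launched to cover all count instances.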
uint32_t TPB=(params::TPB==0) ? 128 : params::TPB;
uint32_t TPI=params::TPI, IPB=TPB/TPI;
uint32_t blocks=(count+IPB+1)/IPB;
hipLaunchKernelGGL(( gpu_kernel<TEST, params>), dim3(blocks), dim3(TPB), 0, 0, inputs, outputs, count);
}
template<test_t TEST, class params>
void cpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
implementation<TEST, params> impl;
#pragma omp parallel for
for(int index=0;index<count;index++)
impl.run(inputs, outputs, index);
}
template<test_t TEST, class params>
bool run_test(uint32_t count) {
typename types<params>::input_t *cpu_inputs, *gpu_inputs;
typename types<params>::output_t *compare, *cpu_outputs, *gpu_outputs;
int instance;
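// For operand sizes above 1024 bits, scale the instance count down (roughly by
// 1024/size), presumably to keep the total amount of test work bounded.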
if(params::size>1024)
count=count*(1024*1024/params::size)/1024;
cpu_inputs=cpu_data<params>(count);
gpu_inputs=gpu_data<params>(count);
compare=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count);
cpu_outputs=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count);
memset(cpu_outputs, 0, sizeof(typename types<params>::output_t)*count);
$GPU(hipMalloc((void **)&gpu_outputs, sizeof(typename types<params>::output_t)*count));
$GPU(hipMemset(gpu_outputs, 0, sizeof(typename types<params>::output_t)*count));
cpu_run<TEST, params>(cpu_inputs, cpu_outputs, count);
gpu_run<TEST, params>(gpu_inputs, gpu_outputs, count);
$GPU(hipMemcpy(compare, gpu_outputs, sizeof(typename types<params>::output_t)*count, hipMemcpyDeviceToHost));
for(instance=0;instance<count;instance++) {
if(compare_words(cpu_outputs[instance].r1._limbs, compare[instance].r1._limbs, params::size/32)!=0 ||
compare_words(cpu_outputs[instance].r2._limbs, compare[instance].r2._limbs, params::size/32)!=0) {
printf("Test failed at index %d\n", instance);
printf("h1: ");
print_words(cpu_inputs[instance].h1._limbs, params::size/32);
printf("\n");
printf("h2: ");
print_words(cpu_inputs[instance].h2._limbs, params::size/32);
printf("\n");
printf("x1: ");
print_words(cpu_inputs[instance].x1._limbs, params::size/32);
printf("\n");
// printf("x2: ");
// print_words(cpu_inputs[instance].x2._limbs, params::size/32);
// printf("\n");
// printf("x3: ");
// print_words(cpu_inputs[instance].x3._limbs, params::size/32);
// printf("\n");
printf("u0: %08X u1: %08X u2: %08X\n\n", cpu_inputs[instance].u[0], cpu_inputs[instance].u[1], cpu_inputs[instance].u[2]);
printf("CPU R1: ");
print_words(cpu_outputs[instance].r1._limbs, params::size/32);
printf("\n");
printf("GPU R1: ");
print_words(compare[instance].r1._limbs, params::size/32);
printf("\n");
printf("CPU R2: ");
print_words(cpu_outputs[instance].r2._limbs, params::size/32);
printf("\n");
printf("GPU R2: ");
print_words(compare[instance].r2._limbs, params::size/32);
printf("\n");
return false;
}
}
free(compare);
free(cpu_outputs);
$GPU(hipFree(gpu_outputs));
return true;
}
#define LONG_TEST 1000000
#define MEDIUM_TEST 100000
#define SHORT_TEST 10000
#define TINY_TEST 1000
#define SINGLE_TEST 1
/*
int main() {
run_test<test_add_1, 2048>(LONG_TEST);
run_test<test_sub_1, 2048>(LONG_TEST);
}
*/
#include "gtest/gtest.h"
#include "unit_tests.cc"
int main(int argc, char **argv) {
int nDevice=-1, result;
hipGetDeviceCount(&nDevice);
if(nDevice<=0) {
printf("Error no cuda device found. Aborting tests\n");
exit(EXIT_FAILURE);
}
testing::InitGoogleTest(&argc, argv);
result=RUN_ALL_TESTS();
if(result!=0)
printf("Please report random seed %08X along with failure\n", _seed);
return result;
}
| 3dcbf2f96cb6905cb6a9d99dbcf86bb038de98c0.cu | /***
Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
***/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <gmp.h>
#include "cgbn/cgbn.h"
#include "types.h"
#include "sizes.h"
typedef enum test_enum {
test_set_1, test_swap_1, test_add_1, test_negate_1, test_sub_1,
test_mul_1, test_mul_high_1, test_sqr_1, test_sqr_high_1, test_div_1, test_rem_1,
test_div_rem_1, test_sqrt_1, test_sqrt_rem_1, test_equals_1, test_equals_2, test_equals_3, test_compare_1, test_compare_2,
test_compare_3, test_compare_4, test_extract_bits_1, test_insert_bits_1,
test_get_ui32_set_ui32_1, test_add_ui32_1, test_sub_ui32_1, test_mul_ui32_1, test_div_ui32_1, test_rem_ui32_1,
test_equals_ui32_1, test_equals_ui32_2, test_equals_ui32_3, test_equals_ui32_4, test_compare_ui32_1, test_compare_ui32_2,
test_extract_bits_ui32_1, test_insert_bits_ui32_1, test_binary_inverse_ui32_1, test_gcd_ui32_1,
test_mul_wide_1, test_sqr_wide_1, test_div_wide_1, test_rem_wide_1, test_div_rem_wide_1, test_sqrt_wide_1, test_sqrt_rem_wide_1,
test_bitwise_and_1, test_bitwise_ior_1, test_bitwise_xor_1, test_bitwise_complement_1, test_bitwise_select_1, test_bitwise_mask_copy_1,
test_bitwise_mask_and_1, test_bitwise_mask_ior_1, test_bitwise_mask_xor_1, test_bitwise_mask_select_1, test_shift_left_1,
test_shift_right_1, test_rotate_left_1, test_rotate_right_1, test_pop_count_1, test_clz_1, test_ctz_1,
test_accumulator_1, test_accumulator_2, test_binary_inverse_1, test_gcd_1, test_modular_inverse_1, test_modular_power_1,
test_bn2mont_1, test_mont2bn_1, test_mont_mul_1, test_mont_sqr_1, test_mont_reduce_wide_1, test_barrett_div_1,
test_barrett_rem_1, test_barrett_div_rem_1, test_barrett_div_wide_1, test_barrett_rem_wide_1, test_barrett_div_rem_wide_1
} test_t;
template<test_t test, class params>
struct implementation {
public:
__device__ __forceinline__ static void run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, int32_t instance) {
printf("TEST NOT IMPLEMENTED! FIX ME!\n");
}
};
#include "tests/tests.h"
static gmp_randstate_t _state;
static uint32_t _seed=0;
static uint32_t _bits=0;
static uint32_t _count=0;
static void *_cpu_data=NULL;
static void *_gpu_data=NULL;
#define $GPU(call) if((call)!=0) { printf("\nCall \"" #call "\" failed from %s, line %d\n", __FILE__, __LINE__); exit(1); }
void zero_words(uint32_t *x, uint32_t count) {
int index;
for(index=0;index<count;index++)
x[index]=0;
}
void print_words(uint32_t *x, uint32_t count) {
int index;
for(index=count-1;index>=0;index--)
printf("%08X", x[index]);
printf("\n");
}
void copy_words(uint32_t *from, uint32_t *to, uint32_t count) {
int index;
for(index=0;index<count;index++)
to[index]=from[index];
}
int compare_words(uint32_t *x, uint32_t *y, uint32_t count) {
int index;
for(index=count-1;index>=0;index--) {
if(x[index]!=y[index]) {
if(x[index]>y[index])
return 1;
else
return -1;
}
}
return 0;
}
void random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) {
int32_t index;
for(index=0;index<count;index++)
x[index]=gmp_urandomb_ui(state, 32);
}
void hard_random_words(uint32_t *x, uint32_t count, gmp_randstate_t state) {
uint32_t values[6]={0x0, 0x1, 0x7FFFFFFF, 0x80000000, 0x80000001, 0xFFFFFFFF};
int32_t offset, bit, bits, index;
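// Three generation modes: fully random words, words drawn from a small set of
// boundary values, or random-length runs of set bits separated by runs of zero bits.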
switch(gmp_urandomb_ui(state, 16)%3) {
case 0:
for(index=0;index<count;index++)
x[index]=gmp_urandomb_ui(state, 32);
break;
case 1:
for(index=0;index<count;index++)
x[index]=values[gmp_urandomb_ui(state, 16)%6];
break;
case 2:
zero_words(x, count);
offset=0;
while(offset<count*32) {
bit=gmp_urandomb_ui(state, 16)%2;
bits=gmp_urandomb_ui(state, 32)%(32*count/2)+16;
if(bit==1) {
if(bits>count*32-offset)
bits=count*32-offset;
while(bits>0) {
if(offset%32==0 && bits>=32) {
while(bits>=32) {
x[offset/32]=0xFFFFFFFF;
bits-=32;
offset+=32;
}
}
else {
x[offset/32]=x[offset/32] + (1<<offset%32);
bits--;
offset++;
}
}
}
else
offset+=bits;
}
break;
}
}
template<class params>
static void generate_data(uint32_t count) {
typename types<params>::input_t *inputs;
int32_t instance;
// printf("generating %d\n", params::size);
if(_cpu_data!=NULL) {
free(_cpu_data);
_cpu_data=NULL;
}
if(_gpu_data!=NULL) {
$GPU(cudaFree(_gpu_data));
_gpu_data=NULL;
}
_cpu_data=malloc(sizeof(typename types<params>::input_t)*count);
inputs=(typename types<params>::input_t *)_cpu_data;
gmp_randseed_ui(_state, _seed);
for(instance=0;instance<count;instance++) {
hard_random_words(inputs[instance].h1._limbs, params::size/32, _state);
hard_random_words(inputs[instance].h2._limbs, params::size/32, _state);
random_words(inputs[instance].x1._limbs, params::size/32, _state);
random_words(inputs[instance].x2._limbs, params::size/32, _state);
random_words(inputs[instance].x3._limbs, params::size/32, _state);
random_words(inputs[instance].u, 32, _state);
}
$GPU(cudaMalloc((void **)&_gpu_data, sizeof(typename types<params>::input_t)*count));
$GPU(cudaMemcpy(_gpu_data, _cpu_data, sizeof(typename types<params>::input_t)*count, cudaMemcpyHostToDevice));
}
template<class params>
static typename types<params>::input_t *cpu_data(uint32_t count) {
if(params::size!=_bits || count>_count || _gpu_data==NULL) {
if(_seed==0) {
_seed=time(NULL);
gmp_randinit_default(_state);
}
generate_data<params>(count);
_bits=params::size;
_count=count;
}
return (typename types<params>::input_t *)_cpu_data;
}
template<class params>
static typename types<params>::input_t *gpu_data(uint32_t count) {
if(params::size!=_bits || count>_count || _gpu_data==NULL) {
if(_seed==0) {
_seed=time(NULL);
gmp_randinit_default(_state);
}
generate_data<params>(count);
_bits=params::size;
_count=count;
}
return (typename types<params>::input_t *)_gpu_data;
}
template<test_t TEST, class params>
__global__ void gpu_kernel(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
implementation<TEST, params> impl;
int32_t instance=(blockIdx.x * blockDim.x + threadIdx.x)/params::TPI;
if(instance>=count)
return;
impl.run(inputs, outputs, instance);
}
template<test_t TEST, class params>
void gpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
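// Each CGBN instance is handled by TPI cooperating threads, so a block of TPB threads
// covers IPB instances; enough blocks are launched to cover all count instances.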
uint32_t TPB=(params::TPB==0) ? 128 : params::TPB;
uint32_t TPI=params::TPI, IPB=TPB/TPI;
uint32_t blocks=(count+IPB+1)/IPB;
gpu_kernel<TEST, params><<<blocks, TPB>>>(inputs, outputs, count);
}
template<test_t TEST, class params>
void cpu_run(typename types<params>::input_t *inputs, typename types<params>::output_t *outputs, uint32_t count) {
implementation<TEST, params> impl;
#pragma omp parallel for
for(int index=0;index<count;index++)
impl.run(inputs, outputs, index);
}
template<test_t TEST, class params>
bool run_test(uint32_t count) {
typename types<params>::input_t *cpu_inputs, *gpu_inputs;
typename types<params>::output_t *compare, *cpu_outputs, *gpu_outputs;
int instance;
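// For operand sizes above 1024 bits, scale the instance count down (roughly by
// 1024/size), presumably to keep the total amount of test work bounded.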
if(params::size>1024)
count=count*(1024*1024/params::size)/1024;
cpu_inputs=cpu_data<params>(count);
gpu_inputs=gpu_data<params>(count);
compare=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count);
cpu_outputs=(typename types<params>::output_t *)malloc(sizeof(typename types<params>::output_t)*count);
memset(cpu_outputs, 0, sizeof(typename types<params>::output_t)*count);
$GPU(cudaMalloc((void **)&gpu_outputs, sizeof(typename types<params>::output_t)*count));
$GPU(cudaMemset(gpu_outputs, 0, sizeof(typename types<params>::output_t)*count));
cpu_run<TEST, params>(cpu_inputs, cpu_outputs, count);
gpu_run<TEST, params>(gpu_inputs, gpu_outputs, count);
$GPU(cudaMemcpy(compare, gpu_outputs, sizeof(typename types<params>::output_t)*count, cudaMemcpyDeviceToHost));
for(instance=0;instance<count;instance++) {
if(compare_words(cpu_outputs[instance].r1._limbs, compare[instance].r1._limbs, params::size/32)!=0 ||
compare_words(cpu_outputs[instance].r2._limbs, compare[instance].r2._limbs, params::size/32)!=0) {
printf("Test failed at index %d\n", instance);
printf("h1: ");
print_words(cpu_inputs[instance].h1._limbs, params::size/32);
printf("\n");
printf("h2: ");
print_words(cpu_inputs[instance].h2._limbs, params::size/32);
printf("\n");
printf("x1: ");
print_words(cpu_inputs[instance].x1._limbs, params::size/32);
printf("\n");
// printf("x2: ");
// print_words(cpu_inputs[instance].x2._limbs, params::size/32);
// printf("\n");
// printf("x3: ");
// print_words(cpu_inputs[instance].x3._limbs, params::size/32);
// printf("\n");
printf("u0: %08X u1: %08X u2: %08X\n\n", cpu_inputs[instance].u[0], cpu_inputs[instance].u[1], cpu_inputs[instance].u[2]);
printf("CPU R1: ");
print_words(cpu_outputs[instance].r1._limbs, params::size/32);
printf("\n");
printf("GPU R1: ");
print_words(compare[instance].r1._limbs, params::size/32);
printf("\n");
printf("CPU R2: ");
print_words(cpu_outputs[instance].r2._limbs, params::size/32);
printf("\n");
printf("GPU R2: ");
print_words(compare[instance].r2._limbs, params::size/32);
printf("\n");
return false;
}
}
free(compare);
free(cpu_outputs);
$GPU(cudaFree(gpu_outputs));
return true;
}
#define LONG_TEST 1000000
#define MEDIUM_TEST 100000
#define SHORT_TEST 10000
#define TINY_TEST 1000
#define SINGLE_TEST 1
/*
int main() {
run_test<test_add_1, 2048>(LONG_TEST);
run_test<test_sub_1, 2048>(LONG_TEST);
}
*/
#include "gtest/gtest.h"
#include "unit_tests.cc"
int main(int argc, char **argv) {
int nDevice=-1, result;
cudaGetDeviceCount(&nDevice);
if(nDevice<=0) {
printf("Error no cuda device found. Aborting tests\n");
exit(EXIT_FAILURE);
}
testing::InitGoogleTest(&argc, argv);
result=RUN_ALL_TESTS();
if(result!=0)
printf("Please report random seed %08X along with failure\n", _seed);
return result;
}
|
30db9049a9a0932983a0df984a743a5872304601.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amg_eigensolver.h>
#include <amg_config.h>
#include <eigensolvers/eigensolver.h>
#include <basic_types.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
using std::string;
namespace amgx
{
template< class T_Config >
void AMG_EigenSolver<T_Config>::process_config(AMG_Config &in_cfg, std::string solver_scope)
{
if (in_cfg.getParameter<int>("print_config", solver_scope) == 1)
{
in_cfg.printAMGConfig();
}
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::init()
{
std::string solver_value, solver_scope;
m_cfg->template getParameter<std::string>("eig_solver", solver_value, "default", solver_scope);
process_config(*m_cfg, solver_scope);
// pass thread manager to solver
solver = EigenSolverFactory<T_Config>::allocate(*m_cfg, "default", "eig_solver", m_resources->get_tmng());
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(Resources *res, AMG_Configuration *cfg) : m_with_timings(false), m_resources(res), m_cfg_self(false)
{
if (cfg)
{
m_cfg = cfg->getConfigObject();
}
else
{
m_cfg = res->getResourcesConfig();
}
init();
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(Resources *res, AMG_Configuration &cfg) : m_with_timings(false), m_resources(res), m_cfg_self(true)
{
m_cfg = new AMG_Config;
*m_cfg = *(cfg.getConfigObject());
init();
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(const AMG_EigenSolver<T_Config> &other)
{
solver = other.solver;
m_resources = other.getResources();
m_cfg = other.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = other.m_ptrA;
m_with_timings = other.m_with_timings;
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_EigenSolver<T_Config> &AMG_EigenSolver<T_Config>::operator=(const AMG_EigenSolver<T_Config> &other)
{
solver = other.solver;
m_resources = other.getResources();
m_cfg = other.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = other.m_ptrA;
m_with_timings = other.m_with_timings;
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
return *this;
}
template< class T_Config >
AMG_EigenSolver<T_Config>::~AMG_EigenSolver()
{
if (m_cfg_self)
{
delete m_cfg;
}
if ( solver->decr_ref_count() )
{
delete solver;
if ( !m_with_timings )
{
return;
}
std::cerr << std::endl;
float elapsed_time = 0.0f;
hipEventElapsedTime(&elapsed_time, m_setup_start, m_setup_stop);
std::cerr << "AMG_EigenSolver::setup time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
hipEventElapsedTime(&elapsed_time, m_solve_start, m_solve_stop);
std::cerr << "AMG_EigenSolver::solve time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
hipEventDestroy(m_setup_start);
hipEventDestroy(m_setup_stop);
hipEventDestroy(m_solve_start);
hipEventDestroy(m_solve_stop);
}
}
/****************************************************
* Sets A as the matrix for the AMG system
****************************************************/
template< class T_Config >
void AMG_EigenSolver<T_Config>::setup( Matrix<T_Config> &A)//&A0)
{
if ( m_with_timings )
{
hipEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
solver->setup(A);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_setup_stop);
hipEventSynchronize(m_setup_stop);
}
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::setup_no_throw( Matrix<T_Config> &A)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->setup(A);
}
AMGX_CATCHES(rc)
return rc;
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::pagerank_setup( Vector<T_Config> &vec)//&A0)
{
if ( m_with_timings )
{
hipEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
solver->solver_pagerank_setup(vec);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_setup_stop);
hipEventSynchronize(m_setup_stop);
}
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::pagerank_setup_no_throw( Vector<T_Config> &vec)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->pagerank_setup(vec);
}
AMGX_CATCHES(rc)
return rc;
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::setup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
setup(*m_ptrA);
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::setup_capi_no_throw( std::shared_ptr<Matrix<T_Config>> pA0)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->setup_capi(pA0);
}
AMGX_CATCHES(rc)
return rc;
}
/****************************************************
* Solves the AMG system Ax=b
***************************************************/
template<class T_Config>
AMGX_ERROR AMG_EigenSolver<T_Config>::solve_no_throw( Vector<T_Config> &x, AMGX_STATUS &status )
{
if ( m_with_timings )
{
hipEventRecord(m_solve_start);
}
AMGX_ERROR e = solver->solve_no_throw( x, status );
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_solve_stop);
hipEventSynchronize(m_solve_stop);
}
return e;
}
template<class T_Config>
int AMG_EigenSolver<T_Config>::get_num_iters()
{
return solver->get_num_iters();
}
/****************************************
* Explicit instantiations
***************************************/
//template class AMG_EigenSolver<CommonMatrixTraits>;
#define AMGX_CASE_LINE(CASE) template class AMG_EigenSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
| 30db9049a9a0932983a0df984a743a5872304601.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amg_eigensolver.h>
#include <amg_config.h>
#include <eigensolvers/eigensolver.h>
#include <basic_types.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
using std::string;
namespace amgx
{
template< class T_Config >
void AMG_EigenSolver<T_Config>::process_config(AMG_Config &in_cfg, std::string solver_scope)
{
if (in_cfg.getParameter<int>("print_config", solver_scope) == 1)
{
in_cfg.printAMGConfig();
}
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::init()
{
std::string solver_value, solver_scope;
m_cfg->template getParameter<std::string>("eig_solver", solver_value, "default", solver_scope);
process_config(*m_cfg, solver_scope);
// pass thread manager to solver
solver = EigenSolverFactory<T_Config>::allocate(*m_cfg, "default", "eig_solver", m_resources->get_tmng());
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(Resources *res, AMG_Configuration *cfg) : m_with_timings(false), m_resources(res), m_cfg_self(false)
{
if (cfg)
{
m_cfg = cfg->getConfigObject();
}
else
{
m_cfg = res->getResourcesConfig();
}
init();
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(Resources *res, AMG_Configuration &cfg) : m_with_timings(false), m_resources(res), m_cfg_self(true)
{
m_cfg = new AMG_Config;
*m_cfg = *(cfg.getConfigObject());
init();
}
template< class T_Config >
AMG_EigenSolver<T_Config>::AMG_EigenSolver(const AMG_EigenSolver<T_Config> &other)
{
solver = other.solver;
m_resources = other.getResources();
m_cfg = other.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = other.m_ptrA;
m_with_timings = other.m_with_timings;
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_EigenSolver<T_Config> &AMG_EigenSolver<T_Config>::operator=(const AMG_EigenSolver<T_Config> &other)
{
solver = other.solver;
m_resources = other.getResources();
m_cfg = other.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = other.m_ptrA;
m_with_timings = other.m_with_timings;
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
return *this;
}
template< class T_Config >
AMG_EigenSolver<T_Config>::~AMG_EigenSolver()
{
if (m_cfg_self)
{
delete m_cfg;
}
if ( solver->decr_ref_count() )
{
delete solver;
if ( !m_with_timings )
{
return;
}
std::cerr << std::endl;
float elapsed_time = 0.0f;
cudaEventElapsedTime(&elapsed_time, m_setup_start, m_setup_stop);
std::cerr << "AMG_EigenSolver::setup time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
cudaEventElapsedTime(&elapsed_time, m_solve_start, m_solve_stop);
std::cerr << "AMG_EigenSolver::solve time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
cudaEventDestroy(m_setup_start);
cudaEventDestroy(m_setup_stop);
cudaEventDestroy(m_solve_start);
cudaEventDestroy(m_solve_stop);
}
}
/****************************************************
* Sets A as the matrix for the AMG system
****************************************************/
template< class T_Config >
void AMG_EigenSolver<T_Config>::setup( Matrix<T_Config> &A)//&A0)
{
if ( m_with_timings )
{
cudaEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
solver->setup(A);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_setup_stop);
cudaEventSynchronize(m_setup_stop);
}
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::setup_no_throw( Matrix<T_Config> &A)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->setup(A);
}
AMGX_CATCHES(rc)
return rc;
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::pagerank_setup( Vector<T_Config> &vec)//&A0)
{
if ( m_with_timings )
{
cudaEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
solver->solver_pagerank_setup(vec);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_setup_stop);
cudaEventSynchronize(m_setup_stop);
}
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::pagerank_setup_no_throw( Vector<T_Config> &vec)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->pagerank_setup(vec);
}
AMGX_CATCHES(rc)
return rc;
}
template< class T_Config >
void AMG_EigenSolver<T_Config>::setup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
setup(*m_ptrA);
}
template< class T_Config >
AMGX_ERROR AMG_EigenSolver<T_Config>::setup_capi_no_throw( std::shared_ptr<Matrix<T_Config>> pA0)
{
AMGX_ERROR rc = AMGX_OK;
AMGX_TRIES()
{
this->setup_capi(pA0);
}
AMGX_CATCHES(rc)
return rc;
}
/****************************************************
* Solves the AMG system Ax=b
***************************************************/
template<class T_Config>
AMGX_ERROR AMG_EigenSolver<T_Config>::solve_no_throw( Vector<T_Config> &x, AMGX_STATUS &status )
{
if ( m_with_timings )
{
cudaEventRecord(m_solve_start);
}
AMGX_ERROR e = solver->solve_no_throw( x, status );
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_solve_stop);
cudaEventSynchronize(m_solve_stop);
}
return e;
}
template<class T_Config>
int AMG_EigenSolver<T_Config>::get_num_iters()
{
return solver->get_num_iters();
}
/****************************************
* Explicit instantiations
***************************************/
//template class AMG_EigenSolver<CommonMatrixTraits>;
#define AMGX_CASE_LINE(CASE) template class AMG_EigenSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
a6d7bac2d5466fd0dfacf65d2a3ba45ff2a6ae96.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//pass
//--blockDim=512 --gridDim=1 --warp-sync=32 --no-inline
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#define N 32//128//512
__device__ static __attribute__((always_inline)) void scan_warp (int* A)
{
unsigned int tid = threadIdx.x;
unsigned int lane = tid % 32;
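// Inclusive prefix sum across the 32-thread warp using doubling strides (1, 2, 4, 8, 16).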
if (lane >= 1) A[tid] = A[tid - 1] + A[tid]; // holds for every lane except lane 0
if (lane >= 2) A[tid] = A[tid - 2] + A[tid];
if (lane >= 4) A[tid] = A[tid - 4] + A[tid];
if (lane >= 8) A[tid] = A[tid - 8] + A[tid];
if (lane >= 16) A[tid] = A[tid - 16] + A[tid];
}
__global__ void scan (int* A)
{
unsigned int tid = threadIdx.x;
unsigned int lane = tid % 32;
int temp [32] = {0};
scan_warp(A);
__syncthreads();
if (lane == 31) // lane 31 holds the warp total after scan_warp
temp[tid / 32] = A[tid];
__syncthreads();
if (tid / 32 == 0)
scan_warp(temp);
__syncthreads();
A[tid] += temp[tid/32];
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
a = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = i;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
printf("old a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
//scan<<<1,N>>>(dev_a);
ESBMC_verify_kernel(scan, 1, N, dev_a);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
printf("\nnew a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
free(a);
hipFree(dev_a);
return 0;
}
| a6d7bac2d5466fd0dfacf65d2a3ba45ff2a6ae96.cu | #include <call_kernel.h>
//pass
//--blockDim=512 --gridDim=1 --warp-sync=32 --no-inline
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#define N 32//128//512
__device__ static __attribute__((always_inline)) void scan_warp (int* A)
{
unsigned int tid = threadIdx.x;
unsigned int lane = tid % 32;
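// Inclusive prefix sum across the 32-thread warp using doubling strides (1, 2, 4, 8, 16).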
if (lane >= 1) A[tid] = A[tid - 1] + A[tid]; // holds for every lane except lane 0
if (lane >= 2) A[tid] = A[tid - 2] + A[tid];
if (lane >= 4) A[tid] = A[tid - 4] + A[tid];
if (lane >= 8) A[tid] = A[tid - 8] + A[tid];
if (lane >= 16) A[tid] = A[tid - 16] + A[tid];
}
__global__ void scan (int* A)
{
unsigned int tid = threadIdx.x;
unsigned int lane = tid % 32;
int temp [32] = {0};
scan_warp(A);
__syncthreads();
if (lane == 31) // lane 31 holds the warp total after scan_warp
temp[tid / 32] = A[tid];
__syncthreads();
if (tid / 32 == 0)
scan_warp(temp);
__syncthreads();
A[tid] += temp[tid/32];
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
a = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = i;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
printf("old a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
//scan<<<1,N>>>(dev_a);
ESBMC_verify_kernel(scan, 1, N, dev_a);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
printf("\nnew a: ");
for (int i = 0; i < N; i++)
printf("%d ", a[i]);
free(a);
cudaFree(dev_a);
return 0;
}
|
be80ff13ffe1a1e0728c4ed225bc2966d93ea12c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.hpp"
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <hipcub/hipcub.hpp>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <hipcub/hipcub.hpp>
#include <cuda/std/chrono>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Spark doesn't support RLE encoding for BOOLEANs
#ifdef ENABLE_BOOL_RLE
constexpr bool enable_bool_rle = true;
#else
constexpr bool enable_bool_rle = false;
#endif
using ::cudf::detail::device_2dspan;
constexpr int init_hash_bits = 12;
constexpr uint32_t rle_buffer_size = (1 << 9);
struct frag_init_state_s {
parquet_column_device_view col;
PageFragment frag;
uint32_t total_dupes;
size_type start_value_idx;
volatile uint32_t scratch_red[32];
uint32_t dict[max_page_fragment_size];
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
struct page_enc_state_s {
uint8_t *cur; //!< current output ptr
uint8_t *rle_out; //!< current RLE write ptr
uint32_t rle_run; //!< current RLE run
uint32_t run_val; //!< current RLE run value
uint32_t rle_pos; //!< RLE encoder positions
uint32_t rle_numvals; //!< RLE input value count
uint32_t rle_lit_count;
uint32_t rle_rpt_count;
uint32_t page_start_val;
volatile uint32_t rpt_map[4];
volatile uint32_t scratch_red[32];
EncPage page;
EncColumnChunk ck;
parquet_column_device_view col;
gpu_inflate_input_s comp_in;
gpu_inflate_status_s comp_stat;
uint16_t vals[rle_buffer_size];
};
/**
* @brief Return a 12-bit hash from a byte sequence
*/
inline __device__ uint32_t hash_string(const string_view &val)
{
char const *ptr = val.data();
uint32_t len = val.size_bytes();
if (len != 0) {
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
} else {
return 0;
}
}
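// Worked example: for "abc" this is ('a' + ('c' << 5) + (3 << 10)) & 0xfff
//                 = (97 + 3168 + 3072) & 0xfff = 0x8c1.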
inline __device__ uint32_t uint32_init_hash(uint32_t v)
{
return (v + (v >> 11) + (v >> 22)) & ((1 << init_hash_bits) - 1);
}
inline __device__ uint32_t uint64_init_hash(uint64_t v)
{
return uint32_init_hash(static_cast<uint32_t>(v + (v >> 32)));
}
/**
* @brief Initializes encoder page fragments
*
* Based on the number of rows in each fragment, populates the value count, the size of data in the
* fragment, the number of unique values, and the data size of unique values.
*
 * @param[in] frag Fragment array [column_id][fragment_id]
 * @param[in] col_desc Column description array [column_id]
 * @param[in] fragment_size Number of rows per fragment
 * @param[in] max_num_rows Total number of rows in the table
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuInitPageFragments(device_2dspan<PageFragment> frag,
device_span<parquet_column_device_view const> col_desc,
uint32_t fragment_size,
uint32_t max_num_rows)
{
__shared__ __align__(16) frag_init_state_s state_g;
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
using block_scan = hipcub::BlockScan<uint32_t, block_size>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
frag_init_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t start_row, dtype_len, dtype_len_in, dtype;
if (t == 0) s->col = col_desc[blockIdx.x];
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
start_row = blockIdx.y * fragment_size;
if (!t) {
// frag.num_rows = fragment_size except for the last page fragment which can be smaller.
// num_rows is fixed but fragment size could be larger if the data is strings or nested.
s->frag.num_rows = min(fragment_size, max_num_rows - min(start_row, max_num_rows));
s->frag.non_nulls = 0;
s->frag.num_dict_vals = 0;
s->frag.fragment_data_size = 0;
s->frag.dict_data_size = 0;
s->total_dupes = 0;
// To use num_vals instead of num_rows, we need to calculate num_vals on the fly.
// For list<list<int>>, values between i and i+50 can be calculated by
// off_11 = off[i], off_12 = off[i+50]
// off_21 = child.off[off_11], off_22 = child.off[off_12]
// etc...
size_type end_value_idx = start_row + s->frag.num_rows;
if (s->col.parent_column == nullptr) {
s->start_value_idx = start_row;
} else {
auto col = *(s->col.parent_column);
auto current_start_value_idx = start_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
current_start_value_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
current_start_value_idx =
offset_col.element<size_type>(current_start_value_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s->start_value_idx = current_start_value_idx;
}
s->frag.start_value_idx = s->start_value_idx;
s->frag.num_leaf_values = end_value_idx - s->start_value_idx;
if (s->col.level_offsets != nullptr) {
// For nested schemas, the number of values in a fragment is not directly related to the
// number of encoded data elements or the number of rows. It is simply the number of
// repetition/definition values which together encode validity and nesting information.
size_type first_level_val_idx = s->col.level_offsets[start_row];
size_type last_level_val_idx = s->col.level_offsets[start_row + s->frag.num_rows];
s->frag.num_values = last_level_val_idx - first_level_val_idx;
} else {
s->frag.num_values = s->frag.num_rows;
}
}
dtype = s->col.physical_type;
dtype_len =
(dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : (dtype == BOOLEAN) ? 1 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
} else if (dtype == INT96) {
// cudf doesn't support INT96 internally and uses INT64, so treat INT96 as an INT64 for
// computing dictionary hash values and reading the data, but we do treat it as 12 bytes for
// dtype_len, which determines how much memory we need to allocate for the fragment.
dtype_len_in = 8;
} else {
dtype_len_in = dtype_len;
}
__syncthreads();
size_type nvals = s->frag.num_leaf_values;
size_type start_value_idx = s->start_value_idx;
for (uint32_t i = 0; i < nvals; i += block_size) {
uint32_t val_idx = start_value_idx + i + t;
uint32_t is_valid = (i + t < nvals && val_idx < s->col.leaf_column->size())
? s->col.leaf_column->is_valid(val_idx)
: 0;
uint32_t len, nz_pos, hash;
if (is_valid) {
len = dtype_len;
if (dtype != BOOLEAN) {
if (dtype == BYTE_ARRAY) {
auto str = s->col.leaf_column->element<string_view>(val_idx);
len += str.size_bytes();
hash = hash_string(str);
} else if (dtype_len_in == 8) {
hash = uint64_init_hash(s->col.leaf_column->element<uint64_t>(val_idx));
} else {
hash = uint32_init_hash((dtype_len_in == 4)
? s->col.leaf_column->element<uint32_t>(val_idx)
: (dtype_len_in == 2)
? s->col.leaf_column->element<uint16_t>(val_idx)
: s->col.leaf_column->element<uint8_t>(val_idx));
}
}
} else {
len = 0;
}
uint32_t non_nulls;
block_scan(temp_storage.scan_storage).ExclusiveSum(is_valid, nz_pos, non_nulls);
nz_pos += s->frag.non_nulls;
__syncthreads();
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (!t) {
s->frag.non_nulls += non_nulls;
s->frag.fragment_data_size += len;
}
__syncthreads();
if (is_valid && dtype != BOOLEAN) {
uint32_t *dict_index = s->col.dict_index;
if (dict_index) {
atomicAdd(&s->map.u32[hash >> 1], (hash & 1) ? 1 << 16 : 1);
dict_index[start_value_idx + nz_pos] =
((i + t) << init_hash_bits) |
hash; // Store the hash along with the index, so we don't have to recompute it
}
}
__syncthreads();
}
__syncthreads();
// Reorder the 16-bit local indices according to the hash values
if (s->col.dict_index) {
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
sum_w = (sum_w - (sum67 >> 16)) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
}
__syncthreads();
// Put the indices back in hash order
if (s->col.dict_index) {
uint32_t *dict_index = s->col.dict_index + start_row;
uint32_t nnz = s->frag.non_nulls;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row, val = 0;
bool collision;
if (i + t < nnz) {
val = dict_index[i + t];
hash = val & ((1 << init_hash_bits) - 1);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = val;
}
__syncthreads();
collision = false;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(&s->dict[pos_old], val); }
__syncthreads();
// Resolve collision
if (collision && val == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
    // Now that the values are ordered by hash, compare every entry with the first entry in the hash
    // map; the position of the first entry can be inferred from the hash map counts
uint32_t dupe_data_size = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
uint32_t dict_val = s->dict[i + t];
uint32_t hash = dict_val & ((1 << init_hash_bits) - 1);
ck_row = start_row + (dict_val >> init_hash_bits);
ck_row_ref = start_row + (s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0] >> init_hash_bits);
if (ck_row_ref != ck_row) {
if (dtype == BYTE_ARRAY) {
auto str1 = s->col.leaf_column->element<string_view>(ck_row);
auto str2 = s->col.leaf_column->element<string_view>(ck_row_ref);
is_dupe = (str1 == str2);
dupe_data_size += (is_dupe) ? 4 + str1.size_bytes() : 0;
} else {
if (dtype_len_in == 8) {
auto v1 = s->col.leaf_column->element<uint64_t>(ck_row);
auto v2 = s->col.leaf_column->element<uint64_t>(ck_row_ref);
is_dupe = (v1 == v2);
dupe_data_size += (is_dupe) ? 8 : 0;
} else {
uint32_t v1, v2;
if (dtype_len_in == 4) {
v1 = s->col.leaf_column->element<uint32_t>(ck_row);
v2 = s->col.leaf_column->element<uint32_t>(ck_row_ref);
} else if (dtype_len_in == 2) {
v1 = s->col.leaf_column->element<uint16_t>(ck_row);
v2 = s->col.leaf_column->element<uint16_t>(ck_row_ref);
} else {
v1 = s->col.leaf_column->element<uint8_t>(ck_row);
v2 = s->col.leaf_column->element<uint8_t>(ck_row_ref);
}
is_dupe = (v1 == v2);
dupe_data_size += (is_dupe) ? 4 : 0;
}
}
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (t == 0) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
s->col.dict_data[start_row + i + t - dupes_before] = ck_row;
} else {
s->col.dict_index[ck_row] = ck_row_ref | (1u << 31);
}
}
}
__syncthreads();
dupe_data_size = block_reduce(temp_storage.reduce_storage).Sum(dupe_data_size);
if (!t) {
s->frag.dict_data_size = s->frag.fragment_data_size - dupe_data_size;
s->frag.num_dict_vals = s->frag.non_nulls - s->total_dupes;
}
}
__syncthreads();
if (t == 0) frag[blockIdx.x][blockIdx.y] = s->frag;
}
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuInitFragmentStats(device_2dspan<statistics_group> groups,
device_2dspan<PageFragment const> fragments,
device_span<parquet_column_device_view const> col_desc)
{
// TODO: why not 1 block per warp?
__shared__ __align__(8) statistics_group group_g[4];
uint32_t lane_id = threadIdx.x & 0x1f;
uint32_t frag_id = blockIdx.y * 4 + (threadIdx.x >> 5);
uint32_t column_id = blockIdx.x;
auto num_fragments_per_column = fragments.size().second;
statistics_group *const g = &group_g[threadIdx.x >> 5];
if (!lane_id && frag_id < num_fragments_per_column) {
g->col = &col_desc[column_id];
g->start_row = fragments[column_id][frag_id].start_value_idx;
g->num_rows = fragments[column_id][frag_id].num_leaf_values;
}
__syncthreads();
if (frag_id < num_fragments_per_column and lane_id == 0) groups[column_id][frag_id] = *g;
}
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuInitPages(device_2dspan<EncColumnChunk> chunks,
device_span<gpu::EncPage> pages,
device_span<parquet_column_device_view const> col_desc,
statistics_merge_group *page_grstats,
statistics_merge_group *chunk_grstats,
int32_t num_columns)
{
// TODO: All writing seems to be done by thread 0. Could be replaced by thrust foreach
__shared__ __align__(8) parquet_column_device_view col_g;
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) PageFragment frag_g;
__shared__ __align__(8) EncPage page_g;
__shared__ __align__(8) statistics_merge_group pagestats_g;
uint32_t t = threadIdx.x;
if (t == 0) {
col_g = col_desc[blockIdx.x];
ck_g = chunks[blockIdx.y][blockIdx.x];
page_g = {};
}
__syncthreads();
if (t < 32) {
uint32_t fragments_in_chunk = 0;
uint32_t rows_in_page = 0;
uint32_t values_in_page = 0;
uint32_t leaf_values_in_page = 0;
uint32_t page_size = 0;
uint32_t num_pages = 0;
uint32_t num_rows = 0;
uint32_t page_start = 0;
uint32_t page_offset = ck_g.ck_stat_size;
uint32_t num_dict_entries = 0;
uint32_t comp_page_offset = ck_g.ck_stat_size;
uint32_t cur_row = ck_g.start_row;
uint32_t ck_max_stats_len = 0;
uint32_t max_stats_len = 0;
if (!t) {
pagestats_g.col = &col_desc[blockIdx.x];
pagestats_g.start_chunk = ck_g.first_fragment;
pagestats_g.num_chunks = 0;
}
if (ck_g.has_dictionary) {
if (!t) {
page_g.page_data = ck_g.uncompressed_bfr + page_offset;
page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset;
page_g.num_fragments = 0;
page_g.page_type = PageType::DICTIONARY_PAGE;
page_g.dict_bits_plus1 = 0;
page_g.chunk = &chunks[blockIdx.y][blockIdx.x];
page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x;
page_g.hdr_size = 0;
page_g.max_hdr_size = 32;
page_g.max_data_size = ck_g.dictionary_size;
page_g.start_row = cur_row;
page_g.num_rows = ck_g.total_dict_entries;
page_g.num_leaf_values = ck_g.total_dict_entries;
page_g.num_values = ck_g.total_dict_entries;
page_offset += page_g.max_hdr_size + page_g.max_data_size;
comp_page_offset += page_g.max_hdr_size + GetMaxCompressedBfrSize(page_g.max_data_size);
}
__syncwarp();
if (t == 0) {
if (not pages.empty()) pages[ck_g.first_page] = page_g;
if (page_grstats) page_grstats[ck_g.first_page] = pagestats_g;
}
num_pages = 1;
}
__syncwarp();
// This loop goes over one page fragment at a time and adds it to page.
// When page size crosses a particular limit, then it moves on to the next page and then next
// page fragment gets added to that one.
// This doesn't actually deal with data. It's agnostic. It only cares about number of rows and
// page size.
do {
uint32_t fragment_data_size, max_page_size, minmax_len = 0;
__syncwarp();
if (num_rows < ck_g.num_rows) {
if (t == 0) { frag_g = ck_g.fragments[fragments_in_chunk]; }
if (!t && ck_g.stats && col_g.stats_dtype == dtype_string) {
minmax_len = max(ck_g.stats[fragments_in_chunk].min_value.str_val.length,
ck_g.stats[fragments_in_chunk].max_value.str_val.length);
}
} else if (!t) {
frag_g.fragment_data_size = 0;
frag_g.num_rows = 0;
}
__syncwarp();
if (ck_g.has_dictionary && fragments_in_chunk < ck_g.num_dict_fragments) {
fragment_data_size =
frag_g.num_leaf_values * 2; // Assume worst-case of 2-bytes per dictionary index
} else {
fragment_data_size = frag_g.fragment_data_size;
}
// TODO (dm): this convoluted logic to limit page size needs refactoring
max_page_size = (values_in_page * 2 >= ck_g.num_values)
? 256 * 1024
: (values_in_page * 3 >= ck_g.num_values) ? 384 * 1024 : 512 * 1024;
if (num_rows >= ck_g.num_rows ||
(values_in_page > 0 &&
(page_size + fragment_data_size > max_page_size ||
(ck_g.has_dictionary && fragments_in_chunk == ck_g.num_dict_fragments)))) {
uint32_t dict_bits_plus1;
if (ck_g.has_dictionary && page_start < ck_g.num_dict_fragments) {
uint32_t dict_bits;
if (num_dict_entries <= 2) {
dict_bits = 1;
} else if (num_dict_entries <= 4) {
dict_bits = 2;
} else if (num_dict_entries <= 16) {
dict_bits = 4;
} else if (num_dict_entries <= 256) {
dict_bits = 8;
} else if (num_dict_entries <= 4096) {
dict_bits = 12;
} else {
dict_bits = 16;
}
page_size = 1 + 5 + ((values_in_page * dict_bits + 7) >> 3) + (values_in_page >> 8);
dict_bits_plus1 = dict_bits + 1;
} else {
dict_bits_plus1 = 0;
}
if (!t) {
page_g.num_fragments = fragments_in_chunk - page_start;
page_g.chunk = &chunks[blockIdx.y][blockIdx.x];
page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x;
page_g.page_type = PageType::DATA_PAGE;
page_g.dict_bits_plus1 = dict_bits_plus1;
page_g.hdr_size = 0;
page_g.max_hdr_size = 32; // Max size excluding statistics
if (ck_g.stats) {
uint32_t stats_hdr_len = 16;
if (col_g.stats_dtype == dtype_string) {
stats_hdr_len += 5 * 3 + 2 * max_stats_len;
} else {
stats_hdr_len += ((col_g.stats_dtype >= dtype_int64) ? 10 : 5) * 3;
}
page_g.max_hdr_size += stats_hdr_len;
}
page_g.page_data = ck_g.uncompressed_bfr + page_offset;
page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset;
page_g.start_row = cur_row;
page_g.num_rows = rows_in_page;
page_g.num_leaf_values = leaf_values_in_page;
page_g.num_values = values_in_page;
uint32_t def_level_bits = col_g.num_def_level_bits();
uint32_t rep_level_bits = col_g.num_rep_level_bits();
// Run length = 4, max(rle/bitpack header) = 5, add one byte per 256 values for overhead
// TODO (dm): Improve readability of these calculations.
uint32_t def_level_size =
(def_level_bits != 0)
? 4 + 5 + ((def_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8)
: 0;
uint32_t rep_level_size =
(rep_level_bits != 0)
? 4 + 5 + ((rep_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8)
: 0;
page_g.max_data_size = page_size + def_level_size + rep_level_size;
pagestats_g.start_chunk = ck_g.first_fragment + page_start;
pagestats_g.num_chunks = page_g.num_fragments;
page_offset += page_g.max_hdr_size + page_g.max_data_size;
comp_page_offset += page_g.max_hdr_size + GetMaxCompressedBfrSize(page_g.max_data_size);
cur_row += rows_in_page;
ck_max_stats_len = max(ck_max_stats_len, max_stats_len);
}
__syncwarp();
if (t == 0) {
if (not pages.empty()) { pages[ck_g.first_page + num_pages] = page_g; }
if (page_grstats) { page_grstats[ck_g.first_page + num_pages] = pagestats_g; }
}
num_pages++;
page_size = 0;
rows_in_page = 0;
values_in_page = 0;
leaf_values_in_page = 0;
page_start = fragments_in_chunk;
max_stats_len = 0;
}
max_stats_len = max(max_stats_len, minmax_len);
num_dict_entries += frag_g.num_dict_vals;
page_size += fragment_data_size;
rows_in_page += frag_g.num_rows;
values_in_page += frag_g.num_values;
leaf_values_in_page += frag_g.num_leaf_values;
num_rows += frag_g.num_rows;
fragments_in_chunk++;
} while (frag_g.num_rows != 0);
__syncwarp();
if (!t) {
if (ck_g.ck_stat_size == 0 && ck_g.stats) {
uint32_t ck_stat_size = 48 + 2 * ck_max_stats_len;
page_offset += ck_stat_size;
comp_page_offset += ck_stat_size;
ck_g.ck_stat_size = ck_stat_size;
}
ck_g.num_pages = num_pages;
ck_g.bfr_size = page_offset;
ck_g.compressed_size = comp_page_offset;
pagestats_g.start_chunk = ck_g.first_page + ck_g.has_dictionary; // Exclude dictionary
pagestats_g.num_chunks = num_pages - ck_g.has_dictionary;
}
}
__syncthreads();
if (t == 0) {
if (not pages.empty()) ck_g.pages = &pages[ck_g.first_page];
chunks[blockIdx.y][blockIdx.x] = ck_g;
if (chunk_grstats) chunk_grstats[blockIdx.y * num_columns + blockIdx.x] = pagestats_g;
}
}
/**
 * @brief Mask table, indexed by [nbits-1], giving how many consecutive repeats are needed to
 * code a repeat run
*/
static __device__ __constant__ uint32_t kRleRunMask[16] = {
0x00ffffff, 0x0fff, 0x00ff, 0x3f, 0x0f, 0x0f, 0x7, 0x7, 0x3, 0x3, 0x3, 0x3, 0x1, 0x1, 0x1, 0x1};
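// e.g. kRleRunMask[0] (nbits = 1) has 24 bits set while kRleRunMask[15] (nbits = 16) has one:
// roughly floor(24 / nbits) consecutive repeats -- about three bytes' worth of bit-packed
// literals -- are required before switching from a literal run to a repeat run.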
/**
* @brief Variable-length encode an integer
*/
inline __device__ uint8_t *VlqEncode(uint8_t *p, uint32_t v)
{
while (v > 0x7f) {
*p++ = (v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
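// Example: VlqEncode(p, 300) emits the ULEB128 bytes 0xac 0x02
// (low 7 bits 0x2c with the continuation bit set, then the remaining 0x02).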
/**
* @brief Pack literal values in output bitstream (1,2,4,8,12 or 16 bits per value)
*/
inline __device__ void PackLiterals(
uint8_t *dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t)
{
if (w == 1 || w == 2 || w == 4 || w == 8 || w == 12 || w == 16) {
if (t <= (count | 0x1f)) {
if (w == 1 || w == 2 || w == 4) {
uint32_t mask = 0;
if (w == 1) {
v |= shuffle_xor(v, 1) << 1;
v |= shuffle_xor(v, 2) << 2;
v |= shuffle_xor(v, 4) << 4;
mask = 0x7;
} else if (w == 2) {
v |= shuffle_xor(v, 1) << 2;
v |= shuffle_xor(v, 2) << 4;
mask = 0x3;
} else if (w == 4) {
v |= shuffle_xor(v, 1) << 4;
mask = 0x1;
}
if (t < count && mask && !(t & mask)) { dst[(t * w) >> 3] = v; }
return;
} else if (w == 8) {
if (t < count) { dst[t] = v; }
return;
} else if (w == 12) {
v |= shuffle_xor(v, 1) << 12;
if (t < count && !(t & 1)) {
dst[(t >> 1) * 3 + 0] = v;
dst[(t >> 1) * 3 + 1] = v >> 8;
dst[(t >> 1) * 3 + 2] = v >> 16;
}
return;
} else if (w == 16) {
if (t < count) {
dst[t * 2 + 0] = v;
dst[t * 2 + 1] = v >> 8;
}
return;
}
} else {
return;
}
} else {
// Scratch space to temporarily write to. Needed because we will use atomics to write 32 bit
// words but the destination mem may not be a multiple of 4 bytes.
// TODO (dm): This assumes blockdim = 128 and max bits per value = 16. Reduce magic numbers.
__shared__ uint32_t scratch[64];
if (t < 64) { scratch[t] = 0; }
__syncthreads();
if (t <= count) {
uint64_t v64 = v;
v64 <<= (t * w) & 0x1f;
// Copy 64 bit word into two 32 bit words while following C++ strict aliasing rules.
uint32_t v32[2];
memcpy(&v32, &v64, sizeof(uint64_t));
// Atomically write result to scratch
if (v32[0]) { atomicOr(scratch + ((t * w) >> 5), v32[0]); }
if (v32[1]) { atomicOr(scratch + ((t * w) >> 5) + 1, v32[1]); }
}
__syncthreads();
// Copy scratch data to final destination
auto available_bytes = (count * w + 7) / 8;
auto scratch_bytes = reinterpret_cast<char *>(&scratch[0]);
if (t < available_bytes) { dst[t] = scratch_bytes[t]; }
if (t + 128 < available_bytes) { dst[t + 128] = scratch_bytes[t + 128]; }
__syncthreads();
}
}
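// Example: with w == 4 and threads 0..3 holding the values 1, 2, 3, 4, the bytes {0x21, 0x43}
// are written -- each value occupies w bits, little-endian within the byte, so value 0 lands in
// the low nibble of byte 0.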
/**
* @brief RLE encoder
*
* @param[in,out] s Page encode state
* @param[in] numvals Total count of input values
* @param[in] nbits number of bits per symbol (1..16)
* @param[in] flush nonzero if last batch in block
* @param[in] t thread id (0..127)
*/
static __device__ void RleEncode(
page_enc_state_s *s, uint32_t numvals, uint32_t nbits, uint32_t flush, uint32_t t)
{
uint32_t rle_pos = s->rle_pos;
uint32_t rle_run = s->rle_run;
while (rle_pos < numvals || (flush && rle_run)) {
uint32_t pos = rle_pos + t;
if (rle_run > 0 && !(rle_run & 1)) {
// Currently in a long repeat run
uint32_t mask = ballot(pos < numvals && s->vals[pos & (rle_buffer_size - 1)] == s->run_val);
uint32_t rle_rpt_count, max_rpt_count;
if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; }
__syncthreads();
if (t < 32) {
uint32_t c32 = ballot(t >= 4 || s->rpt_map[t] != 0xffffffffu);
if (!t) {
uint32_t last_idx = __ffs(c32) - 1;
s->rle_rpt_count =
last_idx * 32 + ((last_idx < 4) ? __ffs(~s->rpt_map[last_idx]) - 1 : 0);
}
}
__syncthreads();
max_rpt_count = min(numvals - rle_pos, 128);
rle_rpt_count = s->rle_rpt_count;
rle_run += rle_rpt_count << 1;
rle_pos += rle_rpt_count;
if (rle_rpt_count < max_rpt_count || (flush && rle_pos == numvals)) {
if (t == 0) {
uint32_t const run_val = s->run_val;
uint8_t *dst = VlqEncode(s->rle_out, rle_run);
*dst++ = run_val;
if (nbits > 8) { *dst++ = run_val >> 8; }
s->rle_out = dst;
}
rle_run = 0;
}
} else {
// New run or in a literal run
uint32_t v0 = s->vals[pos & (rle_buffer_size - 1)];
uint32_t v1 = s->vals[(pos + 1) & (rle_buffer_size - 1)];
uint32_t mask = ballot(pos + 1 < numvals && v0 == v1);
uint32_t maxvals = min(numvals - rle_pos, 128);
uint32_t rle_lit_count, rle_rpt_count;
if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; }
__syncthreads();
if (t < 32) {
// Repeat run can only start on a multiple of 8 values
uint32_t idx8 = (t * 8) >> 5;
uint32_t pos8 = (t * 8) & 0x1f;
uint32_t m0 = (idx8 < 4) ? s->rpt_map[idx8] : 0;
uint32_t m1 = (idx8 < 3) ? s->rpt_map[idx8 + 1] : 0;
uint32_t needed_mask = kRleRunMask[nbits - 1];
mask = ballot((__funnelshift_r(m0, m1, pos8) & needed_mask) == needed_mask);
if (!t) {
uint32_t rle_run_start = (mask != 0) ? min((__ffs(mask) - 1) * 8, maxvals) : maxvals;
uint32_t rpt_len = 0;
if (rle_run_start < maxvals) {
uint32_t idx_cur = rle_run_start >> 5;
uint32_t idx_ofs = rle_run_start & 0x1f;
while (idx_cur < 4) {
m0 = (idx_cur < 4) ? s->rpt_map[idx_cur] : 0;
m1 = (idx_cur < 3) ? s->rpt_map[idx_cur + 1] : 0;
mask = ~__funnelshift_r(m0, m1, idx_ofs);
if (mask != 0) {
rpt_len += __ffs(mask) - 1;
break;
}
rpt_len += 32;
idx_cur++;
}
}
s->rle_lit_count = rle_run_start;
s->rle_rpt_count = min(rpt_len, maxvals - rle_run_start);
}
}
__syncthreads();
rle_lit_count = s->rle_lit_count;
rle_rpt_count = s->rle_rpt_count;
if (rle_lit_count != 0 || (rle_run != 0 && rle_rpt_count != 0)) {
uint32_t lit_div8;
bool need_more_data = false;
if (!flush && rle_pos + rle_lit_count == numvals) {
// Wait for more data
rle_lit_count -= min(rle_lit_count, 24);
need_more_data = true;
}
if (rle_lit_count != 0) {
lit_div8 = (rle_lit_count + ((flush && rle_pos + rle_lit_count == numvals) ? 7 : 0)) >> 3;
if (rle_run + lit_div8 * 2 > 0x7f) {
lit_div8 = 0x3f - (rle_run >> 1); // Limit to fixed 1-byte header (504 literals)
rle_rpt_count = 0; // Defer repeat run
}
if (lit_div8 != 0) {
uint8_t *dst = s->rle_out + 1 + (rle_run >> 1) * nbits;
PackLiterals(dst, (rle_pos + t < numvals) ? v0 : 0, lit_div8 * 8, nbits, t);
rle_run = (rle_run + lit_div8 * 2) | 1;
rle_pos = min(rle_pos + lit_div8 * 8, numvals);
}
}
if (rle_run >= ((rle_rpt_count != 0 || (flush && rle_pos == numvals)) ? 0x03 : 0x7f)) {
__syncthreads();
// Complete literal run
if (!t) {
uint8_t *dst = s->rle_out;
dst[0] = rle_run; // At most 0x7f
dst += 1 + nbits * (rle_run >> 1);
s->rle_out = dst;
}
rle_run = 0;
}
if (need_more_data) { break; }
}
// Start a repeat run
if (rle_rpt_count != 0) {
if (t == s->rle_lit_count) { s->run_val = v0; }
rle_run = rle_rpt_count * 2;
rle_pos += rle_rpt_count;
if (rle_pos + 1 == numvals && !flush) { break; }
}
}
__syncthreads();
}
__syncthreads();
if (!t) {
s->rle_run = rle_run;
s->rle_pos = rle_pos;
s->rle_numvals = numvals;
}
}
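// The emitted stream follows the Parquet RLE / bit-packing hybrid format: each run starts with a
// ULEB128 header. An even header (LSB 0) is a repeat run of (header >> 1) values followed by the
// value itself in one byte (two if nbits > 8); an odd header (LSB 1) is a literal run of
// (header >> 1) groups of 8 bit-packed values. e.g. 1000 copies of the value 7 at nbits <= 8
// encode as the three bytes 0xd0 0x0f 0x07 (header 2000, then the value).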
/**
* @brief PLAIN bool encoder
*
* @param[in,out] s Page encode state
* @param[in] numvals Total count of input values
* @param[in] flush nonzero if last batch in block
* @param[in] t thread id (0..127)
*/
static __device__ void PlainBoolEncode(page_enc_state_s *s,
uint32_t numvals,
uint32_t flush,
uint32_t t)
{
uint32_t rle_pos = s->rle_pos;
uint8_t *dst = s->rle_out;
while (rle_pos < numvals) {
uint32_t pos = rle_pos + t;
uint32_t v = (pos < numvals) ? s->vals[pos & (rle_buffer_size - 1)] : 0;
uint32_t n = min(numvals - rle_pos, 128);
uint32_t nbytes = (n + ((flush) ? 7 : 0)) >> 3;
if (!nbytes) { break; }
v |= shuffle_xor(v, 1) << 1;
v |= shuffle_xor(v, 2) << 2;
v |= shuffle_xor(v, 4) << 4;
if (t < n && !(t & 7)) { dst[t >> 3] = v; }
rle_pos = min(rle_pos + nbytes * 8, numvals);
dst += nbytes;
}
__syncthreads();
if (!t) {
s->rle_pos = rle_pos;
s->rle_numvals = numvals;
s->rle_out = dst;
}
}
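// PLAIN-encoded booleans are bit-packed LSB-first, eight values per byte: the eight values
// {1,0,1,1,0,0,0,0} produce the single byte 0x0d.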
constexpr auto julian_calendar_epoch_diff()
{
using namespace cuda::std::chrono;
using namespace cuda::std::chrono_literals;
return sys_days{January / 1 / 1970} - (sys_days{November / 24 / -4713} + 12h);
}
/**
* @brief Converts a sys_time<nanoseconds> into a pair with nanoseconds since midnight and number of
* Julian days. Does not deal with time zones. Used by INT96 code.
*
* @param ns number of nanoseconds since epoch
* @return std::pair<nanoseconds,days> where nanoseconds is the number of nanoseconds
* elapsed in the day and days is the number of days from Julian epoch.
*/
static __device__ std::pair<cuda::std::chrono::nanoseconds, cuda::std::chrono::days>
convert_nanoseconds(cuda::std::chrono::sys_time<cuda::std::chrono::nanoseconds> const ns)
{
using namespace cuda::std::chrono;
auto const nanosecond_ticks = ns.time_since_epoch();
auto const gregorian_days = floor<days>(nanosecond_ticks);
auto const julian_days = gregorian_days + ceil<days>(julian_calendar_epoch_diff());
auto const last_day_ticks = nanosecond_ticks - duration_cast<nanoseconds>(gregorian_days);
return {last_day_ticks, julian_days};
}
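// Example: 1970-01-02T06:00:00 UTC (108,000,000,000,000 ns since the epoch) splits into
// last_day_ticks = 6 hours of nanoseconds and julian_days = 2440589, the Julian Day Number
// of 1970-01-02.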
// blockDim(128, 1, 1)
template <int block_size>
__global__ void __launch_bounds__(128, 8)
gpuEncodePages(device_span<gpu::EncPage> pages,
device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat)
{
__shared__ __align__(8) page_enc_state_s state_g;
using block_scan = hipcub::BlockScan<uint32_t, block_size>;
__shared__ typename block_scan::TempStorage temp_storage;
page_enc_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len_in, dtype_len_out;
int32_t dict_bits;
if (t == 0) {
s->page = pages[blockIdx.x];
s->ck = *s->page.chunk;
s->col = *s->ck.col_desc;
s->cur = s->page.page_data + s->page.max_hdr_size;
}
__syncthreads();
// Encode Repetition and Definition levels
if (s->page.page_type != PageType::DICTIONARY_PAGE &&
(s->col.num_def_level_bits()) != 0 && // This means max definition level is not 0 (nullable)
(s->col.num_rep_level_bits()) == 0 // This means there are no repetition levels (non-list)
) {
// Calculate definition levels from validity
uint32_t def_lvl_bits = s->col.num_def_level_bits();
if (def_lvl_bits != 0) {
if (!t) {
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = s->cur + 4;
}
__syncthreads();
while (s->rle_numvals < s->page.num_rows) {
uint32_t rle_numvals = s->rle_numvals;
uint32_t nrows = min(s->page.num_rows - rle_numvals, 128);
uint32_t row = s->page.start_row + rle_numvals + t;
// Definition level encodes validity. Checks the valid map and if it is valid, then sets the
// def_lvl accordingly and sets it in s->vals which is then given to RleEncode to encode
uint32_t def_lvl = [&]() {
bool within_bounds = rle_numvals + t < s->page.num_rows && row < s->col.num_rows;
if (not within_bounds) { return 0u; }
uint32_t def = 0;
size_type l = 0;
bool is_col_struct = false;
auto col = *s->col.parent_column;
do {
// If col not nullable then it does not contribute to def levels
if (s->col.nullability[l]) {
if (col.is_valid(row)) {
++def;
} else {
// We have found the shallowest level at which this row is null
break;
}
}
is_col_struct = (col.type().id() == type_id::STRUCT);
if (is_col_struct) {
row += col.offset();
col = col.child(0);
++l;
}
} while (is_col_struct);
return def;
}();
s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = def_lvl;
__syncthreads();
rle_numvals += nrows;
RleEncode(s, rle_numvals, def_lvl_bits, (rle_numvals == s->page.num_rows), t);
__syncthreads();
}
if (t < 32) {
uint8_t *cur = s->cur;
uint8_t *rle_out = s->rle_out;
if (t < 4) {
uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4;
cur[t] = rle_bytes >> (t * 8);
}
__syncwarp();
if (t == 0) { s->cur = rle_out; }
}
}
} else if (s->page.page_type != PageType::DICTIONARY_PAGE &&
s->col.num_rep_level_bits() != 0 // This means there ARE repetition levels (has list)
) {
auto encode_levels = [&](uint8_t const *lvl_val_data, uint32_t nbits) {
// For list types, the repetition and definition levels are pre-calculated. We just need to
// encode and write them now.
if (!t) {
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = s->cur + 4;
}
__syncthreads();
size_type page_first_val_idx = s->col.level_offsets[s->page.start_row];
size_type col_last_val_idx = s->col.level_offsets[s->col.num_rows];
while (s->rle_numvals < s->page.num_values) {
uint32_t rle_numvals = s->rle_numvals;
uint32_t nvals = min(s->page.num_values - rle_numvals, 128);
uint32_t idx = page_first_val_idx + rle_numvals + t;
uint32_t lvl_val =
(rle_numvals + t < s->page.num_values && idx < col_last_val_idx) ? lvl_val_data[idx] : 0;
s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = lvl_val;
__syncthreads();
rle_numvals += nvals;
RleEncode(s, rle_numvals, nbits, (rle_numvals == s->page.num_values), t);
__syncthreads();
}
if (t < 32) {
uint8_t *cur = s->cur;
uint8_t *rle_out = s->rle_out;
if (t < 4) {
uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4;
cur[t] = rle_bytes >> (t * 8);
}
__syncwarp();
if (t == 0) { s->cur = rle_out; }
}
};
encode_levels(s->col.rep_values, s->col.num_rep_level_bits());
__syncthreads();
encode_levels(s->col.def_values, s->col.num_def_level_bits());
}
// Encode data values
__syncthreads();
dtype = s->col.physical_type;
dtype_len_out =
(dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : (dtype == BOOLEAN) ? 1 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
} else if (dtype == INT96) {
dtype_len_in = 8;
} else {
dtype_len_in = dtype_len_out;
}
dict_bits = (dtype == BOOLEAN) ? 1 : (s->page.dict_bits_plus1 - 1);
if (t == 0) {
uint8_t *dst = s->cur;
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = dst;
if (dict_bits >= 0 && dtype != BOOLEAN) {
dst[0] = dict_bits;
s->rle_out = dst + 1;
}
s->page_start_val = s->page.start_row;
if (s->col.parent_column != nullptr) {
auto col = *(s->col.parent_column);
auto current_page_start_val = s->page_start_val;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
current_page_start_val += col.offset();
col = col.child(0);
} else {
current_page_start_val = col.child(lists_column_view::offsets_column_index)
.element<size_type>(current_page_start_val + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s->page_start_val = current_page_start_val;
}
}
__syncthreads();
for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) {
uint32_t nvals = min(s->page.num_leaf_values - cur_val_idx, 128);
uint32_t val_idx = s->page_start_val + cur_val_idx + t;
uint32_t is_valid, len, pos;
if (s->page.page_type == PageType::DICTIONARY_PAGE) {
is_valid = (cur_val_idx + t < s->page.num_leaf_values);
val_idx = (is_valid) ? s->col.dict_data[val_idx] : val_idx;
} else {
is_valid = (val_idx < s->col.leaf_column->size() && cur_val_idx + t < s->page.num_leaf_values)
? s->col.leaf_column->is_valid(val_idx)
: 0;
}
cur_val_idx += nvals;
if (dict_bits >= 0) {
// Dictionary encoding
if (dict_bits > 0) {
uint32_t rle_numvals;
uint32_t rle_numvals_in_block;
block_scan(temp_storage).ExclusiveSum(is_valid, pos, rle_numvals_in_block);
rle_numvals = s->rle_numvals;
if (is_valid) {
uint32_t v;
if (dtype == BOOLEAN) {
v = s->col.leaf_column->element<uint8_t>(val_idx);
} else {
v = s->col.dict_index[val_idx];
}
s->vals[(rle_numvals + pos) & (rle_buffer_size - 1)] = v;
}
rle_numvals += rle_numvals_in_block;
__syncthreads();
if ((!enable_bool_rle) && (dtype == BOOLEAN)) {
PlainBoolEncode(s, rle_numvals, (cur_val_idx == s->page.num_leaf_values), t);
} else {
RleEncode(s, rle_numvals, dict_bits, (cur_val_idx == s->page.num_leaf_values), t);
}
__syncthreads();
}
if (t == 0) { s->cur = s->rle_out; }
__syncthreads();
} else {
// Non-dictionary encoding
uint8_t *dst = s->cur;
if (is_valid) {
len = dtype_len_out;
if (dtype == BYTE_ARRAY) {
len += s->col.leaf_column->element<string_view>(val_idx).size_bytes();
}
} else {
len = 0;
}
uint32_t total_len = 0;
block_scan(temp_storage).ExclusiveSum(len, pos, total_len);
__syncthreads();
if (t == 0) { s->cur = dst + total_len; }
if (is_valid) {
switch (dtype) {
case INT32:
case FLOAT: {
int32_t v;
if (dtype_len_in == 4)
v = s->col.leaf_column->element<int32_t>(val_idx);
else if (dtype_len_in == 2)
v = s->col.leaf_column->element<int16_t>(val_idx);
else
v = s->col.leaf_column->element<int8_t>(val_idx);
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
} break;
case INT64: {
int64_t v = s->col.leaf_column->element<int64_t>(val_idx);
int32_t ts_scale = s->col.ts_scale;
if (ts_scale != 0) {
if (ts_scale < 0) {
v /= -ts_scale;
} else {
v *= ts_scale;
}
}
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
dst[pos + 4] = v >> 32;
dst[pos + 5] = v >> 40;
dst[pos + 6] = v >> 48;
dst[pos + 7] = v >> 56;
} break;
case INT96: {
int64_t v = s->col.leaf_column->element<int64_t>(val_idx);
int32_t ts_scale = s->col.ts_scale;
if (ts_scale != 0) {
if (ts_scale < 0) {
v /= -ts_scale;
} else {
v *= ts_scale;
}
}
auto const ret = convert_nanoseconds([&]() {
using namespace cuda::std::chrono;
switch (s->col.leaf_column->type().id()) {
case type_id::TIMESTAMP_SECONDS:
case type_id::TIMESTAMP_MILLISECONDS: {
return sys_time<nanoseconds>{milliseconds{v}};
} break;
case type_id::TIMESTAMP_MICROSECONDS:
case type_id::TIMESTAMP_NANOSECONDS: {
return sys_time<nanoseconds>{microseconds{v}};
} break;
}
return sys_time<nanoseconds>{microseconds{0}};
}());
            // Write the 12 bytes of INT96 fixed-length data: 8 bytes of nanoseconds within the
            // day followed by 4 bytes of Julian day.
v = ret.first.count();
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
dst[pos + 4] = v >> 32;
dst[pos + 5] = v >> 40;
dst[pos + 6] = v >> 48;
dst[pos + 7] = v >> 56;
uint32_t w = ret.second.count();
dst[pos + 8] = w;
dst[pos + 9] = w >> 8;
dst[pos + 10] = w >> 16;
dst[pos + 11] = w >> 24;
} break;
case DOUBLE: {
auto v = s->col.leaf_column->element<double>(val_idx);
memcpy(dst + pos, &v, 8);
} break;
case BYTE_ARRAY: {
auto str = s->col.leaf_column->element<string_view>(val_idx);
uint32_t v = len - 4; // string length
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
if (v != 0) memcpy(dst + pos + 4, str.data(), v);
} break;
}
}
__syncthreads();
}
}
if (t == 0) {
uint8_t *base = s->page.page_data + s->page.max_hdr_size;
uint32_t actual_data_size = static_cast<uint32_t>(s->cur - base);
uint32_t compressed_bfr_size = GetMaxCompressedBfrSize(actual_data_size);
s->page.max_data_size = actual_data_size;
s->comp_in.srcDevice = base;
s->comp_in.srcSize = actual_data_size;
s->comp_in.dstDevice = s->page.compressed_data + s->page.max_hdr_size;
s->comp_in.dstSize = compressed_bfr_size;
s->comp_stat.bytes_written = 0;
s->comp_stat.status = ~0;
s->comp_stat.reserved = 0;
}
__syncthreads();
if (t == 0) {
pages[blockIdx.x] = s->page;
if (not comp_in.empty()) comp_in[blockIdx.x] = s->comp_in;
if (not comp_stat.empty()) {
comp_stat[blockIdx.x] = s->comp_stat;
pages[blockIdx.x].comp_stat = &comp_stat[blockIdx.x];
}
}
}
// blockDim(128, 1, 1)
__global__ void __launch_bounds__(128) gpuDecideCompression(device_span<EncColumnChunk> chunks)
{
// After changing the way structs are loaded from coop to normal, this kernel has no business
// being launched with 128 thread block. It can easily be a single warp.
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(4) unsigned int error_count;
using warp_reduce = hipcub::WarpReduce<uint32_t>;
__shared__ typename warp_reduce::TempStorage temp_storage[2];
__shared__ volatile bool has_compression;
uint32_t t = threadIdx.x;
uint32_t uncompressed_data_size = 0;
uint32_t compressed_data_size = 0;
uint32_t num_pages;
if (t == 0) {
ck_g = chunks[blockIdx.x];
atomicAnd(&error_count, 0);
has_compression = false;
}
__syncthreads();
if (t < 32) {
num_pages = ck_g.num_pages;
for (uint32_t page = t; page < num_pages; page += 32) {
auto &curr_page = ck_g.pages[page];
uint32_t page_data_size = curr_page.max_data_size;
uncompressed_data_size += page_data_size;
if (auto comp_status = curr_page.comp_stat; comp_status != nullptr) {
has_compression = true;
compressed_data_size += comp_status->bytes_written;
if (comp_status->status != 0) { atomicAdd(&error_count, 1); }
}
}
uncompressed_data_size = warp_reduce(temp_storage[0]).Sum(uncompressed_data_size);
compressed_data_size = warp_reduce(temp_storage[1]).Sum(compressed_data_size);
}
__syncthreads();
if (t == 0) {
bool is_compressed;
if (has_compression) {
uint32_t compression_error = atomicAdd(&error_count, 0);
is_compressed = (!compression_error && compressed_data_size < uncompressed_data_size);
} else {
is_compressed = false;
}
chunks[blockIdx.x].is_compressed = is_compressed;
chunks[blockIdx.x].bfr_size = uncompressed_data_size;
chunks[blockIdx.x].compressed_size =
(is_compressed) ? compressed_data_size : uncompressed_data_size;
}
}
/**
* Minimal thrift compact protocol support
*/
inline __device__ uint8_t *cpw_put_uint32(uint8_t *p, uint32_t v)
{
while (v > 0x7f) {
*p++ = v | 0x80;
v >>= 7;
}
*p++ = v;
return p;
}
inline __device__ uint8_t *cpw_put_uint64(uint8_t *p, uint64_t v)
{
while (v > 0x7f) {
*p++ = v | 0x80;
v >>= 7;
}
*p++ = v;
return p;
}
inline __device__ uint8_t *cpw_put_int32(uint8_t *p, int32_t v)
{
int32_t s = (v < 0);
return cpw_put_uint32(p, (v ^ -s) * 2 + s);
}
inline __device__ uint8_t *cpw_put_int64(uint8_t *p, int64_t v)
{
int64_t s = (v < 0);
return cpw_put_uint64(p, (v ^ -s) * 2 + s);
}
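// Both signed writers above use zig-zag encoding, (v ^ -(v < 0)) * 2 + (v < 0), so values of
// small magnitude stay small: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4.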
inline __device__ uint8_t *cpw_put_fldh(uint8_t *p, int f, int cur, int t)
{
if (f > cur && f <= cur + 15) {
*p++ = ((f - cur) << 4) | t;
return p;
} else {
*p++ = t;
return cpw_put_int32(p, f);
}
}
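// The short form packs the field-id delta in the high nibble and the compact-protocol type in
// the low nibble. Example (assuming ST_FLD_I32 == 5, the standard Thrift compact type code):
// writing field 3 of type i32 right after field 1 emits the single byte (2 << 4) | 5 = 0x25.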
class header_encoder {
uint8_t *current_header_ptr;
int current_field_index;
public:
inline __device__ header_encoder(uint8_t *header_start)
: current_header_ptr(header_start), current_field_index(0)
{
}
inline __device__ void field_struct_begin(int field)
{
current_header_ptr =
cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_STRUCT);
current_field_index = 0;
}
inline __device__ void field_struct_end(int field)
{
*current_header_ptr++ = 0;
current_field_index = field;
}
template <typename T>
inline __device__ void field_int32(int field, T value)
{
current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I32);
current_header_ptr = cpw_put_int32(current_header_ptr, static_cast<int32_t>(value));
current_field_index = field;
}
template <typename T>
inline __device__ void field_int64(int field, T value)
{
current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I64);
current_header_ptr = cpw_put_int64(current_header_ptr, static_cast<int64_t>(value));
current_field_index = field;
}
inline __device__ void field_binary(int field, const void *value, uint32_t length)
{
current_header_ptr =
cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_BINARY);
current_header_ptr = cpw_put_uint32(current_header_ptr, length);
memcpy(current_header_ptr, value, length);
current_header_ptr += length;
current_field_index = field;
}
inline __device__ void end(uint8_t **header_end, bool termination_flag = true)
{
if (termination_flag == false) { *current_header_ptr++ = 0; }
*header_end = current_header_ptr;
}
inline __device__ uint8_t *get_ptr(void) { return current_header_ptr; }
inline __device__ void set_ptr(uint8_t *ptr) { current_header_ptr = ptr; }
};
__device__ uint8_t *EncodeStatistics(uint8_t *start,
const statistics_chunk *s,
uint8_t dtype,
float *fp_scratch)
{
uint8_t *end, dtype_len;
switch (dtype) {
case dtype_bool: dtype_len = 1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_float32: dtype_len = 4; break;
case dtype_int64:
case dtype_timestamp64:
case dtype_float64:
case dtype_decimal64: dtype_len = 8; break;
case dtype_decimal128: dtype_len = 16; break;
case dtype_string:
default: dtype_len = 0; break;
}
header_encoder encoder(start);
encoder.field_int64(3, s->null_count);
if (s->has_minmax) {
const void *vmin, *vmax;
uint32_t lmin, lmax;
if (dtype == dtype_string) {
lmin = s->min_value.str_val.length;
vmin = s->min_value.str_val.ptr;
lmax = s->max_value.str_val.length;
vmax = s->max_value.str_val.ptr;
} else {
lmin = lmax = dtype_len;
if (dtype == dtype_float32) { // Convert from double to float32
fp_scratch[0] = s->min_value.fp_val;
fp_scratch[1] = s->max_value.fp_val;
vmin = &fp_scratch[0];
vmax = &fp_scratch[1];
} else {
vmin = &s->min_value;
vmax = &s->max_value;
}
}
encoder.field_binary(5, vmax, lmax);
encoder.field_binary(6, vmin, lmin);
}
encoder.end(&end);
return end;
}
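// Field ids used above follow the parquet.thrift Statistics struct: 3 = null_count,
// 5 = max_value, 6 = min_value.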
// blockDim(128, 1, 1)
__global__ void __launch_bounds__(128)
gpuEncodePageHeaders(device_span<EncPage> pages,
device_span<gpu_inflate_status_s const> comp_stat,
device_span<statistics_chunk const> page_stats,
const statistics_chunk *chunk_stats)
{
// When this whole kernel becomes single thread, the following variables need not be __shared__
__shared__ __align__(8) parquet_column_device_view col_g;
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) EncPage page_g;
__shared__ __align__(8) float fp_scratch[2];
uint32_t t = threadIdx.x;
if (t == 0) {
uint8_t *hdr_start, *hdr_end;
uint32_t compressed_page_size, uncompressed_page_size;
page_g = pages[blockIdx.x];
ck_g = *page_g.chunk;
col_g = *ck_g.col_desc;
if (chunk_stats && &pages[blockIdx.x] == ck_g.pages) { // Is this the first page in a chunk?
hdr_start = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
hdr_end =
EncodeStatistics(hdr_start, &chunk_stats[page_g.chunk_id], col_g.stats_dtype, fp_scratch);
page_g.chunk->ck_stat_size = static_cast<uint32_t>(hdr_end - hdr_start);
}
uncompressed_page_size = page_g.max_data_size;
if (ck_g.is_compressed) {
hdr_start = page_g.compressed_data;
compressed_page_size = (uint32_t)comp_stat[blockIdx.x].bytes_written;
page_g.max_data_size = compressed_page_size;
} else {
hdr_start = page_g.page_data;
compressed_page_size = uncompressed_page_size;
}
header_encoder encoder(hdr_start);
PageType page_type = page_g.page_type;
// NOTE: For dictionary encoding, parquet v2 recommends using PLAIN in dictionary page and
// RLE_DICTIONARY in data page, but parquet v1 uses PLAIN_DICTIONARY in both dictionary and
// data pages (actual encoding is identical).
Encoding encoding;
if (enable_bool_rle) {
encoding = (col_g.physical_type != BOOLEAN)
? (page_type == PageType::DICTIONARY_PAGE || page_g.dict_bits_plus1 != 0)
? Encoding::PLAIN_DICTIONARY
: Encoding::PLAIN
: Encoding::RLE;
} else {
encoding = (page_type == PageType::DICTIONARY_PAGE || page_g.dict_bits_plus1 != 0)
? Encoding::PLAIN_DICTIONARY
: Encoding::PLAIN;
}
encoder.field_int32(1, page_type);
encoder.field_int32(2, uncompressed_page_size);
encoder.field_int32(3, compressed_page_size);
if (page_type == PageType::DATA_PAGE) {
// DataPageHeader
encoder.field_struct_begin(5);
encoder.field_int32(1, page_g.num_values); // NOTE: num_values != num_rows for list types
encoder.field_int32(2, encoding); // encoding
encoder.field_int32(3, Encoding::RLE); // definition_level_encoding
encoder.field_int32(4, Encoding::RLE); // repetition_level_encoding
// Optionally encode page-level statistics
if (not page_stats.empty()) {
encoder.field_struct_begin(5);
encoder.set_ptr(EncodeStatistics(
encoder.get_ptr(), &page_stats[blockIdx.x], col_g.stats_dtype, fp_scratch));
encoder.field_struct_end(5);
}
encoder.field_struct_end(5);
} else {
// DictionaryPageHeader
encoder.field_struct_begin(7);
encoder.field_int32(1, ck_g.total_dict_entries); // number of values in dictionary
encoder.field_int32(2, encoding);
encoder.field_struct_end(7);
}
encoder.end(&hdr_end, false);
page_g.hdr_size = (uint32_t)(hdr_end - hdr_start);
}
__syncthreads();
if (t == 0) pages[blockIdx.x] = page_g;
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024)
gpuGatherPages(device_span<EncColumnChunk> chunks, device_span<gpu::EncPage const> pages)
{
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) EncPage page_g;
uint32_t t = threadIdx.x;
uint8_t *dst, *dst_base;
const EncPage *first_page;
uint32_t num_pages, uncompressed_size;
if (t == 0) ck_g = chunks[blockIdx.x];
__syncthreads();
first_page = ck_g.pages;
num_pages = ck_g.num_pages;
dst = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
dst += ck_g.ck_stat_size; // Skip over chunk statistics
dst_base = dst;
uncompressed_size = ck_g.bfr_size;
for (uint32_t page = 0; page < num_pages; page++) {
const uint8_t *src;
uint32_t hdr_len, data_len;
if (t == 0) { page_g = first_page[page]; }
__syncthreads();
src = (ck_g.is_compressed) ? page_g.compressed_data : page_g.page_data;
// Copy page header
hdr_len = page_g.hdr_size;
memcpy_block<1024, true>(dst, src, hdr_len, t);
src += page_g.max_hdr_size;
dst += hdr_len;
// Copy page data
uncompressed_size += hdr_len;
data_len = page_g.max_data_size;
memcpy_block<1024, true>(dst, src, data_len, t);
dst += data_len;
__syncthreads();
if (!t && page == 0 && ck_g.has_dictionary) { ck_g.dictionary_size = hdr_len + data_len; }
}
if (t == 0) {
chunks[blockIdx.x].bfr_size = uncompressed_size;
chunks[blockIdx.x].compressed_size = (dst - dst_base);
if (ck_g.has_dictionary) { chunks[blockIdx.x].dictionary_size = ck_g.dictionary_size; }
}
}
/**
* @brief Functor to get definition level value for a nested struct column until the leaf level or
* the first list level.
*
*/
struct def_level_fn {
column_device_view const *parent_col;
uint8_t const *d_nullability;
uint8_t sub_level_start;
uint8_t curr_def_level;
__device__ uint32_t operator()(size_type i)
{
uint32_t def = curr_def_level;
uint8_t l = sub_level_start;
bool is_col_struct = false;
auto col = *parent_col;
do {
// If col not nullable then it does not contribute to def levels
if (d_nullability[l]) {
if (not col.nullable() or bit_is_set(col.null_mask(), i)) {
++def;
} else { // We have found the shallowest level at which this row is null
break;
}
}
is_col_struct = (col.type().id() == type_id::STRUCT);
if (is_col_struct) {
col = col.child(0);
++l;
}
} while (is_col_struct);
return def;
}
};
/**
* @brief Get the dremel offsets and repetition and definition levels for a LIST column
*
* The repetition and definition level values are ideally computed using a recursive call over a
* nested structure but in order to better utilize GPU resources, this function calculates them
* with a bottom up merge method.
*
* Given a LIST column of type `List<List<int>>` like so:
* ```
* col = {
* [],
* [[], [1, 2, 3], [4, 5]],
* [[]]
* }
* ```
* We can represent it in cudf format with two level of offsets like this:
* ```
* Level 0 offsets = {0, 0, 3, 5, 6}
* Level 1 offsets = {0, 0, 3, 5, 5}
* Values = {1, 2, 3, 4, 5}
* ```
* The desired result of this function is the repetition and definition level values that
* correspond to the data values:
* ```
* col = {[], [[], [1, 2, 3], [4, 5]], [[]]}
 * def = { 0, 1, 2, 2, 2, 2, 2, 1 }
* rep = { 0, 0, 0, 2, 2, 1, 2, 0 }
* ```
*
* Since repetition and definition levels arrays contain a value for each empty list, the size of
* the rep/def level array can be given by
* ```
* rep_level.size() = size of leaf column + number of empty lists in level 0
* + number of empty lists in level 1 ...
* ```
*
* We start with finding the empty lists in the penultimate level and merging it with the indices
* of the leaf level. The values for the merge are the definition and repetition levels
* ```
* empties at level 1 = {0, 5}
* def values at 1 = {1, 1}
* rep values at 1 = {1, 1}
* indices at leaf = {0, 1, 2, 3, 4}
* def values at leaf = {2, 2, 2, 2, 2}
* rep values at leaf = {2, 2, 2, 2, 2}
* ```
*
* merged def values = {1, 2, 2, 2, 2, 2, 1}
* merged rep values = {1, 2, 2, 2, 2, 2, 1}
*
* The size of the rep/def values is now larger than the leaf values and the offsets need to be
* adjusted in order to point to the correct start indices. We do this with an exclusive scan over
* the indices of offsets of empty lists and adding to existing offsets.
* ```
* Level 1 new offsets = {0, 1, 4, 6, 7}
* ```
* Repetition values at the beginning of a list need to be decremented. We use the new offsets to
* scatter the rep value.
* ```
* merged rep values = {1, 2, 2, 2, 2, 2, 1}
* scatter (1, new offsets)
* new offsets = {0, 1, 4, 6, 7}
* new rep values = {1, 1, 2, 2, 1, 2, 1}
* ```
*
* Similarly we merge up all the way till level 0 offsets
*
* STRUCT COLUMNS :
* In case of struct columns, we don't have to merge struct levels with their children because a
* struct is the same size as its children. e.g. for a column `struct<int, float>`, if the row `i`
* is null, then the children columns `int` and `float` are also null at `i`. They also have the
* null entry represented in their respective null masks. So for any case of strictly struct based
* nesting, we can get the definition levels merely by iterating over the nesting for the same row.
*
* In case struct and lists are intermixed, the definition levels of all the contiguous struct
* levels can be constructed using the aforementioned iterative method. Only when we reach a list
* level, we need to do a merge with the subsequent level.
*
* So, for a column like `struct<list<int>>`, we are going to merge between the levels `struct<list`
* and `int`.
* For a column like `list<struct<int>>`, we are going to merge between `list` and `struct<int>`.
*
* In general, one nesting level is the list level and any struct level that precedes it.
*
* A few more examples to visualize the partitioning of column hierarchy into nesting levels:
* (L is list, S is struct, i is integer(leaf data level), angle brackets omitted)
* ```
* 1. LSi = L Si
* - | --
*
* 2. LLSi = L L Si
* - | - | --
*
* 3. SSLi = SSL i
* --- | -
*
* 4. LLSLSSi = L L SL SSi
* - | - | -- | ---
```
*/
dremel_data get_dremel_data(column_view h_col,
// TODO(cp): use device_span once it is converted to a single hd_vec
rmm::device_uvector<uint8_t> const &d_nullability,
std::vector<uint8_t> const &nullability,
rmm::cuda_stream_view stream)
{
auto get_list_level = [](column_view col) {
while (col.type().id() == type_id::STRUCT) { col = col.child(0); }
return col;
};
auto get_empties = [&](column_view col, size_type start, size_type end) {
auto lcv = lists_column_view(get_list_level(col));
rmm::device_uvector<size_type> empties_idx(lcv.size(), stream);
rmm::device_uvector<size_type> empties(lcv.size(), stream);
auto d_off = lcv.offsets().data<size_type>();
auto empties_idx_end =
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(start),
thrust::make_counting_iterator(end),
empties_idx.begin(),
[d_off] __device__(auto i) { return d_off[i] == d_off[i + 1]; });
auto empties_end = thrust::gather(rmm::exec_policy(stream),
empties_idx.begin(),
empties_idx_end,
lcv.offsets().begin<size_type>(),
empties.begin());
auto empties_size = empties_end - empties.begin();
return std::make_tuple(std::move(empties), std::move(empties_idx), empties_size);
};
auto curr_col = h_col;
std::vector<column_view> nesting_levels;
std::vector<uint8_t> def_at_level;
std::vector<uint8_t> start_at_sub_level;
uint8_t curr_nesting_level_idx = 0;
auto add_def_at_level = [&](column_view col) {
// Add up all def level contributions in this column all the way till the first list column
// appears in the hierarchy or until we get to leaf
uint32_t def = 0;
start_at_sub_level.push_back(curr_nesting_level_idx);
while (col.type().id() == type_id::STRUCT) {
def += (nullability[curr_nesting_level_idx]) ? 1 : 0;
col = col.child(0);
++curr_nesting_level_idx;
}
// At the end of all those structs is either a list column or the leaf. Leaf column contributes
// at least one def level. It doesn't matter what the leaf contributes because it'll be at the
// end of the exclusive scan.
def += (nullability[curr_nesting_level_idx]) ? 2 : 1;
def_at_level.push_back(def);
++curr_nesting_level_idx;
};
while (cudf::is_nested(curr_col.type())) {
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
while (curr_col.type().id() == type_id::STRUCT) {
// Go down the hierarchy until we get to the LIST or the leaf level
curr_col = curr_col.child(0);
}
if (curr_col.type().id() == type_id::LIST) {
curr_col = curr_col.child(lists_column_view::child_column_index);
if (not is_nested(curr_col.type())) {
// Special case: when the leaf data column is the immediate child of the list col then we
// want it to be included right away. Otherwise the struct containing it will be included in
// the next iteration of this loop.
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
break;
}
}
}
std::unique_ptr<rmm::device_buffer> device_view_owners;
column_device_view *d_nesting_levels;
std::tie(device_view_owners, d_nesting_levels) =
contiguous_copy_column_device_views<column_device_view>(nesting_levels, stream);
thrust::exclusive_scan(
thrust::host, def_at_level.begin(), def_at_level.end(), def_at_level.begin());
// Sliced list column views only have offsets applied to top level. Get offsets for each level.
rmm::device_uvector<size_type> d_column_offsets(nesting_levels.size(), stream);
rmm::device_uvector<size_type> d_column_ends(nesting_levels.size(), stream);
auto d_col = column_device_view::create(h_col, stream);
cudf::detail::device_single_thread(
[offset_at_level = d_column_offsets.data(),
end_idx_at_level = d_column_ends.data(),
col = *d_col] __device__() {
auto curr_col = col;
size_type off = curr_col.offset();
size_type end = off + curr_col.size();
size_type level = 0;
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
// Apply offset recursively until we get to leaf data
// Skip doing the following for any structs we encounter in between.
while (curr_col.type().id() == type_id::LIST or curr_col.type().id() == type_id::STRUCT) {
if (curr_col.type().id() == type_id::LIST) {
off = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(off);
end = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(end);
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
curr_col = curr_col.child(lists_column_view::child_column_index);
} else {
curr_col = curr_col.child(0);
}
}
},
stream);
thrust::host_vector<size_type> column_offsets(d_column_offsets.size());
CUDA_TRY(hipMemcpyAsync(column_offsets.data(),
d_column_offsets.data(),
d_column_offsets.size() * sizeof(size_type),
hipMemcpyDeviceToHost,
stream.value()));
thrust::host_vector<size_type> column_ends(d_column_ends.size());
CUDA_TRY(hipMemcpyAsync(column_ends.data(),
d_column_ends.data(),
d_column_ends.size() * sizeof(size_type),
hipMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t max_vals_size = 0;
for (size_t l = 0; l < column_offsets.size(); ++l) {
max_vals_size += column_ends[l] - column_offsets[l];
}
rmm::device_uvector<uint8_t> rep_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> def_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_rep_vals(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_def_vals(max_vals_size, stream);
rmm::device_uvector<size_type> new_offsets(0, stream);
size_type curr_rep_values_size = 0;
{
// At this point, curr_col contains the leaf column. Max nesting level is
// nesting_levels.size().
// We are going to start by merging the last column in nesting_levels (the leaf, which is at the
// index `nesting_levels.size() - 1`) with the second-to-last (which is at
// `nesting_levels.size() - 2`).
size_t level = nesting_levels.size() - 2;
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
rmm::device_uvector<size_type> empties(0, stream);
rmm::device_uvector<size_type> empties_idx(0, stream);
size_t empties_size;
std::tie(empties, empties_idx, empties_size) =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
// Merge empty at deepest parent level with the rep, def level vals at leaf level
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level]});
// `nesting_levels.size()` == no of list levels + leaf. Max repetition level = no of list levels
auto input_child_rep_it = thrust::make_constant_iterator(nesting_levels.size() - 1);
auto input_child_def_it =
thrust::make_transform_iterator(thrust::make_counting_iterator(column_offsets[level + 1]),
def_level_fn{d_nesting_levels + level + 1,
d_nullability.data(),
start_at_sub_level[level + 1],
def_at_level[level + 1]});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_child_rep_it, input_child_def_it));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
empties.begin(),
empties.begin() + empties_size,
thrust::make_counting_iterator(column_offsets[level + 1]),
thrust::make_counting_iterator(column_ends[level + 1]),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of empties
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level], [off = lcv.offsets().data<size_type>()] __device__(auto i) -> int {
return off[i] == off[i + 1];
});
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
new_offsets = rmm::device_uvector<size_type>(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = new_offsets.data()] __device__(auto i) {
new_off[i] = off[i] - off[0] + scan_out[i];
});
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
// Having already merged the last two levels, we are now going to merge the result with the
// third-last level which is at index `nesting_levels.size() - 3`.
for (int level = nesting_levels.size() - 3; level >= 0; level--) {
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
rmm::device_uvector<size_type> empties(0, stream);
rmm::device_uvector<size_type> empties_idx(0, stream);
size_t empties_size;
std::tie(empties, empties_idx, empties_size) =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
auto offset_transformer = [new_child_offsets = new_offsets.data(),
child_start = column_offsets[level + 1]] __device__(auto x) {
return new_child_offsets[x - child_start]; // (x - child's offset)
};
// We will be reading from old rep_levels and writing again to rep_levels. Swap the current
// rep values into temp_rep_vals so it can become the input and rep_levels can again be output.
std::swap(temp_rep_vals, rep_level);
std::swap(temp_def_vals, def_level);
// Merge empty at parent level with the rep, def level vals at current level
auto transformed_empties = thrust::make_transform_iterator(empties.begin(), offset_transformer);
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level]});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(temp_rep_vals.begin(), temp_def_vals.begin()));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
transformed_empties,
transformed_empties + empties_size,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(curr_rep_values_size),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of dremel
    // level value for an empty list
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level], [off = lcv.offsets().data<size_type>()] __device__(auto i) -> int {
return off[i] == off[i + 1];
});
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
rmm::device_uvector<size_type> temp_new_offsets(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = temp_new_offsets.data(),
offset_transformer] __device__(auto i) {
new_off[i] = offset_transformer(off[i]) + scan_out[i];
});
new_offsets = std::move(temp_new_offsets);
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
size_t level_vals_size = new_offsets.back_element(stream);
rep_level.resize(level_vals_size, stream);
def_level.resize(level_vals_size, stream);
stream.synchronize();
size_type leaf_data_size = column_ends.back() - column_offsets.back();
return dremel_data{
std::move(new_offsets), std::move(rep_level), std::move(def_level), leaf_data_size};
}
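// Worked example of the repetition/definition levels produced above (a sketch, assuming a
// single-level LIST<INT32> column whose list and leaf are both nullable, so the maximum
// definition level is 3 and the maximum repetition level is 1):
//   rows:      [[1, 2]], [], NULL
//   rep_level:  0 1 0 0   (0 starts a new row, 1 continues the current list)
//   def_level:  3 3 1 0   (3 = non-null element, 1 = empty but non-null list, 0 = null list)
// The empty list and the null row each contribute one level entry despite having no leaf values,
// which is what the merge-with-empties passes above account for.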
/**
* @brief Launches kernel for initializing encoder page fragments
*
* @param[in,out] frag Fragment array [column_id][fragment_id]
* @param[in] col_desc Column description array [column_id]
 * @param[in] fragment_size Number of rows per fragment
 * @param[in] num_rows Total number of table rows
* @param[in] stream CUDA stream to use, default 0
*/
void InitPageFragments(device_2dspan<PageFragment> frag,
device_span<parquet_column_device_view const> col_desc,
uint32_t fragment_size,
uint32_t num_rows,
rmm::cuda_stream_view stream)
{
auto num_columns = frag.size().first;
auto num_fragments_per_column = frag.size().second;
dim3 dim_grid(num_columns, num_fragments_per_column); // 1 threadblock per fragment
hipLaunchKernelGGL(( gpuInitPageFragments<512>)
, dim3(dim_grid), dim3(512), 0, stream.value(), frag, col_desc, fragment_size, num_rows);
}
/**
* @brief Launches kernel for initializing fragment statistics groups
*
* @param[out] groups Statistics groups [num_columns x num_fragments]
* @param[in] fragments Page fragments [num_columns x num_fragments]
* @param[in] col_desc Column description [num_columns]
* @param[in] stream CUDA stream to use, default 0
*/
void InitFragmentStatistics(device_2dspan<statistics_group> groups,
device_2dspan<PageFragment const> fragments,
device_span<parquet_column_device_view const> col_desc,
rmm::cuda_stream_view stream)
{
int const num_columns = col_desc.size();
int const num_fragments_per_column = fragments.size().second;
auto grid_y = util::div_rounding_up_safe(num_fragments_per_column, 128 / cudf::detail::warp_size);
dim3 dim_grid(num_columns, grid_y); // 1 warp per fragment
hipLaunchKernelGGL(( gpuInitFragmentStats), dim3(dim_grid), dim3(128), 0, stream.value(), groups, fragments, col_desc);
}
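// Illustrative grid arithmetic for the launch above (assuming the usual 32-thread warp): each
// 128-thread block holds 4 warps and each warp handles one fragment, so e.g. 1000 fragments per
// column gives grid_y = div_rounding_up_safe(1000, 4) = 250 and a (num_columns, 250) grid. This
// matches gpuInitFragmentStats, which derives its fragment id as blockIdx.y * 4 + (threadIdx.x >> 5).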
/**
* @brief Launches kernel for initializing encoder data pages
*
* @param[in,out] chunks Column chunks [rowgroup][column]
* @param[out] pages Encode page array (null if just counting pages)
* @param[in] col_desc Column description array [column_id]
 * @param[in] num_columns Number of columns
* @param[out] page_grstats Setup for page-level stats
* @param[out] chunk_grstats Setup for chunk-level stats
* @param[in] stream CUDA stream to use, default 0
*/
void InitEncoderPages(device_2dspan<EncColumnChunk> chunks,
device_span<gpu::EncPage> pages,
device_span<parquet_column_device_view const> col_desc,
int32_t num_columns,
statistics_merge_group *page_grstats,
statistics_merge_group *chunk_grstats,
rmm::cuda_stream_view stream)
{
auto num_rowgroups = chunks.size().first;
dim3 dim_grid(num_columns, num_rowgroups); // 1 threadblock per rowgroup
hipLaunchKernelGGL(( gpuInitPages), dim3(dim_grid), dim3(128), 0, stream.value(),
chunks, pages, col_desc, page_grstats, chunk_grstats, num_columns);
}
/**
* @brief Launches kernel for packing column data into parquet pages
*
* @param[in,out] pages Device array of EncPages (unordered)
* @param[out] comp_in Optionally initializes compressor input params
* @param[out] comp_stat Optionally initializes compressor status
* @param[in] stream CUDA stream to use, default 0
*/
void EncodePages(device_span<gpu::EncPage> pages,
device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat,
rmm::cuda_stream_view stream)
{
auto num_pages = pages.size();
// A page is part of one column. This is launching 1 block per page. 1 block will exclusively
// deal with one datatype.
hipLaunchKernelGGL(( gpuEncodePages<128>), dim3(num_pages), dim3(128), 0, stream.value(), pages, comp_in, comp_stat);
}
/**
* @brief Launches kernel to make the compressed vs uncompressed chunk-level decision
*
* @param[in,out] chunks Column chunks
* @param[in] stream CUDA stream to use, default 0
*/
void DecideCompression(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpuDecideCompression), dim3(chunks.size()), dim3(128), 0, stream.value(), chunks);
}
/**
* @brief Launches kernel to encode page headers
*
* @param[in,out] pages Device array of EncPages
* @param[in] comp_stat Compressor status or nullptr if no compression
* @param[in] page_stats Optional page-level statistics to be included in page header
* @param[in] chunk_stats Optional chunk-level statistics to be encoded
* @param[in] stream CUDA stream to use, default 0
*/
void EncodePageHeaders(device_span<EncPage> pages,
device_span<gpu_inflate_status_s const> comp_stat,
device_span<statistics_chunk const> page_stats,
const statistics_chunk *chunk_stats,
rmm::cuda_stream_view stream)
{
// TODO: single thread task. No need for 128 threads/block. Earlier it used to employ rest of the
// threads to coop load structs
hipLaunchKernelGGL(( gpuEncodePageHeaders), dim3(pages.size()), dim3(128), 0, stream.value(),
pages, comp_stat, page_stats, chunk_stats);
}
/**
* @brief Launches kernel to gather pages to a single contiguous block per chunk
*
* @param[in,out] chunks Column chunks
* @param[in] pages Device array of EncPages
* @param[in] stream CUDA stream to use, default 0
*/
void GatherPages(device_span<EncColumnChunk> chunks,
device_span<gpu::EncPage const> pages,
rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpuGatherPages), dim3(chunks.size()), dim3(1024), 0, stream.value(), chunks, pages);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| be80ff13ffe1a1e0728c4ed225bc2966d93ea12c.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.hpp"
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <cub/cub.cuh>
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include <cuda/std/chrono>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Spark doesn't support RLE encoding for BOOLEANs
#ifdef ENABLE_BOOL_RLE
constexpr bool enable_bool_rle = true;
#else
constexpr bool enable_bool_rle = false;
#endif
using ::cudf::detail::device_2dspan;
constexpr int init_hash_bits = 12;
constexpr uint32_t rle_buffer_size = (1 << 9);
struct frag_init_state_s {
parquet_column_device_view col;
PageFragment frag;
uint32_t total_dupes;
size_type start_value_idx;
volatile uint32_t scratch_red[32];
uint32_t dict[max_page_fragment_size];
union {
uint16_t u16[1 << (init_hash_bits)];
uint32_t u32[1 << (init_hash_bits - 1)];
} map;
};
struct page_enc_state_s {
uint8_t *cur; //!< current output ptr
uint8_t *rle_out; //!< current RLE write ptr
uint32_t rle_run; //!< current RLE run
uint32_t run_val; //!< current RLE run value
uint32_t rle_pos; //!< RLE encoder positions
uint32_t rle_numvals; //!< RLE input value count
uint32_t rle_lit_count;
uint32_t rle_rpt_count;
uint32_t page_start_val;
volatile uint32_t rpt_map[4];
volatile uint32_t scratch_red[32];
EncPage page;
EncColumnChunk ck;
parquet_column_device_view col;
gpu_inflate_input_s comp_in;
gpu_inflate_status_s comp_stat;
uint16_t vals[rle_buffer_size];
};
/**
* @brief Return a 12-bit hash from a byte sequence
*/
inline __device__ uint32_t hash_string(const string_view &val)
{
char const *ptr = val.data();
uint32_t len = val.size_bytes();
if (len != 0) {
return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1);
} else {
return 0;
}
}
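// For example, hash_string("abc") computes 'a' (97) + ('c' << 5) (3168) + (3 << 10) (3072) = 6337,
// masked to the low 12 bits to give 0x8c1. Collisions are expected; the fragment initialization
// kernel below resolves them, so the hash only has to spread values across 4096 buckets.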
inline __device__ uint32_t uint32_init_hash(uint32_t v)
{
return (v + (v >> 11) + (v >> 22)) & ((1 << init_hash_bits) - 1);
}
inline __device__ uint32_t uint64_init_hash(uint64_t v)
{
return uint32_init_hash(static_cast<uint32_t>(v + (v >> 32)));
}
/**
* @brief Initializes encoder page fragments
*
* Based on the number of rows in each fragment, populates the value count, the size of data in the
* fragment, the number of unique values, and the data size of unique values.
*
 * @param[in,out] frag Fragment array [column_id][fragment_id]
 * @param[in] col_desc Column description array [column_id]
 * @param[in] fragment_size Number of rows per fragment
 * @param[in] max_num_rows Total number of table rows
*/
// blockDim {512,1,1}
template <int block_size>
__global__ void __launch_bounds__(block_size)
gpuInitPageFragments(device_2dspan<PageFragment> frag,
device_span<parquet_column_device_view const> col_desc,
uint32_t fragment_size,
uint32_t max_num_rows)
{
__shared__ __align__(16) frag_init_state_s state_g;
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
using block_scan = cub::BlockScan<uint32_t, block_size>;
__shared__ union {
typename block_reduce::TempStorage reduce_storage;
typename block_scan::TempStorage scan_storage;
} temp_storage;
frag_init_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t start_row, dtype_len, dtype_len_in, dtype;
if (t == 0) s->col = col_desc[blockIdx.x];
for (uint32_t i = 0; i < sizeof(s->map) / sizeof(uint32_t); i += block_size) {
if (i + t < sizeof(s->map) / sizeof(uint32_t)) s->map.u32[i + t] = 0;
}
__syncthreads();
start_row = blockIdx.y * fragment_size;
if (!t) {
    // frag.num_rows = fragment_size except for the last fragment in a column, which can be smaller.
    // num_rows is fixed, but the fragment's size can be larger if the data is strings or nested.
s->frag.num_rows = min(fragment_size, max_num_rows - min(start_row, max_num_rows));
s->frag.non_nulls = 0;
s->frag.num_dict_vals = 0;
s->frag.fragment_data_size = 0;
s->frag.dict_data_size = 0;
s->total_dupes = 0;
// To use num_vals instead of num_rows, we need to calculate num_vals on the fly.
// For list<list<int>>, values between i and i+50 can be calculated by
// off_11 = off[i], off_12 = off[i+50]
// off_21 = child.off[off_11], off_22 = child.off[off_12]
// etc...
size_type end_value_idx = start_row + s->frag.num_rows;
if (s->col.parent_column == nullptr) {
s->start_value_idx = start_row;
} else {
auto col = *(s->col.parent_column);
auto current_start_value_idx = start_row;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
current_start_value_idx += col.offset();
end_value_idx += col.offset();
col = col.child(0);
} else {
auto offset_col = col.child(lists_column_view::offsets_column_index);
current_start_value_idx =
offset_col.element<size_type>(current_start_value_idx + col.offset());
end_value_idx = offset_col.element<size_type>(end_value_idx + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s->start_value_idx = current_start_value_idx;
}
s->frag.start_value_idx = s->start_value_idx;
s->frag.num_leaf_values = end_value_idx - s->start_value_idx;
if (s->col.level_offsets != nullptr) {
// For nested schemas, the number of values in a fragment is not directly related to the
// number of encoded data elements or the number of rows. It is simply the number of
// repetition/definition values which together encode validity and nesting information.
size_type first_level_val_idx = s->col.level_offsets[start_row];
size_type last_level_val_idx = s->col.level_offsets[start_row + s->frag.num_rows];
s->frag.num_values = last_level_val_idx - first_level_val_idx;
} else {
s->frag.num_values = s->frag.num_rows;
}
}
dtype = s->col.physical_type;
dtype_len =
(dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : (dtype == BOOLEAN) ? 1 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
} else if (dtype == INT96) {
// cudf doesn't support INT96 internally and uses INT64, so treat INT96 as an INT64 for
// computing dictionary hash values and reading the data, but we do treat it as 12 bytes for
// dtype_len, which determines how much memory we need to allocate for the fragment.
dtype_len_in = 8;
} else {
dtype_len_in = dtype_len;
}
__syncthreads();
size_type nvals = s->frag.num_leaf_values;
size_type start_value_idx = s->start_value_idx;
for (uint32_t i = 0; i < nvals; i += block_size) {
uint32_t val_idx = start_value_idx + i + t;
uint32_t is_valid = (i + t < nvals && val_idx < s->col.leaf_column->size())
? s->col.leaf_column->is_valid(val_idx)
: 0;
uint32_t len, nz_pos, hash;
if (is_valid) {
len = dtype_len;
if (dtype != BOOLEAN) {
if (dtype == BYTE_ARRAY) {
auto str = s->col.leaf_column->element<string_view>(val_idx);
len += str.size_bytes();
hash = hash_string(str);
} else if (dtype_len_in == 8) {
hash = uint64_init_hash(s->col.leaf_column->element<uint64_t>(val_idx));
} else {
hash = uint32_init_hash((dtype_len_in == 4)
? s->col.leaf_column->element<uint32_t>(val_idx)
: (dtype_len_in == 2)
? s->col.leaf_column->element<uint16_t>(val_idx)
: s->col.leaf_column->element<uint8_t>(val_idx));
}
}
} else {
len = 0;
}
uint32_t non_nulls;
block_scan(temp_storage.scan_storage).ExclusiveSum(is_valid, nz_pos, non_nulls);
nz_pos += s->frag.non_nulls;
__syncthreads();
len = block_reduce(temp_storage.reduce_storage).Sum(len);
if (!t) {
s->frag.non_nulls += non_nulls;
s->frag.fragment_data_size += len;
}
__syncthreads();
if (is_valid && dtype != BOOLEAN) {
uint32_t *dict_index = s->col.dict_index;
if (dict_index) {
atomicAdd(&s->map.u32[hash >> 1], (hash & 1) ? 1 << 16 : 1);
dict_index[start_value_idx + nz_pos] =
((i + t) << init_hash_bits) |
hash; // Store the hash along with the index, so we don't have to recompute it
}
}
__syncthreads();
}
__syncthreads();
// Reorder the 16-bit local indices according to the hash values
if (s->col.dict_index) {
static_assert((init_hash_bits == 12), "Hardcoded for init_hash_bits=12");
// Cumulative sum of hash map counts
uint32_t count01 = s->map.u32[t * 4 + 0];
uint32_t count23 = s->map.u32[t * 4 + 1];
uint32_t count45 = s->map.u32[t * 4 + 2];
uint32_t count67 = s->map.u32[t * 4 + 3];
uint32_t sum01 = count01 + (count01 << 16);
uint32_t sum23 = count23 + (count23 << 16);
uint32_t sum45 = count45 + (count45 << 16);
uint32_t sum67 = count67 + (count67 << 16);
sum23 += (sum01 >> 16) * 0x10001;
sum45 += (sum23 >> 16) * 0x10001;
sum67 += (sum45 >> 16) * 0x10001;
uint32_t sum_w = sum67 >> 16;
block_scan(temp_storage.scan_storage).InclusiveSum(sum_w, sum_w);
sum_w = (sum_w - (sum67 >> 16)) * 0x10001;
s->map.u32[t * 4 + 0] = sum_w + sum01 - count01;
s->map.u32[t * 4 + 1] = sum_w + sum23 - count23;
s->map.u32[t * 4 + 2] = sum_w + sum45 - count45;
s->map.u32[t * 4 + 3] = sum_w + sum67 - count67;
}
__syncthreads();
// Put the indices back in hash order
if (s->col.dict_index) {
uint32_t *dict_index = s->col.dict_index + start_row;
uint32_t nnz = s->frag.non_nulls;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t pos = 0, hash = 0, pos_old, pos_new, sh, colliding_row, val = 0;
bool collision;
if (i + t < nnz) {
val = dict_index[i + t];
hash = val & ((1 << init_hash_bits) - 1);
sh = (hash & 1) ? 16 : 0;
pos_old = s->map.u16[hash];
}
// The isolation of the atomicAdd, along with pos_old/pos_new is to guarantee deterministic
// behavior for the first row in the hash map that will be used for early duplicate detection
__syncthreads();
if (i + t < nnz) {
pos = (atomicAdd(&s->map.u32[hash >> 1], 1 << sh) >> sh) & 0xffff;
s->dict[pos] = val;
}
__syncthreads();
collision = false;
if (i + t < nnz) {
pos_new = s->map.u16[hash];
collision = (pos != pos_old && pos_new > pos_old + 1);
if (collision) { colliding_row = s->dict[pos_old]; }
}
__syncthreads();
if (collision) { atomicMin(&s->dict[pos_old], val); }
__syncthreads();
// Resolve collision
if (collision && val == s->dict[pos_old]) { s->dict[pos] = colliding_row; }
}
__syncthreads();
// Now that the values are ordered by hash, compare every entry with the first entry in the hash
// map, the position of the first entry can be inferred from the hash map counts
uint32_t dupe_data_size = 0;
for (uint32_t i = 0; i < nnz; i += block_size) {
uint32_t ck_row = 0, ck_row_ref = 0, is_dupe = 0;
if (i + t < nnz) {
uint32_t dict_val = s->dict[i + t];
uint32_t hash = dict_val & ((1 << init_hash_bits) - 1);
ck_row = start_row + (dict_val >> init_hash_bits);
ck_row_ref = start_row + (s->dict[(hash > 0) ? s->map.u16[hash - 1] : 0] >> init_hash_bits);
if (ck_row_ref != ck_row) {
if (dtype == BYTE_ARRAY) {
auto str1 = s->col.leaf_column->element<string_view>(ck_row);
auto str2 = s->col.leaf_column->element<string_view>(ck_row_ref);
is_dupe = (str1 == str2);
dupe_data_size += (is_dupe) ? 4 + str1.size_bytes() : 0;
} else {
if (dtype_len_in == 8) {
auto v1 = s->col.leaf_column->element<uint64_t>(ck_row);
auto v2 = s->col.leaf_column->element<uint64_t>(ck_row_ref);
is_dupe = (v1 == v2);
dupe_data_size += (is_dupe) ? 8 : 0;
} else {
uint32_t v1, v2;
if (dtype_len_in == 4) {
v1 = s->col.leaf_column->element<uint32_t>(ck_row);
v2 = s->col.leaf_column->element<uint32_t>(ck_row_ref);
} else if (dtype_len_in == 2) {
v1 = s->col.leaf_column->element<uint16_t>(ck_row);
v2 = s->col.leaf_column->element<uint16_t>(ck_row_ref);
} else {
v1 = s->col.leaf_column->element<uint8_t>(ck_row);
v2 = s->col.leaf_column->element<uint8_t>(ck_row_ref);
}
is_dupe = (v1 == v2);
dupe_data_size += (is_dupe) ? 4 : 0;
}
}
}
}
uint32_t dupes_in_block;
uint32_t dupes_before;
block_scan(temp_storage.scan_storage).InclusiveSum(is_dupe, dupes_before, dupes_in_block);
dupes_before += s->total_dupes;
__syncthreads();
if (t == 0) { s->total_dupes += dupes_in_block; }
if (i + t < nnz) {
if (!is_dupe) {
s->col.dict_data[start_row + i + t - dupes_before] = ck_row;
} else {
s->col.dict_index[ck_row] = ck_row_ref | (1u << 31);
}
}
}
__syncthreads();
dupe_data_size = block_reduce(temp_storage.reduce_storage).Sum(dupe_data_size);
if (!t) {
s->frag.dict_data_size = s->frag.fragment_data_size - dupe_data_size;
s->frag.num_dict_vals = s->frag.non_nulls - s->total_dupes;
}
}
__syncthreads();
if (t == 0) frag[blockIdx.x][blockIdx.y] = s->frag;
}
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuInitFragmentStats(device_2dspan<statistics_group> groups,
device_2dspan<PageFragment const> fragments,
device_span<parquet_column_device_view const> col_desc)
{
// TODO: why not 1 block per warp?
__shared__ __align__(8) statistics_group group_g[4];
uint32_t lane_id = threadIdx.x & 0x1f;
uint32_t frag_id = blockIdx.y * 4 + (threadIdx.x >> 5);
uint32_t column_id = blockIdx.x;
auto num_fragments_per_column = fragments.size().second;
statistics_group *const g = &group_g[threadIdx.x >> 5];
if (!lane_id && frag_id < num_fragments_per_column) {
g->col = &col_desc[column_id];
g->start_row = fragments[column_id][frag_id].start_value_idx;
g->num_rows = fragments[column_id][frag_id].num_leaf_values;
}
__syncthreads();
if (frag_id < num_fragments_per_column and lane_id == 0) groups[column_id][frag_id] = *g;
}
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuInitPages(device_2dspan<EncColumnChunk> chunks,
device_span<gpu::EncPage> pages,
device_span<parquet_column_device_view const> col_desc,
statistics_merge_group *page_grstats,
statistics_merge_group *chunk_grstats,
int32_t num_columns)
{
// TODO: All writing seems to be done by thread 0. Could be replaced by thrust foreach
__shared__ __align__(8) parquet_column_device_view col_g;
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) PageFragment frag_g;
__shared__ __align__(8) EncPage page_g;
__shared__ __align__(8) statistics_merge_group pagestats_g;
uint32_t t = threadIdx.x;
if (t == 0) {
col_g = col_desc[blockIdx.x];
ck_g = chunks[blockIdx.y][blockIdx.x];
page_g = {};
}
__syncthreads();
if (t < 32) {
uint32_t fragments_in_chunk = 0;
uint32_t rows_in_page = 0;
uint32_t values_in_page = 0;
uint32_t leaf_values_in_page = 0;
uint32_t page_size = 0;
uint32_t num_pages = 0;
uint32_t num_rows = 0;
uint32_t page_start = 0;
uint32_t page_offset = ck_g.ck_stat_size;
uint32_t num_dict_entries = 0;
uint32_t comp_page_offset = ck_g.ck_stat_size;
uint32_t cur_row = ck_g.start_row;
uint32_t ck_max_stats_len = 0;
uint32_t max_stats_len = 0;
if (!t) {
pagestats_g.col = &col_desc[blockIdx.x];
pagestats_g.start_chunk = ck_g.first_fragment;
pagestats_g.num_chunks = 0;
}
if (ck_g.has_dictionary) {
if (!t) {
page_g.page_data = ck_g.uncompressed_bfr + page_offset;
page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset;
page_g.num_fragments = 0;
page_g.page_type = PageType::DICTIONARY_PAGE;
page_g.dict_bits_plus1 = 0;
page_g.chunk = &chunks[blockIdx.y][blockIdx.x];
page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x;
page_g.hdr_size = 0;
page_g.max_hdr_size = 32;
page_g.max_data_size = ck_g.dictionary_size;
page_g.start_row = cur_row;
page_g.num_rows = ck_g.total_dict_entries;
page_g.num_leaf_values = ck_g.total_dict_entries;
page_g.num_values = ck_g.total_dict_entries;
page_offset += page_g.max_hdr_size + page_g.max_data_size;
comp_page_offset += page_g.max_hdr_size + GetMaxCompressedBfrSize(page_g.max_data_size);
}
__syncwarp();
if (t == 0) {
if (not pages.empty()) pages[ck_g.first_page] = page_g;
if (page_grstats) page_grstats[ck_g.first_page] = pagestats_g;
}
num_pages = 1;
}
__syncwarp();
    // This loop goes over one page fragment at a time and adds it to the current page.
    // When the page size crosses a particular limit, it moves on to the next page, and the next
    // page fragment gets added to that one.
    // This loop doesn't actually deal with the data; it only cares about the number of rows and
    // the page size.
do {
uint32_t fragment_data_size, max_page_size, minmax_len = 0;
__syncwarp();
if (num_rows < ck_g.num_rows) {
if (t == 0) { frag_g = ck_g.fragments[fragments_in_chunk]; }
if (!t && ck_g.stats && col_g.stats_dtype == dtype_string) {
minmax_len = max(ck_g.stats[fragments_in_chunk].min_value.str_val.length,
ck_g.stats[fragments_in_chunk].max_value.str_val.length);
}
} else if (!t) {
frag_g.fragment_data_size = 0;
frag_g.num_rows = 0;
}
__syncwarp();
if (ck_g.has_dictionary && fragments_in_chunk < ck_g.num_dict_fragments) {
fragment_data_size =
frag_g.num_leaf_values * 2; // Assume worst-case of 2-bytes per dictionary index
} else {
fragment_data_size = frag_g.fragment_data_size;
}
// TODO (dm): this convoluted logic to limit page size needs refactoring
max_page_size = (values_in_page * 2 >= ck_g.num_values)
? 256 * 1024
: (values_in_page * 3 >= ck_g.num_values) ? 384 * 1024 : 512 * 1024;
if (num_rows >= ck_g.num_rows ||
(values_in_page > 0 &&
(page_size + fragment_data_size > max_page_size ||
(ck_g.has_dictionary && fragments_in_chunk == ck_g.num_dict_fragments)))) {
uint32_t dict_bits_plus1;
if (ck_g.has_dictionary && page_start < ck_g.num_dict_fragments) {
uint32_t dict_bits;
if (num_dict_entries <= 2) {
dict_bits = 1;
} else if (num_dict_entries <= 4) {
dict_bits = 2;
} else if (num_dict_entries <= 16) {
dict_bits = 4;
} else if (num_dict_entries <= 256) {
dict_bits = 8;
} else if (num_dict_entries <= 4096) {
dict_bits = 12;
} else {
dict_bits = 16;
}
page_size = 1 + 5 + ((values_in_page * dict_bits + 7) >> 3) + (values_in_page >> 8);
dict_bits_plus1 = dict_bits + 1;
} else {
dict_bits_plus1 = 0;
}
if (!t) {
page_g.num_fragments = fragments_in_chunk - page_start;
page_g.chunk = &chunks[blockIdx.y][blockIdx.x];
page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x;
page_g.page_type = PageType::DATA_PAGE;
page_g.dict_bits_plus1 = dict_bits_plus1;
page_g.hdr_size = 0;
page_g.max_hdr_size = 32; // Max size excluding statistics
if (ck_g.stats) {
uint32_t stats_hdr_len = 16;
if (col_g.stats_dtype == dtype_string) {
stats_hdr_len += 5 * 3 + 2 * max_stats_len;
} else {
stats_hdr_len += ((col_g.stats_dtype >= dtype_int64) ? 10 : 5) * 3;
}
page_g.max_hdr_size += stats_hdr_len;
}
page_g.page_data = ck_g.uncompressed_bfr + page_offset;
page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset;
page_g.start_row = cur_row;
page_g.num_rows = rows_in_page;
page_g.num_leaf_values = leaf_values_in_page;
page_g.num_values = values_in_page;
uint32_t def_level_bits = col_g.num_def_level_bits();
uint32_t rep_level_bits = col_g.num_rep_level_bits();
// Run length = 4, max(rle/bitpack header) = 5, add one byte per 256 values for overhead
// TODO (dm): Improve readability of these calculations.
uint32_t def_level_size =
(def_level_bits != 0)
? 4 + 5 + ((def_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8)
: 0;
uint32_t rep_level_size =
(rep_level_bits != 0)
? 4 + 5 + ((rep_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8)
: 0;
page_g.max_data_size = page_size + def_level_size + rep_level_size;
pagestats_g.start_chunk = ck_g.first_fragment + page_start;
pagestats_g.num_chunks = page_g.num_fragments;
page_offset += page_g.max_hdr_size + page_g.max_data_size;
comp_page_offset += page_g.max_hdr_size + GetMaxCompressedBfrSize(page_g.max_data_size);
cur_row += rows_in_page;
ck_max_stats_len = max(ck_max_stats_len, max_stats_len);
}
__syncwarp();
if (t == 0) {
if (not pages.empty()) { pages[ck_g.first_page + num_pages] = page_g; }
if (page_grstats) { page_grstats[ck_g.first_page + num_pages] = pagestats_g; }
}
num_pages++;
page_size = 0;
rows_in_page = 0;
values_in_page = 0;
leaf_values_in_page = 0;
page_start = fragments_in_chunk;
max_stats_len = 0;
}
max_stats_len = max(max_stats_len, minmax_len);
num_dict_entries += frag_g.num_dict_vals;
page_size += fragment_data_size;
rows_in_page += frag_g.num_rows;
values_in_page += frag_g.num_values;
leaf_values_in_page += frag_g.num_leaf_values;
num_rows += frag_g.num_rows;
fragments_in_chunk++;
} while (frag_g.num_rows != 0);
__syncwarp();
if (!t) {
if (ck_g.ck_stat_size == 0 && ck_g.stats) {
uint32_t ck_stat_size = 48 + 2 * ck_max_stats_len;
page_offset += ck_stat_size;
comp_page_offset += ck_stat_size;
ck_g.ck_stat_size = ck_stat_size;
}
ck_g.num_pages = num_pages;
ck_g.bfr_size = page_offset;
ck_g.compressed_size = comp_page_offset;
pagestats_g.start_chunk = ck_g.first_page + ck_g.has_dictionary; // Exclude dictionary
pagestats_g.num_chunks = num_pages - ck_g.has_dictionary;
}
}
__syncthreads();
if (t == 0) {
if (not pages.empty()) ck_g.pages = &pages[ck_g.first_page];
chunks[blockIdx.y][blockIdx.x] = ck_g;
if (chunk_grstats) chunk_grstats[blockIdx.y * num_columns + blockIdx.x] = pagestats_g;
}
}
/**
 * @brief Mask table giving how many consecutive repeats are needed to code a repeat run,
 * indexed by [nbits - 1]
*/
static __device__ __constant__ uint32_t kRleRunMask[16] = {
0x00ffffff, 0x0fff, 0x00ff, 0x3f, 0x0f, 0x0f, 0x7, 0x7, 0x3, 0x3, 0x3, 0x3, 0x1, 0x1, 0x1, 0x1};
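// A rough reading of the table above: entry [nbits - 1] has as many set bits as consecutive
// "value equals the next value" flags that must be seen before a repeat run is started, e.g.
// 24 repeats for 1-bit values (0x00ffffff), 8 for 3-bit values (0xff), and only 1 for 13..16-bit
// values, where repeating pays off almost immediately.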
/**
* @brief Variable-length encode an integer
*/
inline __device__ uint8_t *VlqEncode(uint8_t *p, uint32_t v)
{
while (v > 0x7f) {
*p++ = (v | 0x80);
v >>= 7;
}
*p++ = v;
return p;
}
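// Example: this is the standard unsigned LEB128 varint, low 7 bits first with the high bit as a
// continuation flag. VlqEncode(p, 300) writes the two bytes 0xac, 0x02, while any value <= 0x7f
// occupies a single byte.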
/**
* @brief Pack literal values in output bitstream (1,2,4,8,12 or 16 bits per value)
*/
inline __device__ void PackLiterals(
uint8_t *dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t)
{
if (w == 1 || w == 2 || w == 4 || w == 8 || w == 12 || w == 16) {
if (t <= (count | 0x1f)) {
if (w == 1 || w == 2 || w == 4) {
uint32_t mask = 0;
if (w == 1) {
v |= shuffle_xor(v, 1) << 1;
v |= shuffle_xor(v, 2) << 2;
v |= shuffle_xor(v, 4) << 4;
mask = 0x7;
} else if (w == 2) {
v |= shuffle_xor(v, 1) << 2;
v |= shuffle_xor(v, 2) << 4;
mask = 0x3;
} else if (w == 4) {
v |= shuffle_xor(v, 1) << 4;
mask = 0x1;
}
if (t < count && mask && !(t & mask)) { dst[(t * w) >> 3] = v; }
return;
} else if (w == 8) {
if (t < count) { dst[t] = v; }
return;
} else if (w == 12) {
v |= shuffle_xor(v, 1) << 12;
if (t < count && !(t & 1)) {
dst[(t >> 1) * 3 + 0] = v;
dst[(t >> 1) * 3 + 1] = v >> 8;
dst[(t >> 1) * 3 + 2] = v >> 16;
}
return;
} else if (w == 16) {
if (t < count) {
dst[t * 2 + 0] = v;
dst[t * 2 + 1] = v >> 8;
}
return;
}
} else {
return;
}
} else {
// Scratch space to temporarily write to. Needed because we will use atomics to write 32 bit
// words but the destination mem may not be a multiple of 4 bytes.
// TODO (dm): This assumes blockdim = 128 and max bits per value = 16. Reduce magic numbers.
__shared__ uint32_t scratch[64];
if (t < 64) { scratch[t] = 0; }
__syncthreads();
if (t <= count) {
uint64_t v64 = v;
v64 <<= (t * w) & 0x1f;
// Copy 64 bit word into two 32 bit words while following C++ strict aliasing rules.
uint32_t v32[2];
memcpy(&v32, &v64, sizeof(uint64_t));
// Atomically write result to scratch
if (v32[0]) { atomicOr(scratch + ((t * w) >> 5), v32[0]); }
if (v32[1]) { atomicOr(scratch + ((t * w) >> 5) + 1, v32[1]); }
}
__syncthreads();
// Copy scratch data to final destination
auto available_bytes = (count * w + 7) / 8;
auto scratch_bytes = reinterpret_cast<char *>(&scratch[0]);
if (t < available_bytes) { dst[t] = scratch_bytes[t]; }
if (t + 128 < available_bytes) { dst[t + 128] = scratch_bytes[t + 128]; }
__syncthreads();
}
}
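// Example of the aligned fast path above: with w = 4 and the eight values 0..7, each value lands
// at bit offset t * w, so the output is the four bytes 0x10, 0x32, 0x54, 0x76.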
/**
* @brief RLE encoder
*
* @param[in,out] s Page encode state
* @param[in] numvals Total count of input values
* @param[in] nbits number of bits per symbol (1..16)
* @param[in] flush nonzero if last batch in block
* @param[in] t thread id (0..127)
*/
static __device__ void RleEncode(
page_enc_state_s *s, uint32_t numvals, uint32_t nbits, uint32_t flush, uint32_t t)
{
uint32_t rle_pos = s->rle_pos;
uint32_t rle_run = s->rle_run;
while (rle_pos < numvals || (flush && rle_run)) {
uint32_t pos = rle_pos + t;
if (rle_run > 0 && !(rle_run & 1)) {
// Currently in a long repeat run
uint32_t mask = ballot(pos < numvals && s->vals[pos & (rle_buffer_size - 1)] == s->run_val);
uint32_t rle_rpt_count, max_rpt_count;
if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; }
__syncthreads();
if (t < 32) {
uint32_t c32 = ballot(t >= 4 || s->rpt_map[t] != 0xffffffffu);
if (!t) {
uint32_t last_idx = __ffs(c32) - 1;
s->rle_rpt_count =
last_idx * 32 + ((last_idx < 4) ? __ffs(~s->rpt_map[last_idx]) - 1 : 0);
}
}
__syncthreads();
max_rpt_count = min(numvals - rle_pos, 128);
rle_rpt_count = s->rle_rpt_count;
rle_run += rle_rpt_count << 1;
rle_pos += rle_rpt_count;
if (rle_rpt_count < max_rpt_count || (flush && rle_pos == numvals)) {
if (t == 0) {
uint32_t const run_val = s->run_val;
uint8_t *dst = VlqEncode(s->rle_out, rle_run);
*dst++ = run_val;
if (nbits > 8) { *dst++ = run_val >> 8; }
s->rle_out = dst;
}
rle_run = 0;
}
} else {
// New run or in a literal run
uint32_t v0 = s->vals[pos & (rle_buffer_size - 1)];
uint32_t v1 = s->vals[(pos + 1) & (rle_buffer_size - 1)];
uint32_t mask = ballot(pos + 1 < numvals && v0 == v1);
uint32_t maxvals = min(numvals - rle_pos, 128);
uint32_t rle_lit_count, rle_rpt_count;
if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; }
__syncthreads();
if (t < 32) {
// Repeat run can only start on a multiple of 8 values
uint32_t idx8 = (t * 8) >> 5;
uint32_t pos8 = (t * 8) & 0x1f;
uint32_t m0 = (idx8 < 4) ? s->rpt_map[idx8] : 0;
uint32_t m1 = (idx8 < 3) ? s->rpt_map[idx8 + 1] : 0;
uint32_t needed_mask = kRleRunMask[nbits - 1];
mask = ballot((__funnelshift_r(m0, m1, pos8) & needed_mask) == needed_mask);
if (!t) {
uint32_t rle_run_start = (mask != 0) ? min((__ffs(mask) - 1) * 8, maxvals) : maxvals;
uint32_t rpt_len = 0;
if (rle_run_start < maxvals) {
uint32_t idx_cur = rle_run_start >> 5;
uint32_t idx_ofs = rle_run_start & 0x1f;
while (idx_cur < 4) {
m0 = (idx_cur < 4) ? s->rpt_map[idx_cur] : 0;
m1 = (idx_cur < 3) ? s->rpt_map[idx_cur + 1] : 0;
mask = ~__funnelshift_r(m0, m1, idx_ofs);
if (mask != 0) {
rpt_len += __ffs(mask) - 1;
break;
}
rpt_len += 32;
idx_cur++;
}
}
s->rle_lit_count = rle_run_start;
s->rle_rpt_count = min(rpt_len, maxvals - rle_run_start);
}
}
__syncthreads();
rle_lit_count = s->rle_lit_count;
rle_rpt_count = s->rle_rpt_count;
if (rle_lit_count != 0 || (rle_run != 0 && rle_rpt_count != 0)) {
uint32_t lit_div8;
bool need_more_data = false;
if (!flush && rle_pos + rle_lit_count == numvals) {
// Wait for more data
rle_lit_count -= min(rle_lit_count, 24);
need_more_data = true;
}
if (rle_lit_count != 0) {
lit_div8 = (rle_lit_count + ((flush && rle_pos + rle_lit_count == numvals) ? 7 : 0)) >> 3;
if (rle_run + lit_div8 * 2 > 0x7f) {
lit_div8 = 0x3f - (rle_run >> 1); // Limit to fixed 1-byte header (504 literals)
rle_rpt_count = 0; // Defer repeat run
}
if (lit_div8 != 0) {
uint8_t *dst = s->rle_out + 1 + (rle_run >> 1) * nbits;
PackLiterals(dst, (rle_pos + t < numvals) ? v0 : 0, lit_div8 * 8, nbits, t);
rle_run = (rle_run + lit_div8 * 2) | 1;
rle_pos = min(rle_pos + lit_div8 * 8, numvals);
}
}
if (rle_run >= ((rle_rpt_count != 0 || (flush && rle_pos == numvals)) ? 0x03 : 0x7f)) {
__syncthreads();
// Complete literal run
if (!t) {
uint8_t *dst = s->rle_out;
dst[0] = rle_run; // At most 0x7f
dst += 1 + nbits * (rle_run >> 1);
s->rle_out = dst;
}
rle_run = 0;
}
if (need_more_data) { break; }
}
// Start a repeat run
if (rle_rpt_count != 0) {
if (t == s->rle_lit_count) { s->run_val = v0; }
rle_run = rle_rpt_count * 2;
rle_pos += rle_rpt_count;
if (rle_pos + 1 == numvals && !flush) { break; }
}
}
__syncthreads();
}
__syncthreads();
if (!t) {
s->rle_run = rle_run;
s->rle_pos = rle_pos;
s->rle_numvals = numvals;
}
}
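// A small worked example of the RLE/bit-packed hybrid stream produced above: the run header is a
// varint whose LSB selects the mode. Ten copies of the value 3 at a bit width of 4 become a repeat
// run with header 10 << 1 = 0x14 followed by the value byte 0x03, while eight distinct 4-bit
// values become a literal run with header (1 << 1) | 1 = 0x03 followed by the eight values packed
// into four bytes.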
/**
* @brief PLAIN bool encoder
*
* @param[in,out] s Page encode state
* @param[in] numvals Total count of input values
* @param[in] flush nonzero if last batch in block
* @param[in] t thread id (0..127)
*/
static __device__ void PlainBoolEncode(page_enc_state_s *s,
uint32_t numvals,
uint32_t flush,
uint32_t t)
{
uint32_t rle_pos = s->rle_pos;
uint8_t *dst = s->rle_out;
while (rle_pos < numvals) {
uint32_t pos = rle_pos + t;
uint32_t v = (pos < numvals) ? s->vals[pos & (rle_buffer_size - 1)] : 0;
uint32_t n = min(numvals - rle_pos, 128);
uint32_t nbytes = (n + ((flush) ? 7 : 0)) >> 3;
if (!nbytes) { break; }
v |= shuffle_xor(v, 1) << 1;
v |= shuffle_xor(v, 2) << 2;
v |= shuffle_xor(v, 4) << 4;
if (t < n && !(t & 7)) { dst[t >> 3] = v; }
rle_pos = min(rle_pos + nbytes * 8, numvals);
dst += nbytes;
}
__syncthreads();
if (!t) {
s->rle_pos = rle_pos;
s->rle_numvals = numvals;
s->rle_out = dst;
}
}
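// Plain-encoded booleans are bit-packed eight per byte, least-significant bit first: e.g. the
// eight input values 1,0,1,1,0,0,0,0 become the single output byte 0x0d.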
constexpr auto julian_calendar_epoch_diff()
{
using namespace cuda::std::chrono;
using namespace cuda::std::chrono_literals;
return sys_days{January / 1 / 1970} - (sys_days{November / 24 / -4713} + 12h);
}
/**
* @brief Converts a sys_time<nanoseconds> into a pair with nanoseconds since midnight and number of
* Julian days. Does not deal with time zones. Used by INT96 code.
*
* @param ns number of nanoseconds since epoch
* @return std::pair<nanoseconds,days> where nanoseconds is the number of nanoseconds
* elapsed in the day and days is the number of days from Julian epoch.
*/
static __device__ std::pair<cuda::std::chrono::nanoseconds, cuda::std::chrono::days>
convert_nanoseconds(cuda::std::chrono::sys_time<cuda::std::chrono::nanoseconds> const ns)
{
using namespace cuda::std::chrono;
auto const nanosecond_ticks = ns.time_since_epoch();
auto const gregorian_days = floor<days>(nanosecond_ticks);
auto const julian_days = gregorian_days + ceil<days>(julian_calendar_epoch_diff());
auto const last_day_ticks = nanosecond_ticks - duration_cast<nanoseconds>(gregorian_days);
return {last_day_ticks, julian_days};
}
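// Sanity check on the conversion above: for ns == 0 (1970-01-01T00:00:00 UTC), gregorian_days is
// 0, last_day_ticks is 0, and julian_days comes out as 2440588, the well-known Julian day number
// that Parquet INT96 timestamps use for the Unix epoch.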
// blockDim(128, 1, 1)
template <int block_size>
__global__ void __launch_bounds__(128, 8)
gpuEncodePages(device_span<gpu::EncPage> pages,
device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat)
{
__shared__ __align__(8) page_enc_state_s state_g;
using block_scan = cub::BlockScan<uint32_t, block_size>;
__shared__ typename block_scan::TempStorage temp_storage;
page_enc_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len_in, dtype_len_out;
int32_t dict_bits;
if (t == 0) {
s->page = pages[blockIdx.x];
s->ck = *s->page.chunk;
s->col = *s->ck.col_desc;
s->cur = s->page.page_data + s->page.max_hdr_size;
}
__syncthreads();
// Encode Repetition and Definition levels
if (s->page.page_type != PageType::DICTIONARY_PAGE &&
(s->col.num_def_level_bits()) != 0 && // This means max definition level is not 0 (nullable)
(s->col.num_rep_level_bits()) == 0 // This means there are no repetition levels (non-list)
) {
// Calculate definition levels from validity
uint32_t def_lvl_bits = s->col.num_def_level_bits();
if (def_lvl_bits != 0) {
if (!t) {
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = s->cur + 4;
}
__syncthreads();
while (s->rle_numvals < s->page.num_rows) {
uint32_t rle_numvals = s->rle_numvals;
uint32_t nrows = min(s->page.num_rows - rle_numvals, 128);
uint32_t row = s->page.start_row + rle_numvals + t;
        // The definition level encodes validity: check the validity mask and, if the value is
        // valid, set def_lvl accordingly and store it in s->vals, which is then given to RleEncode
uint32_t def_lvl = [&]() {
bool within_bounds = rle_numvals + t < s->page.num_rows && row < s->col.num_rows;
if (not within_bounds) { return 0u; }
uint32_t def = 0;
size_type l = 0;
bool is_col_struct = false;
auto col = *s->col.parent_column;
do {
// If col not nullable then it does not contribute to def levels
if (s->col.nullability[l]) {
if (col.is_valid(row)) {
++def;
} else {
// We have found the shallowest level at which this row is null
break;
}
}
is_col_struct = (col.type().id() == type_id::STRUCT);
if (is_col_struct) {
row += col.offset();
col = col.child(0);
++l;
}
} while (is_col_struct);
return def;
}();
s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = def_lvl;
__syncthreads();
rle_numvals += nrows;
RleEncode(s, rle_numvals, def_lvl_bits, (rle_numvals == s->page.num_rows), t);
__syncthreads();
}
if (t < 32) {
uint8_t *cur = s->cur;
uint8_t *rle_out = s->rle_out;
if (t < 4) {
uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4;
cur[t] = rle_bytes >> (t * 8);
}
__syncwarp();
if (t == 0) { s->cur = rle_out; }
}
}
} else if (s->page.page_type != PageType::DICTIONARY_PAGE &&
s->col.num_rep_level_bits() != 0 // This means there ARE repetition levels (has list)
) {
auto encode_levels = [&](uint8_t const *lvl_val_data, uint32_t nbits) {
// For list types, the repetition and definition levels are pre-calculated. We just need to
// encode and write them now.
if (!t) {
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = s->cur + 4;
}
__syncthreads();
size_type page_first_val_idx = s->col.level_offsets[s->page.start_row];
size_type col_last_val_idx = s->col.level_offsets[s->col.num_rows];
while (s->rle_numvals < s->page.num_values) {
uint32_t rle_numvals = s->rle_numvals;
uint32_t nvals = min(s->page.num_values - rle_numvals, 128);
uint32_t idx = page_first_val_idx + rle_numvals + t;
uint32_t lvl_val =
(rle_numvals + t < s->page.num_values && idx < col_last_val_idx) ? lvl_val_data[idx] : 0;
s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = lvl_val;
__syncthreads();
rle_numvals += nvals;
RleEncode(s, rle_numvals, nbits, (rle_numvals == s->page.num_values), t);
__syncthreads();
}
if (t < 32) {
uint8_t *cur = s->cur;
uint8_t *rle_out = s->rle_out;
if (t < 4) {
uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4;
cur[t] = rle_bytes >> (t * 8);
}
__syncwarp();
if (t == 0) { s->cur = rle_out; }
}
};
encode_levels(s->col.rep_values, s->col.num_rep_level_bits());
__syncthreads();
encode_levels(s->col.def_values, s->col.num_def_level_bits());
}
// Encode data values
__syncthreads();
dtype = s->col.physical_type;
dtype_len_out =
(dtype == INT96) ? 12 : (dtype == INT64 || dtype == DOUBLE) ? 8 : (dtype == BOOLEAN) ? 1 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.leaf_column);
} else if (dtype == INT96) {
dtype_len_in = 8;
} else {
dtype_len_in = dtype_len_out;
}
dict_bits = (dtype == BOOLEAN) ? 1 : (s->page.dict_bits_plus1 - 1);
if (t == 0) {
uint8_t *dst = s->cur;
s->rle_run = 0;
s->rle_pos = 0;
s->rle_numvals = 0;
s->rle_out = dst;
if (dict_bits >= 0 && dtype != BOOLEAN) {
dst[0] = dict_bits;
s->rle_out = dst + 1;
}
s->page_start_val = s->page.start_row;
if (s->col.parent_column != nullptr) {
auto col = *(s->col.parent_column);
auto current_page_start_val = s->page_start_val;
while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) {
if (col.type().id() == type_id::STRUCT) {
current_page_start_val += col.offset();
col = col.child(0);
} else {
current_page_start_val = col.child(lists_column_view::offsets_column_index)
.element<size_type>(current_page_start_val + col.offset());
col = col.child(lists_column_view::child_column_index);
}
}
s->page_start_val = current_page_start_val;
}
}
__syncthreads();
for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) {
uint32_t nvals = min(s->page.num_leaf_values - cur_val_idx, 128);
uint32_t val_idx = s->page_start_val + cur_val_idx + t;
uint32_t is_valid, len, pos;
if (s->page.page_type == PageType::DICTIONARY_PAGE) {
is_valid = (cur_val_idx + t < s->page.num_leaf_values);
val_idx = (is_valid) ? s->col.dict_data[val_idx] : val_idx;
} else {
is_valid = (val_idx < s->col.leaf_column->size() && cur_val_idx + t < s->page.num_leaf_values)
? s->col.leaf_column->is_valid(val_idx)
: 0;
}
cur_val_idx += nvals;
if (dict_bits >= 0) {
// Dictionary encoding
if (dict_bits > 0) {
uint32_t rle_numvals;
uint32_t rle_numvals_in_block;
block_scan(temp_storage).ExclusiveSum(is_valid, pos, rle_numvals_in_block);
rle_numvals = s->rle_numvals;
if (is_valid) {
uint32_t v;
if (dtype == BOOLEAN) {
v = s->col.leaf_column->element<uint8_t>(val_idx);
} else {
v = s->col.dict_index[val_idx];
}
s->vals[(rle_numvals + pos) & (rle_buffer_size - 1)] = v;
}
rle_numvals += rle_numvals_in_block;
__syncthreads();
if ((!enable_bool_rle) && (dtype == BOOLEAN)) {
PlainBoolEncode(s, rle_numvals, (cur_val_idx == s->page.num_leaf_values), t);
} else {
RleEncode(s, rle_numvals, dict_bits, (cur_val_idx == s->page.num_leaf_values), t);
}
__syncthreads();
}
if (t == 0) { s->cur = s->rle_out; }
__syncthreads();
} else {
// Non-dictionary encoding
uint8_t *dst = s->cur;
if (is_valid) {
len = dtype_len_out;
if (dtype == BYTE_ARRAY) {
len += s->col.leaf_column->element<string_view>(val_idx).size_bytes();
}
} else {
len = 0;
}
uint32_t total_len = 0;
block_scan(temp_storage).ExclusiveSum(len, pos, total_len);
__syncthreads();
if (t == 0) { s->cur = dst + total_len; }
if (is_valid) {
switch (dtype) {
case INT32:
case FLOAT: {
int32_t v;
if (dtype_len_in == 4)
v = s->col.leaf_column->element<int32_t>(val_idx);
else if (dtype_len_in == 2)
v = s->col.leaf_column->element<int16_t>(val_idx);
else
v = s->col.leaf_column->element<int8_t>(val_idx);
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
} break;
case INT64: {
int64_t v = s->col.leaf_column->element<int64_t>(val_idx);
int32_t ts_scale = s->col.ts_scale;
if (ts_scale != 0) {
if (ts_scale < 0) {
v /= -ts_scale;
} else {
v *= ts_scale;
}
}
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
dst[pos + 4] = v >> 32;
dst[pos + 5] = v >> 40;
dst[pos + 6] = v >> 48;
dst[pos + 7] = v >> 56;
} break;
case INT96: {
int64_t v = s->col.leaf_column->element<int64_t>(val_idx);
int32_t ts_scale = s->col.ts_scale;
if (ts_scale != 0) {
if (ts_scale < 0) {
v /= -ts_scale;
} else {
v *= ts_scale;
}
}
auto const ret = convert_nanoseconds([&]() {
using namespace cuda::std::chrono;
switch (s->col.leaf_column->type().id()) {
case type_id::TIMESTAMP_SECONDS:
case type_id::TIMESTAMP_MILLISECONDS: {
return sys_time<nanoseconds>{milliseconds{v}};
} break;
case type_id::TIMESTAMP_MICROSECONDS:
case type_id::TIMESTAMP_NANOSECONDS: {
return sys_time<nanoseconds>{microseconds{v}};
} break;
}
return sys_time<nanoseconds>{microseconds{0}};
}());
            // Write the 12 bytes of fixed-length INT96 data: 8 bytes of nanoseconds within the day
            // followed by 4 bytes of the Julian day.
v = ret.first.count();
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
dst[pos + 4] = v >> 32;
dst[pos + 5] = v >> 40;
dst[pos + 6] = v >> 48;
dst[pos + 7] = v >> 56;
uint32_t w = ret.second.count();
dst[pos + 8] = w;
dst[pos + 9] = w >> 8;
dst[pos + 10] = w >> 16;
dst[pos + 11] = w >> 24;
} break;
case DOUBLE: {
auto v = s->col.leaf_column->element<double>(val_idx);
memcpy(dst + pos, &v, 8);
} break;
case BYTE_ARRAY: {
auto str = s->col.leaf_column->element<string_view>(val_idx);
uint32_t v = len - 4; // string length
dst[pos + 0] = v;
dst[pos + 1] = v >> 8;
dst[pos + 2] = v >> 16;
dst[pos + 3] = v >> 24;
if (v != 0) memcpy(dst + pos + 4, str.data(), v);
} break;
}
}
__syncthreads();
}
}
if (t == 0) {
uint8_t *base = s->page.page_data + s->page.max_hdr_size;
uint32_t actual_data_size = static_cast<uint32_t>(s->cur - base);
uint32_t compressed_bfr_size = GetMaxCompressedBfrSize(actual_data_size);
s->page.max_data_size = actual_data_size;
s->comp_in.srcDevice = base;
s->comp_in.srcSize = actual_data_size;
s->comp_in.dstDevice = s->page.compressed_data + s->page.max_hdr_size;
s->comp_in.dstSize = compressed_bfr_size;
s->comp_stat.bytes_written = 0;
s->comp_stat.status = ~0;
s->comp_stat.reserved = 0;
}
__syncthreads();
if (t == 0) {
pages[blockIdx.x] = s->page;
if (not comp_in.empty()) comp_in[blockIdx.x] = s->comp_in;
if (not comp_stat.empty()) {
comp_stat[blockIdx.x] = s->comp_stat;
pages[blockIdx.x].comp_stat = &comp_stat[blockIdx.x];
}
}
}
// blockDim(128, 1, 1)
__global__ void __launch_bounds__(128) gpuDecideCompression(device_span<EncColumnChunk> chunks)
{
// After changing the way structs are loaded from coop to normal, this kernel has no business
// being launched with 128 thread block. It can easily be a single warp.
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(4) unsigned int error_count;
using warp_reduce = cub::WarpReduce<uint32_t>;
__shared__ typename warp_reduce::TempStorage temp_storage[2];
__shared__ volatile bool has_compression;
uint32_t t = threadIdx.x;
uint32_t uncompressed_data_size = 0;
uint32_t compressed_data_size = 0;
uint32_t num_pages;
if (t == 0) {
ck_g = chunks[blockIdx.x];
atomicAnd(&error_count, 0);
has_compression = false;
}
__syncthreads();
if (t < 32) {
num_pages = ck_g.num_pages;
for (uint32_t page = t; page < num_pages; page += 32) {
auto &curr_page = ck_g.pages[page];
uint32_t page_data_size = curr_page.max_data_size;
uncompressed_data_size += page_data_size;
if (auto comp_status = curr_page.comp_stat; comp_status != nullptr) {
has_compression = true;
compressed_data_size += comp_status->bytes_written;
if (comp_status->status != 0) { atomicAdd(&error_count, 1); }
}
}
uncompressed_data_size = warp_reduce(temp_storage[0]).Sum(uncompressed_data_size);
compressed_data_size = warp_reduce(temp_storage[1]).Sum(compressed_data_size);
}
__syncthreads();
if (t == 0) {
bool is_compressed;
if (has_compression) {
uint32_t compression_error = atomicAdd(&error_count, 0);
is_compressed = (!compression_error && compressed_data_size < uncompressed_data_size);
} else {
is_compressed = false;
}
chunks[blockIdx.x].is_compressed = is_compressed;
chunks[blockIdx.x].bfr_size = uncompressed_data_size;
chunks[blockIdx.x].compressed_size =
(is_compressed) ? compressed_data_size : uncompressed_data_size;
}
}
/**
* Minimal thrift compact protocol support
*/
inline __device__ uint8_t *cpw_put_uint32(uint8_t *p, uint32_t v)
{
while (v > 0x7f) {
*p++ = v | 0x80;
v >>= 7;
}
*p++ = v;
return p;
}
inline __device__ uint8_t *cpw_put_uint64(uint8_t *p, uint64_t v)
{
while (v > 0x7f) {
*p++ = v | 0x80;
v >>= 7;
}
*p++ = v;
return p;
}
inline __device__ uint8_t *cpw_put_int32(uint8_t *p, int32_t v)
{
int32_t s = (v < 0);
return cpw_put_uint32(p, (v ^ -s) * 2 + s);
}
inline __device__ uint8_t *cpw_put_int64(uint8_t *p, int64_t v)
{
int64_t s = (v < 0);
return cpw_put_uint64(p, (v ^ -s) * 2 + s);
}
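// Both signed writers above apply the usual zigzag transform before the varint: (v ^ -s) * 2 + s
// maps 0 -> 0, -1 -> 1, 1 -> 2 and -2 -> 3, so small magnitudes of either sign stay small. For
// example, cpw_put_int32(p, -2) zigzags to 3 and emits the single byte 0x03.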
inline __device__ uint8_t *cpw_put_fldh(uint8_t *p, int f, int cur, int t)
{
if (f > cur && f <= cur + 15) {
*p++ = ((f - cur) << 4) | t;
return p;
} else {
*p++ = t;
return cpw_put_int32(p, f);
}
}
class header_encoder {
uint8_t *current_header_ptr;
int current_field_index;
public:
inline __device__ header_encoder(uint8_t *header_start)
: current_header_ptr(header_start), current_field_index(0)
{
}
inline __device__ void field_struct_begin(int field)
{
current_header_ptr =
cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_STRUCT);
current_field_index = 0;
}
inline __device__ void field_struct_end(int field)
{
*current_header_ptr++ = 0;
current_field_index = field;
}
template <typename T>
inline __device__ void field_int32(int field, T value)
{
current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I32);
current_header_ptr = cpw_put_int32(current_header_ptr, static_cast<int32_t>(value));
current_field_index = field;
}
template <typename T>
inline __device__ void field_int64(int field, T value)
{
current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I64);
current_header_ptr = cpw_put_int64(current_header_ptr, static_cast<int64_t>(value));
current_field_index = field;
}
inline __device__ void field_binary(int field, const void *value, uint32_t length)
{
current_header_ptr =
cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_BINARY);
current_header_ptr = cpw_put_uint32(current_header_ptr, length);
memcpy(current_header_ptr, value, length);
current_header_ptr += length;
current_field_index = field;
}
inline __device__ void end(uint8_t **header_end, bool termination_flag = true)
{
if (termination_flag == false) { *current_header_ptr++ = 0; }
*header_end = current_header_ptr;
}
inline __device__ uint8_t *get_ptr(void) { return current_header_ptr; }
inline __device__ void set_ptr(uint8_t *ptr) { current_header_ptr = ptr; }
};
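// A sketch of how header_encoder is driven (field ids and names below are placeholders,
// not taken from any real schema):
//   header_encoder enc(dst);
//   enc.field_int32(1, some_enum);    // field id 1, i32
//   enc.field_struct_begin(5);        // open a nested struct at field id 5
//   enc.field_int64(3, some_count);
//   enc.field_struct_end(5);
//   enc.end(&dst_end, false);         // 'false' appends the trailing STOP byte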
__device__ uint8_t *EncodeStatistics(uint8_t *start,
const statistics_chunk *s,
uint8_t dtype,
float *fp_scratch)
{
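  // Emits a parquet Statistics struct: field 3 = null_count and, when min/max are known,
  // field 5 = max_value and field 6 = min_value (see parquet.thrift).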
uint8_t *end, dtype_len;
switch (dtype) {
case dtype_bool: dtype_len = 1; break;
case dtype_int8:
case dtype_int16:
case dtype_int32:
case dtype_date32:
case dtype_float32: dtype_len = 4; break;
case dtype_int64:
case dtype_timestamp64:
case dtype_float64:
case dtype_decimal64: dtype_len = 8; break;
case dtype_decimal128: dtype_len = 16; break;
case dtype_string:
default: dtype_len = 0; break;
}
header_encoder encoder(start);
encoder.field_int64(3, s->null_count);
if (s->has_minmax) {
const void *vmin, *vmax;
uint32_t lmin, lmax;
if (dtype == dtype_string) {
lmin = s->min_value.str_val.length;
vmin = s->min_value.str_val.ptr;
lmax = s->max_value.str_val.length;
vmax = s->max_value.str_val.ptr;
} else {
lmin = lmax = dtype_len;
if (dtype == dtype_float32) { // Convert from double to float32
fp_scratch[0] = s->min_value.fp_val;
fp_scratch[1] = s->max_value.fp_val;
vmin = &fp_scratch[0];
vmax = &fp_scratch[1];
} else {
vmin = &s->min_value;
vmax = &s->max_value;
}
}
encoder.field_binary(5, vmax, lmax);
encoder.field_binary(6, vmin, lmin);
}
encoder.end(&end);
return end;
}
// blockDim(128, 1, 1)
__global__ void __launch_bounds__(128)
gpuEncodePageHeaders(device_span<EncPage> pages,
device_span<gpu_inflate_status_s const> comp_stat,
device_span<statistics_chunk const> page_stats,
const statistics_chunk *chunk_stats)
{
// When this whole kernel becomes single thread, the following variables need not be __shared__
__shared__ __align__(8) parquet_column_device_view col_g;
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) EncPage page_g;
__shared__ __align__(8) float fp_scratch[2];
uint32_t t = threadIdx.x;
if (t == 0) {
uint8_t *hdr_start, *hdr_end;
uint32_t compressed_page_size, uncompressed_page_size;
page_g = pages[blockIdx.x];
ck_g = *page_g.chunk;
col_g = *ck_g.col_desc;
if (chunk_stats && &pages[blockIdx.x] == ck_g.pages) { // Is this the first page in a chunk?
hdr_start = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
hdr_end =
EncodeStatistics(hdr_start, &chunk_stats[page_g.chunk_id], col_g.stats_dtype, fp_scratch);
page_g.chunk->ck_stat_size = static_cast<uint32_t>(hdr_end - hdr_start);
}
uncompressed_page_size = page_g.max_data_size;
if (ck_g.is_compressed) {
hdr_start = page_g.compressed_data;
compressed_page_size = (uint32_t)comp_stat[blockIdx.x].bytes_written;
page_g.max_data_size = compressed_page_size;
} else {
hdr_start = page_g.page_data;
compressed_page_size = uncompressed_page_size;
}
header_encoder encoder(hdr_start);
PageType page_type = page_g.page_type;
// NOTE: For dictionary encoding, parquet v2 recommends using PLAIN in dictionary page and
// RLE_DICTIONARY in data page, but parquet v1 uses PLAIN_DICTIONARY in both dictionary and
// data pages (actual encoding is identical).
Encoding encoding;
if (enable_bool_rle) {
encoding = (col_g.physical_type != BOOLEAN)
? (page_type == PageType::DICTIONARY_PAGE || page_g.dict_bits_plus1 != 0)
? Encoding::PLAIN_DICTIONARY
: Encoding::PLAIN
: Encoding::RLE;
} else {
encoding = (page_type == PageType::DICTIONARY_PAGE || page_g.dict_bits_plus1 != 0)
? Encoding::PLAIN_DICTIONARY
: Encoding::PLAIN;
}
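    // Thrift field ids below follow parquet.thrift's PageHeader: 1 = type,
    // 2 = uncompressed_page_size, 3 = compressed_page_size; field 5 nests a DataPageHeader
    // and field 7 nests a DictionaryPageHeader.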
encoder.field_int32(1, page_type);
encoder.field_int32(2, uncompressed_page_size);
encoder.field_int32(3, compressed_page_size);
if (page_type == PageType::DATA_PAGE) {
// DataPageHeader
encoder.field_struct_begin(5);
encoder.field_int32(1, page_g.num_values); // NOTE: num_values != num_rows for list types
encoder.field_int32(2, encoding); // encoding
encoder.field_int32(3, Encoding::RLE); // definition_level_encoding
encoder.field_int32(4, Encoding::RLE); // repetition_level_encoding
// Optionally encode page-level statistics
if (not page_stats.empty()) {
encoder.field_struct_begin(5);
encoder.set_ptr(EncodeStatistics(
encoder.get_ptr(), &page_stats[blockIdx.x], col_g.stats_dtype, fp_scratch));
encoder.field_struct_end(5);
}
encoder.field_struct_end(5);
} else {
// DictionaryPageHeader
encoder.field_struct_begin(7);
encoder.field_int32(1, ck_g.total_dict_entries); // number of values in dictionary
encoder.field_int32(2, encoding);
encoder.field_struct_end(7);
}
encoder.end(&hdr_end, false);
page_g.hdr_size = (uint32_t)(hdr_end - hdr_start);
}
__syncthreads();
if (t == 0) pages[blockIdx.x] = page_g;
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024)
gpuGatherPages(device_span<EncColumnChunk> chunks, device_span<gpu::EncPage const> pages)
{
__shared__ __align__(8) EncColumnChunk ck_g;
__shared__ __align__(8) EncPage page_g;
uint32_t t = threadIdx.x;
uint8_t *dst, *dst_base;
const EncPage *first_page;
uint32_t num_pages, uncompressed_size;
if (t == 0) ck_g = chunks[blockIdx.x];
__syncthreads();
first_page = ck_g.pages;
num_pages = ck_g.num_pages;
dst = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr;
dst += ck_g.ck_stat_size; // Skip over chunk statistics
dst_base = dst;
uncompressed_size = ck_g.bfr_size;
for (uint32_t page = 0; page < num_pages; page++) {
const uint8_t *src;
uint32_t hdr_len, data_len;
if (t == 0) { page_g = first_page[page]; }
__syncthreads();
src = (ck_g.is_compressed) ? page_g.compressed_data : page_g.page_data;
// Copy page header
hdr_len = page_g.hdr_size;
memcpy_block<1024, true>(dst, src, hdr_len, t);
src += page_g.max_hdr_size;
dst += hdr_len;
// Copy page data
uncompressed_size += hdr_len;
data_len = page_g.max_data_size;
memcpy_block<1024, true>(dst, src, data_len, t);
dst += data_len;
__syncthreads();
if (!t && page == 0 && ck_g.has_dictionary) { ck_g.dictionary_size = hdr_len + data_len; }
}
if (t == 0) {
chunks[blockIdx.x].bfr_size = uncompressed_size;
chunks[blockIdx.x].compressed_size = (dst - dst_base);
if (ck_g.has_dictionary) { chunks[blockIdx.x].dictionary_size = ck_g.dictionary_size; }
}
}
/**
* @brief Functor to get definition level value for a nested struct column until the leaf level or
* the first list level.
*
*/
struct def_level_fn {
column_device_view const *parent_col;
uint8_t const *d_nullability;
uint8_t sub_level_start;
uint8_t curr_def_level;
__device__ uint32_t operator()(size_type i)
{
uint32_t def = curr_def_level;
uint8_t l = sub_level_start;
bool is_col_struct = false;
auto col = *parent_col;
do {
// If col not nullable then it does not contribute to def levels
if (d_nullability[l]) {
if (not col.nullable() or bit_is_set(col.null_mask(), i)) {
++def;
} else { // We have found the shallowest level at which this row is null
break;
}
}
is_col_struct = (col.type().id() == type_id::STRUCT);
if (is_col_struct) {
col = col.child(0);
++l;
}
} while (is_col_struct);
return def;
}
};
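// Worked example (illustrative): for a column of type struct<int> with both levels marked
// nullable and curr_def_level == 0, a row where the struct and the int are both valid yields
// def 2, a row whose struct is valid but whose int is null yields def 1, and a row whose
// struct itself is null yields def 0 (the loop stops at the shallowest null level).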
/**
* @brief Get the dremel offsets and repetition and definition levels for a LIST column
*
* The repetition and definition level values are ideally computed using a recursive call over a
* nested structure but in order to better utilize GPU resources, this function calculates them
* with a bottom up merge method.
*
* Given a LIST column of type `List<List<int>>` like so:
* ```
* col = {
* [],
* [[], [1, 2, 3], [4, 5]],
* [[]]
* }
* ```
* We can represent it in cudf format with two level of offsets like this:
* ```
* Level 0 offsets = {0, 0, 3, 5, 6}
* Level 1 offsets = {0, 0, 3, 5, 5}
* Values = {1, 2, 3, 4, 5}
* ```
* The desired result of this function is the repetition and definition level values that
* correspond to the data values:
* ```
* col = {[], [[], [1, 2, 3], [4, 5]], [[]]}
 * def = { 0, 1, 2, 2, 2, 2, 2, 1 }
 * rep = { 0, 0, 1, 2, 2, 1, 2, 0 }
* ```
*
* Since repetition and definition levels arrays contain a value for each empty list, the size of
* the rep/def level array can be given by
* ```
* rep_level.size() = size of leaf column + number of empty lists in level 0
* + number of empty lists in level 1 ...
* ```
*
* We start with finding the empty lists in the penultimate level and merging it with the indices
* of the leaf level. The values for the merge are the definition and repetition levels
* ```
* empties at level 1 = {0, 5}
* def values at 1 = {1, 1}
* rep values at 1 = {1, 1}
* indices at leaf = {0, 1, 2, 3, 4}
* def values at leaf = {2, 2, 2, 2, 2}
* rep values at leaf = {2, 2, 2, 2, 2}
* ```
*
* merged def values = {1, 2, 2, 2, 2, 2, 1}
* merged rep values = {1, 2, 2, 2, 2, 2, 1}
*
* The size of the rep/def values is now larger than the leaf values and the offsets need to be
* adjusted in order to point to the correct start indices. We do this with an exclusive scan over
* the indices of offsets of empty lists and adding to existing offsets.
* ```
* Level 1 new offsets = {0, 1, 4, 6, 7}
* ```
* Repetition values at the beginning of a list need to be decremented. We use the new offsets to
* scatter the rep value.
* ```
* merged rep values = {1, 2, 2, 2, 2, 2, 1}
* scatter (1, new offsets)
* new offsets = {0, 1, 4, 6, 7}
* new rep values = {1, 1, 2, 2, 1, 2, 1}
* ```
*
* Similarly we merge up all the way till level 0 offsets
*
* STRUCT COLUMNS :
* In case of struct columns, we don't have to merge struct levels with their children because a
* struct is the same size as its children. e.g. for a column `struct<int, float>`, if the row `i`
* is null, then the children columns `int` and `float` are also null at `i`. They also have the
* null entry represented in their respective null masks. So for any case of strictly struct based
* nesting, we can get the definition levels merely by iterating over the nesting for the same row.
*
* In case struct and lists are intermixed, the definition levels of all the contiguous struct
* levels can be constructed using the aforementioned iterative method. Only when we reach a list
* level, we need to do a merge with the subsequent level.
*
* So, for a column like `struct<list<int>>`, we are going to merge between the levels `struct<list`
* and `int`.
* For a column like `list<struct<int>>`, we are going to merge between `list` and `struct<int>`.
*
* In general, one nesting level is the list level and any struct level that precedes it.
*
* A few more examples to visualize the partitioning of column hierarchy into nesting levels:
* (L is list, S is struct, i is integer(leaf data level), angle brackets omitted)
* ```
* 1. LSi = L Si
* - | --
*
* 2. LLSi = L L Si
* - | - | --
*
* 3. SSLi = SSL i
* --- | -
*
* 4. LLSLSSi = L L SL SSi
* - | - | -- | ---
 * ```
*/
dremel_data get_dremel_data(column_view h_col,
// TODO(cp): use device_span once it is converted to a single hd_vec
rmm::device_uvector<uint8_t> const &d_nullability,
std::vector<uint8_t> const &nullability,
rmm::cuda_stream_view stream)
{
auto get_list_level = [](column_view col) {
while (col.type().id() == type_id::STRUCT) { col = col.child(0); }
return col;
};
auto get_empties = [&](column_view col, size_type start, size_type end) {
auto lcv = lists_column_view(get_list_level(col));
rmm::device_uvector<size_type> empties_idx(lcv.size(), stream);
rmm::device_uvector<size_type> empties(lcv.size(), stream);
auto d_off = lcv.offsets().data<size_type>();
auto empties_idx_end =
thrust::copy_if(rmm::exec_policy(stream),
thrust::make_counting_iterator(start),
thrust::make_counting_iterator(end),
empties_idx.begin(),
[d_off] __device__(auto i) { return d_off[i] == d_off[i + 1]; });
auto empties_end = thrust::gather(rmm::exec_policy(stream),
empties_idx.begin(),
empties_idx_end,
lcv.offsets().begin<size_type>(),
empties.begin());
auto empties_size = empties_end - empties.begin();
return std::make_tuple(std::move(empties), std::move(empties_idx), empties_size);
};
auto curr_col = h_col;
std::vector<column_view> nesting_levels;
std::vector<uint8_t> def_at_level;
std::vector<uint8_t> start_at_sub_level;
uint8_t curr_nesting_level_idx = 0;
auto add_def_at_level = [&](column_view col) {
// Add up all def level contributions in this column all the way till the first list column
// appears in the hierarchy or until we get to leaf
uint32_t def = 0;
start_at_sub_level.push_back(curr_nesting_level_idx);
while (col.type().id() == type_id::STRUCT) {
def += (nullability[curr_nesting_level_idx]) ? 1 : 0;
col = col.child(0);
++curr_nesting_level_idx;
}
// At the end of all those structs is either a list column or the leaf. Leaf column contributes
// at least one def level. It doesn't matter what the leaf contributes because it'll be at the
// end of the exclusive scan.
def += (nullability[curr_nesting_level_idx]) ? 2 : 1;
def_at_level.push_back(def);
++curr_nesting_level_idx;
};
while (cudf::is_nested(curr_col.type())) {
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
while (curr_col.type().id() == type_id::STRUCT) {
// Go down the hierarchy until we get to the LIST or the leaf level
curr_col = curr_col.child(0);
}
if (curr_col.type().id() == type_id::LIST) {
curr_col = curr_col.child(lists_column_view::child_column_index);
if (not is_nested(curr_col.type())) {
// Special case: when the leaf data column is the immediate child of the list col then we
// want it to be included right away. Otherwise the struct containing it will be included in
// the next iteration of this loop.
nesting_levels.push_back(curr_col);
add_def_at_level(curr_col);
break;
}
}
}
std::unique_ptr<rmm::device_buffer> device_view_owners;
column_device_view *d_nesting_levels;
std::tie(device_view_owners, d_nesting_levels) =
contiguous_copy_column_device_views<column_device_view>(nesting_levels, stream);
thrust::exclusive_scan(
thrust::host, def_at_level.begin(), def_at_level.end(), def_at_level.begin());
// Sliced list column views only have offsets applied to top level. Get offsets for each level.
rmm::device_uvector<size_type> d_column_offsets(nesting_levels.size(), stream);
rmm::device_uvector<size_type> d_column_ends(nesting_levels.size(), stream);
auto d_col = column_device_view::create(h_col, stream);
cudf::detail::device_single_thread(
[offset_at_level = d_column_offsets.data(),
end_idx_at_level = d_column_ends.data(),
col = *d_col] __device__() {
auto curr_col = col;
size_type off = curr_col.offset();
size_type end = off + curr_col.size();
size_type level = 0;
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
// Apply offset recursively until we get to leaf data
// Skip doing the following for any structs we encounter in between.
while (curr_col.type().id() == type_id::LIST or curr_col.type().id() == type_id::STRUCT) {
if (curr_col.type().id() == type_id::LIST) {
off = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(off);
end = curr_col.child(lists_column_view::offsets_column_index).element<size_type>(end);
offset_at_level[level] = off;
end_idx_at_level[level] = end;
++level;
curr_col = curr_col.child(lists_column_view::child_column_index);
} else {
curr_col = curr_col.child(0);
}
}
},
stream);
thrust::host_vector<size_type> column_offsets(d_column_offsets.size());
CUDA_TRY(cudaMemcpyAsync(column_offsets.data(),
d_column_offsets.data(),
d_column_offsets.size() * sizeof(size_type),
cudaMemcpyDeviceToHost,
stream.value()));
thrust::host_vector<size_type> column_ends(d_column_ends.size());
CUDA_TRY(cudaMemcpyAsync(column_ends.data(),
d_column_ends.data(),
d_column_ends.size() * sizeof(size_type),
cudaMemcpyDeviceToHost,
stream.value()));
stream.synchronize();
size_t max_vals_size = 0;
for (size_t l = 0; l < column_offsets.size(); ++l) {
max_vals_size += column_ends[l] - column_offsets[l];
}
rmm::device_uvector<uint8_t> rep_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> def_level(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_rep_vals(max_vals_size, stream);
rmm::device_uvector<uint8_t> temp_def_vals(max_vals_size, stream);
rmm::device_uvector<size_type> new_offsets(0, stream);
size_type curr_rep_values_size = 0;
{
// At this point, curr_col contains the leaf column. Max nesting level is
// nesting_levels.size().
// We are going to start by merging the last column in nesting_levels (the leaf, which is at the
// index `nesting_levels.size() - 1`) with the second-to-last (which is at
// `nesting_levels.size() - 2`).
size_t level = nesting_levels.size() - 2;
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
rmm::device_uvector<size_type> empties(0, stream);
rmm::device_uvector<size_type> empties_idx(0, stream);
size_t empties_size;
std::tie(empties, empties_idx, empties_size) =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
// Merge empty at deepest parent level with the rep, def level vals at leaf level
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level]});
// `nesting_levels.size()` == no of list levels + leaf. Max repetition level = no of list levels
auto input_child_rep_it = thrust::make_constant_iterator(nesting_levels.size() - 1);
auto input_child_def_it =
thrust::make_transform_iterator(thrust::make_counting_iterator(column_offsets[level + 1]),
def_level_fn{d_nesting_levels + level + 1,
d_nullability.data(),
start_at_sub_level[level + 1],
def_at_level[level + 1]});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_child_rep_it, input_child_def_it));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
empties.begin(),
empties.begin() + empties_size,
thrust::make_counting_iterator(column_offsets[level + 1]),
thrust::make_counting_iterator(column_ends[level + 1]),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of empties
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level], [off = lcv.offsets().data<size_type>()] __device__(auto i) -> int {
return off[i] == off[i + 1];
});
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
new_offsets = rmm::device_uvector<size_type>(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = new_offsets.data()] __device__(auto i) {
new_off[i] = off[i] - off[0] + scan_out[i];
});
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
// Having already merged the last two levels, we are now going to merge the result with the
// third-last level which is at index `nesting_levels.size() - 3`.
for (int level = nesting_levels.size() - 3; level >= 0; level--) {
curr_col = nesting_levels[level];
auto lcv = lists_column_view(get_list_level(curr_col));
auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1;
// Get empties at this level
rmm::device_uvector<size_type> empties(0, stream);
rmm::device_uvector<size_type> empties_idx(0, stream);
size_t empties_size;
std::tie(empties, empties_idx, empties_size) =
get_empties(nesting_levels[level], column_offsets[level], column_ends[level]);
auto offset_transformer = [new_child_offsets = new_offsets.data(),
child_start = column_offsets[level + 1]] __device__(auto x) {
return new_child_offsets[x - child_start]; // (x - child's offset)
};
// We will be reading from old rep_levels and writing again to rep_levels. Swap the current
// rep values into temp_rep_vals so it can become the input and rep_levels can again be output.
std::swap(temp_rep_vals, rep_level);
std::swap(temp_def_vals, def_level);
// Merge empty at parent level with the rep, def level vals at current level
auto transformed_empties = thrust::make_transform_iterator(empties.begin(), offset_transformer);
auto input_parent_rep_it = thrust::make_constant_iterator(level);
auto input_parent_def_it =
thrust::make_transform_iterator(empties_idx.begin(),
def_level_fn{d_nesting_levels + level,
d_nullability.data(),
start_at_sub_level[level],
def_at_level[level]});
// Zip the input and output value iterators so that merge operation is done only once
auto input_parent_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it));
auto input_child_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(temp_rep_vals.begin(), temp_def_vals.begin()));
auto output_zip_it =
thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin()));
auto ends = thrust::merge_by_key(rmm::exec_policy(stream),
transformed_empties,
transformed_empties + empties_size,
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(curr_rep_values_size),
input_parent_zip_it,
input_child_zip_it,
thrust::make_discard_iterator(),
output_zip_it);
curr_rep_values_size = ends.second - output_zip_it;
// Scan to get distance by which each offset value is shifted due to the insertion of dremel
    // level value for an empty list
auto scan_it = cudf::detail::make_counting_transform_iterator(
column_offsets[level], [off = lcv.offsets().data<size_type>()] __device__(auto i) -> int {
return off[i] == off[i + 1];
});
rmm::device_uvector<size_type> scan_out(offset_size_at_level, stream);
thrust::exclusive_scan(
rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin());
// Add scan output to existing offsets to get new offsets into merged rep level values
rmm::device_uvector<size_type> temp_new_offsets(offset_size_at_level, stream);
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
offset_size_at_level,
[off = lcv.offsets().data<size_type>() + column_offsets[level],
scan_out = scan_out.data(),
new_off = temp_new_offsets.data(),
offset_transformer] __device__(auto i) {
new_off[i] = offset_transformer(off[i]) + scan_out[i];
});
new_offsets = std::move(temp_new_offsets);
// Set rep level values at level starts to appropriate rep level
auto scatter_it = thrust::make_constant_iterator(level);
thrust::scatter(rmm::exec_policy(stream),
scatter_it,
scatter_it + new_offsets.size() - 1,
new_offsets.begin(),
rep_level.begin());
}
size_t level_vals_size = new_offsets.back_element(stream);
rep_level.resize(level_vals_size, stream);
def_level.resize(level_vals_size, stream);
stream.synchronize();
size_type leaf_data_size = column_ends.back() - column_offsets.back();
return dremel_data{
std::move(new_offsets), std::move(rep_level), std::move(def_level), leaf_data_size};
}
/**
* @brief Launches kernel for initializing encoder page fragments
*
* @param[in,out] frag Fragment array [column_id][fragment_id]
* @param[in] col_desc Column description array [column_id]
 * @param[in] fragment_size Number of rows per page fragment
 * @param[in] num_rows Total number of rows in the table
* @param[in] stream CUDA stream to use, default 0
*/
void InitPageFragments(device_2dspan<PageFragment> frag,
device_span<parquet_column_device_view const> col_desc,
uint32_t fragment_size,
uint32_t num_rows,
rmm::cuda_stream_view stream)
{
auto num_columns = frag.size().first;
auto num_fragments_per_column = frag.size().second;
dim3 dim_grid(num_columns, num_fragments_per_column); // 1 threadblock per fragment
gpuInitPageFragments<512>
<<<dim_grid, 512, 0, stream.value()>>>(frag, col_desc, fragment_size, num_rows);
}
/**
* @brief Launches kernel for initializing fragment statistics groups
*
* @param[out] groups Statistics groups [num_columns x num_fragments]
* @param[in] fragments Page fragments [num_columns x num_fragments]
* @param[in] col_desc Column description [num_columns]
* @param[in] stream CUDA stream to use, default 0
*/
void InitFragmentStatistics(device_2dspan<statistics_group> groups,
device_2dspan<PageFragment const> fragments,
device_span<parquet_column_device_view const> col_desc,
rmm::cuda_stream_view stream)
{
int const num_columns = col_desc.size();
int const num_fragments_per_column = fragments.size().second;
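  // One warp processes one fragment; with 128 threads per block and 32-thread warps this
  // packs 4 fragments per block, so the grid's y dimension is ceil(num_fragments / 4).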
auto grid_y = util::div_rounding_up_safe(num_fragments_per_column, 128 / cudf::detail::warp_size);
dim3 dim_grid(num_columns, grid_y); // 1 warp per fragment
gpuInitFragmentStats<<<dim_grid, 128, 0, stream.value()>>>(groups, fragments, col_desc);
}
/**
* @brief Launches kernel for initializing encoder data pages
*
* @param[in,out] chunks Column chunks [rowgroup][column]
* @param[out] pages Encode page array (null if just counting pages)
* @param[in] col_desc Column description array [column_id]
 * @param[in] num_rowgroups Number of row groups
* @param[in] num_columns Number of columns
* @param[out] page_grstats Setup for page-level stats
* @param[out] chunk_grstats Setup for chunk-level stats
* @param[in] stream CUDA stream to use, default 0
*/
void InitEncoderPages(device_2dspan<EncColumnChunk> chunks,
device_span<gpu::EncPage> pages,
device_span<parquet_column_device_view const> col_desc,
int32_t num_columns,
statistics_merge_group *page_grstats,
statistics_merge_group *chunk_grstats,
rmm::cuda_stream_view stream)
{
auto num_rowgroups = chunks.size().first;
dim3 dim_grid(num_columns, num_rowgroups); // 1 threadblock per rowgroup
gpuInitPages<<<dim_grid, 128, 0, stream.value()>>>(
chunks, pages, col_desc, page_grstats, chunk_grstats, num_columns);
}
/**
* @brief Launches kernel for packing column data into parquet pages
*
* @param[in,out] pages Device array of EncPages (unordered)
* @param[out] comp_in Optionally initializes compressor input params
* @param[out] comp_stat Optionally initializes compressor status
* @param[in] stream CUDA stream to use, default 0
*/
void EncodePages(device_span<gpu::EncPage> pages,
device_span<gpu_inflate_input_s> comp_in,
device_span<gpu_inflate_status_s> comp_stat,
rmm::cuda_stream_view stream)
{
auto num_pages = pages.size();
// A page is part of one column. This is launching 1 block per page. 1 block will exclusively
// deal with one datatype.
gpuEncodePages<128><<<num_pages, 128, 0, stream.value()>>>(pages, comp_in, comp_stat);
}
/**
* @brief Launches kernel to make the compressed vs uncompressed chunk-level decision
*
* @param[in,out] chunks Column chunks
* @param[in] stream CUDA stream to use, default 0
*/
void DecideCompression(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
gpuDecideCompression<<<chunks.size(), 128, 0, stream.value()>>>(chunks);
}
/**
* @brief Launches kernel to encode page headers
*
* @param[in,out] pages Device array of EncPages
* @param[in] comp_stat Compressor status or nullptr if no compression
* @param[in] page_stats Optional page-level statistics to be included in page header
* @param[in] chunk_stats Optional chunk-level statistics to be encoded
* @param[in] stream CUDA stream to use, default 0
*/
void EncodePageHeaders(device_span<EncPage> pages,
device_span<gpu_inflate_status_s const> comp_stat,
device_span<statistics_chunk const> page_stats,
const statistics_chunk *chunk_stats,
rmm::cuda_stream_view stream)
{
  // TODO: this is a single-thread task, so there is no need for 128 threads/block. Earlier it
  // used the rest of the threads to cooperatively load the structs.
gpuEncodePageHeaders<<<pages.size(), 128, 0, stream.value()>>>(
pages, comp_stat, page_stats, chunk_stats);
}
/**
* @brief Launches kernel to gather pages to a single contiguous block per chunk
*
* @param[in,out] chunks Column chunks
* @param[in] pages Device array of EncPages
* @param[in] stream CUDA stream to use, default 0
*/
void GatherPages(device_span<EncColumnChunk> chunks,
device_span<gpu::EncPage const> pages,
rmm::cuda_stream_view stream)
{
gpuGatherPages<<<chunks.size(), 1024, 0, stream.value()>>>(chunks, pages);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
37956e02fca96c884fbea797d5406c84ff96e035.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
using namespace std;
//-- This MPI+CUDA multi-GPU program generates n numbers on node Rank 0, processes them on its GPU, then sends them to node Rank 1 to be processed. At the end, the final result returns to the master node.
//-- TO COMPILE nvcc -arch=compute_37 -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2022/libevent -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2022/libevent/include -I/usr/lib/x86_64-linux-gnu/openmpi/include -L/usr//lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lmpi mpicudanormal.cu -o program
//-- TO RUN mpiexec -n 2 ./program xx yy zz
//-- xx (integer) No.of input to generate
//-- yy (integer) Range of input data to be generated randomly
//-- zz (integer) number of iterations
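//-- Example invocation (values are only an illustration): mpiexec -n 2 ./program 1048576 1024 10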
// declare the kernel function
__global__ void add(int *A, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) A[i] = A[i] + 1;
}
__global__ void add2(int *A, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) A[i] = A[i] + 1;
}
int main(int argc, char **argv){
int myid, procs, n, range, err, maxiter;
int iter = 0;
int block_size, grid_size;
MPI_Status status;
double t_start = 0.0, t_end = 0.0;
// initialize MPI_Init
err = MPI_Init(&argc, &argv);
if (err != MPI_SUCCESS){
printf("\nError initializing MPI.\n");
MPI_Abort(MPI_COMM_WORLD, err);
} // end if
// Get No. of processors
MPI_Comm_size(MPI_COMM_WORLD, &procs);
// Get processor id
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
if (myid == 0) {// to print only once....
if (argc < 4) {
printf("\n\tOOOPS...., INVALID No of Arguments,\n");
}
} // end myid == 0
if (argc < 4) {MPI_Finalize(); return 0;} // end if
n = atoi(argv[1]); // get n
range = atoi(argv[2]);
maxiter = atoi(argv[3]);
int nBytes = n * sizeof(int);
if (myid == 0) printf("The size of data: %d\n", nBytes);
//k = n; // No. of elements to be computed by each Processor
while (iter < maxiter)
{
iter++;
if (myid == 0) {
int *a_d;
int *arr, *myarr;
block_size=32;
grid_size = 512;
//srand(time(NULL));
time_t t1;
time(&t1); // get system time
srand(t1); // Initialize Random Seed
dim3 dimBlock(block_size,1,1);
dim3 dimGrid(grid_size,1,1);
float milliseconds_h2d, milliseconds_d2h, milliseconds_k = 0;
hipEvent_t start_h2d, stop_h2d, start_d2h, stop_d2h, start_k, stop_k;
hipEventCreate(&start_h2d);
hipEventCreate(&start_d2h);
hipEventCreate(&start_k);
hipEventCreate(&stop_d2h);
hipEventCreate(&stop_h2d);
hipEventCreate(&stop_k);
myarr = new int[n * sizeof (int)];
// allocate space to generate data
arr = new int[n * sizeof (int)];
for(int j = 0; j < n; j++)// generate random data
arr[j] = rand() % range;
//hipSetDevice(0);
//hipDeviceEnablePeerAccess( 1, 0 );
hipMalloc((void **)&a_d , n*sizeof(int));
//Sending data to the GPU
hipEventRecord(start_h2d);
hipMemcpy(a_d, arr, n*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop_h2d);
hipEventSynchronize(stop_h2d);
hipEventElapsedTime(&milliseconds_h2d, start_h2d, stop_h2d);
//Starting kernel
hipEventRecord(start_k);
hipLaunchKernelGGL(( add2), dim3(grid_size),dim3(block_size), 0, 0, a_d,n);
hipDeviceSynchronize();
hipEventRecord(stop_k);
hipEventSynchronize(stop_k);
hipEventElapsedTime(&milliseconds_k, start_k, stop_k);
//Returning data from device memory to the host
hipEventRecord(start_d2h);
hipMemcpy(myarr, a_d, n*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop_d2h);
hipEventSynchronize(stop_d2h);
hipEventElapsedTime(&milliseconds_d2h, start_d2h, stop_d2h);
//Check if cuda operations are successful
hipError_t err1 = hipGetLastError();
if (err1 != hipSuccess)
printf("CUDA Error: %s\n", hipGetErrorString(err1));
//Sending result to the master node
t_start = MPI_Wtime();
MPI_Send(myarr, n, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(myarr, n, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
t_end = MPI_Wtime();
//for(int j=0 ; j<n ; j++)
// printf("%d ", myarr[j]);
printf("\nRank: %d , Iteration: %d , H2D: %f , D2H: %f ,Kernel: %f , Total Time: (ms) %f\n",myid, iter, milliseconds_h2d, milliseconds_d2h, milliseconds_k , ((t_end - t_start)* 1e3));
// --free allocated spaces
free (arr);//free allocated space for array a
free(myarr);
hipFree(a_d);
hipEventDestroy(start_h2d);
hipEventDestroy(stop_h2d);
hipEventDestroy(start_d2h);
hipEventDestroy(stop_d2h);
hipEventDestroy(start_k);
hipEventDestroy(stop_k);
sleep(1);//waits between iterations
} // end myid == 0
else{ //Node rank 1
float milliseconds_k, milliseconds_h2d, milliseconds_d2h = 0;
int *a_dd, *myarr;
//hipSetDevice(1);
block_size=32;
grid_size = 512;
dim3 dimBlock(block_size,1,1);
dim3 dimGrid(grid_size,1,1);
hipEvent_t start_k, stop_k, start_h2d, stop_h2d, start_d2h, stop_d2h;
hipEventCreate(&start_k);
hipEventCreate(&stop_k);
hipEventCreate(&stop_h2d);
hipEventCreate(&stop_d2h);
hipEventCreate(&start_h2d);
hipEventCreate(&start_d2h);
hipMalloc((void **)&a_dd , n*sizeof(int));
myarr = new int[n * sizeof (int)];
//hipSetDevice(1);
//hipDeviceEnablePeerAccess( 0, 0 );
//Receiving data from master (Rank=0)
MPI_Recv(myarr, n, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
//Sending data to the GPU
hipEventRecord(start_h2d);
hipMemcpy(a_dd, myarr, n*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(stop_h2d);
hipEventSynchronize(stop_h2d);
hipEventElapsedTime(&milliseconds_h2d, start_h2d, stop_h2d);
//Starting kernel
hipEventRecord(start_k);
hipLaunchKernelGGL(( add), dim3(grid_size),dim3(block_size), 0, 0, a_dd,n);
hipDeviceSynchronize();
hipEventRecord(stop_k);
hipEventSynchronize(stop_k);
hipEventElapsedTime(&milliseconds_k, start_k, stop_k);
//Returning data from device memory to the host
hipEventRecord(start_d2h);
hipMemcpy(myarr, a_dd, n*sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop_d2h);
hipEventSynchronize(stop_d2h);
hipEventElapsedTime(&milliseconds_d2h, start_d2h, stop_d2h);
//Check if cuda operations are successful
hipError_t err = hipGetLastError();
if ( err != hipSuccess )
printf("CUDA Error: %s\n", hipGetErrorString(err));
printf("\nRank: %d , Iteration: %d , H2D: %f, D2H: %f , Kernel: %f (ms) \n",myid, iter, milliseconds_h2d, milliseconds_d2h, milliseconds_k);
//Returning result to the master node
MPI_Send(myarr, n, MPI_INT, 0, myid, MPI_COMM_WORLD);
//free allocated spaces
hipFree(a_dd);
free(myarr);
hipEventDestroy(start_d2h);
hipEventDestroy(stop_d2h);
hipEventDestroy(stop_h2d);
hipEventDestroy(start_h2d);
hipEventDestroy(start_k);
hipEventDestroy(stop_k);
} // end else
}
MPI_Finalize();
return 0;
} // end main
| 37956e02fca96c884fbea797d5406c84ff96e035.cu | #include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <mpi.h>
#include <cuda.h>
#include <unistd.h>
using namespace std;
//-- This MPI+CUDA multi-GPU program generates n numbers on node Rank 0, processes them on its GPU, then sends them to node Rank 1 to be processed. At the end, the final result returns to the master node.
//-- TO COMPILE nvcc -arch=compute_37 -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2022/libevent -I/usr/lib/x86_64-linux-gnu/openmpi/include/openmpi/opal/mca/event/libevent2022/libevent/include -I/usr/lib/x86_64-linux-gnu/openmpi/include -L/usr//lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lmpi mpicudanormal.cu -o program
//-- TO RUN mpiexec -n 2 ./program xx yy zz
//-- xx (integer) No.of input to generate
//-- yy (integer) Range of input data to be generated randomly
//-- zz (integer) number of iterations
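//-- Example invocation (values are only an illustration): mpiexec -n 2 ./program 1048576 1024 10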
// declare the kernel function
__global__ void add(int *A, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) A[i] = A[i] + 1;
}
__global__ void add2(int *A, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) A[i] = A[i] + 1;
}
int main(int argc, char **argv){
int myid, procs, n, range, err, maxiter;
int iter = 0;
int block_size, grid_size;
MPI_Status status;
double t_start = 0.0, t_end = 0.0;
// initialize MPI_Init
err = MPI_Init(&argc, &argv);
if (err != MPI_SUCCESS){
printf("\nError initializing MPI.\n");
MPI_Abort(MPI_COMM_WORLD, err);
} // end if
// Get No. of processors
MPI_Comm_size(MPI_COMM_WORLD, &procs);
// Get processor id
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
if (myid == 0) {// to print only once....
if (argc < 4) {
printf("\n\tOOOPS...., INVALID No of Arguments,\n");
}
} // end myid == 0
if (argc < 4) {MPI_Finalize(); return 0;} // end if
n = atoi(argv[1]); // get n
range = atoi(argv[2]);
maxiter = atoi(argv[3]);
int nBytes = n * sizeof(int);
if (myid == 0) printf("The size of data: %d\n", nBytes);
//k = n; // No. of elements to be computed by each Processor
while (iter < maxiter)
{
iter++;
if (myid == 0) {
int *a_d;
int *arr, *myarr;
block_size=32;
grid_size = 512;
//srand(time(NULL));
time_t t1;
time(&t1); // get system time
srand(t1); // Initialize Random Seed
dim3 dimBlock(block_size,1,1);
dim3 dimGrid(grid_size,1,1);
float milliseconds_h2d, milliseconds_d2h, milliseconds_k = 0;
cudaEvent_t start_h2d, stop_h2d, start_d2h, stop_d2h, start_k, stop_k;
cudaEventCreate(&start_h2d);
cudaEventCreate(&start_d2h);
cudaEventCreate(&start_k);
cudaEventCreate(&stop_d2h);
cudaEventCreate(&stop_h2d);
cudaEventCreate(&stop_k);
myarr = new int[n * sizeof (int)];
// allocate space to generate data
arr = new int[n * sizeof (int)];
for(int j = 0; j < n; j++)// generate random data
arr[j] = rand() % range;
//cudaSetDevice(0);
//cudaDeviceEnablePeerAccess( 1, 0 );
cudaMalloc((void **)&a_d , n*sizeof(int));
//Sending data to the GPU
cudaEventRecord(start_h2d);
cudaMemcpy(a_d, arr, n*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop_h2d);
cudaEventSynchronize(stop_h2d);
cudaEventElapsedTime(&milliseconds_h2d, start_h2d, stop_h2d);
//Starting kernel
cudaEventRecord(start_k);
add2<<<grid_size,block_size>>>(a_d,n);
cudaDeviceSynchronize();
cudaEventRecord(stop_k);
cudaEventSynchronize(stop_k);
cudaEventElapsedTime(&milliseconds_k, start_k, stop_k);
//Returning data from device memory to the host
cudaEventRecord(start_d2h);
cudaMemcpy(myarr, a_d, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop_d2h);
cudaEventSynchronize(stop_d2h);
cudaEventElapsedTime(&milliseconds_d2h, start_d2h, stop_d2h);
//Check if cuda operations are successful
cudaError_t err1 = cudaGetLastError();
if (err1 != cudaSuccess)
printf("CUDA Error: %s\n", cudaGetErrorString(err1));
//Sending result to the master node
t_start = MPI_Wtime();
MPI_Send(myarr, n, MPI_INT, 1, 0, MPI_COMM_WORLD);
MPI_Recv(myarr, n, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
t_end = MPI_Wtime();
//for(int j=0 ; j<n ; j++)
// printf("%d ", myarr[j]);
printf("\nRank: %d , Iteration: %d , H2D: %f , D2H: %f ,Kernel: %f , Total Time: (ms) %f\n",myid, iter, milliseconds_h2d, milliseconds_d2h, milliseconds_k , ((t_end - t_start)* 1e3));
// --free allocated spaces
free (arr);//free allocated space for array a
free(myarr);
cudaFree(a_d);
cudaEventDestroy(start_h2d);
cudaEventDestroy(stop_h2d);
cudaEventDestroy(start_d2h);
cudaEventDestroy(stop_d2h);
cudaEventDestroy(start_k);
cudaEventDestroy(stop_k);
sleep(1);//waits between iterations
} // end myid == 0
else{ //Node rank 1
float milliseconds_k, milliseconds_h2d, milliseconds_d2h = 0;
int *a_dd, *myarr;
//cudaSetDevice(1);
block_size=32;
grid_size = 512;
dim3 dimBlock(block_size,1,1);
dim3 dimGrid(grid_size,1,1);
cudaEvent_t start_k, stop_k, start_h2d, stop_h2d, start_d2h, stop_d2h;
cudaEventCreate(&start_k);
cudaEventCreate(&stop_k);
cudaEventCreate(&stop_h2d);
cudaEventCreate(&stop_d2h);
cudaEventCreate(&start_h2d);
cudaEventCreate(&start_d2h);
cudaMalloc((void **)&a_dd , n*sizeof(int));
myarr = new int[n * sizeof (int)];
//cudaSetDevice(1);
//cudaDeviceEnablePeerAccess( 0, 0 );
//Receiving data from master (Rank=0)
MPI_Recv(myarr, n, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
//Sending data to the GPU
cudaEventRecord(start_h2d);
cudaMemcpy(a_dd, myarr, n*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(stop_h2d);
cudaEventSynchronize(stop_h2d);
cudaEventElapsedTime(&milliseconds_h2d, start_h2d, stop_h2d);
//Starting kernel
cudaEventRecord(start_k);
add<<<grid_size,block_size>>>(a_dd,n);
cudaDeviceSynchronize();
cudaEventRecord(stop_k);
cudaEventSynchronize(stop_k);
cudaEventElapsedTime(&milliseconds_k, start_k, stop_k);
//Returning data from device memory to the host
cudaEventRecord(start_d2h);
cudaMemcpy(myarr, a_dd, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop_d2h);
cudaEventSynchronize(stop_d2h);
cudaEventElapsedTime(&milliseconds_d2h, start_d2h, stop_d2h);
//Check if cuda operations are successful
cudaError_t err = cudaGetLastError();
if ( err != cudaSuccess )
printf("CUDA Error: %s\n", cudaGetErrorString(err));
printf("\nRank: %d , Iteration: %d , H2D: %f, D2H: %f , Kernel: %f (ms) \n",myid, iter, milliseconds_h2d, milliseconds_d2h, milliseconds_k);
//Returning result to the master node
MPI_Send(myarr, n, MPI_INT, 0, myid, MPI_COMM_WORLD);
//free allocated spaces
cudaFree(a_dd);
free(myarr);
cudaEventDestroy(start_d2h);
cudaEventDestroy(stop_d2h);
cudaEventDestroy(stop_h2d);
cudaEventDestroy(start_h2d);
cudaEventDestroy(start_k);
cudaEventDestroy(stop_k);
} // end else
}
MPI_Finalize();
return 0;
} // end main
|
a0346364396d18607833da4f526ca0a1df3bdb1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
const int GRIDSIZE = (32*1024);
const int BLOCKSIZE = 1024;
const int TOTALSIZE = GRIDSIZE * BLOCKSIZE;
const int TargetSIze = 2 * BLOCKSIZE;
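// The kernels below index with threadIdx.x only and give each thread a GRIDSIZE-element
// slice, so the intended launch appears to be a single block of BLOCKSIZE threads
// (assumption: no host code is present in this file). A sketch, with dData, dAnswer and
// target standing in for caller-provided device pointers and the search value:
//   hipLaunchKernelGGL(kernel, dim3(1), dim3(BLOCKSIZE), 0, 0, dData, dAnswer, target);
// The binary-search variants additionally assume pData is sorted in ascending order.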
__global__ void kernel(unsigned* pData, unsigned* pAnswer, unsigned target) {
//each thread loads multiple element from global to shared memory
register unsigned tid = threadIdx.x;
if (tid == 0) {
*pAnswer = TOTALSIZE;
}
__syncthreads();
register unsigned start = tid * GRIDSIZE;
register unsigned end = (tid + 1) * GRIDSIZE;
register unsigned index;
for (index = start; index < end; index++) {
register unsigned value = pData[index];
if (value == target) {
atomicMin(pAnswer, index);
}
}
}
//Better memory access (coalesced); renamed so it does not redefine kernel() above
__global__ void kernel_coalesced(unsigned* pData, unsigned* pAnswer, unsigned target) {
//each thread loads multiple element from global to shared memory
register unsigned tid = threadIdx.x;
register unsigned i;
if (tid == 0) {
*pAnswer = TOTALSIZE;
}
__syncthreads();
for (i = 0; i < GRIDSIZE; i++) {
register unsigned index = tid + i * BLOCKSIZE;
register unsigned value = pData[index];
if (value == target) {
atomicMin(pAnswer, index);
}
}
}
__global__ void kernel_binSearch(unsigned* pData, unsigned* pAnswer, unsigned target) {
register unsigned tid = threadIdx.x;
register unsigned first = tid * GRIDSIZE;
register unsigned last = (tid + 1) * GRIDSIZE;
while (first < last) {
register unsigned mid = (first + last) / 2;
if (target == pData[mid]) {
atomicMin(pAnswer, mid);
last = first;
}
else if (target < pData[mid]) {
last = mid - 1;
}
else {
first = mid + 1;
}
}
}
//early cutoff BinSearch; renamed so it does not redefine kernel_binSearch() above
__global__ void kernel_binSearch_cutoff(unsigned* pData, unsigned* pAnswer, unsigned target) {
register unsigned tid = threadIdx.x;
register unsigned first = tid * GRIDSIZE;
register unsigned last = (tid + 1) * GRIDSIZE;
if (pData[first] <= target && target <= pData[last - 1]) {
while (first < last) {
register unsigned mid = (first + last) / 2;
if (target == pData[mid]) {
atomicMin(pAnswer, mid);
last = first;
}
else if (target < pData[mid]) {
last = mid - 1;
}
else {
first = mid + 1;
}
}
}
} | a0346364396d18607833da4f526ca0a1df3bdb1f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
const int GRIDSIZE = (32*1024);
const int BLOCKSIZE = 1024;
const int TOTALSIZE = GRIDSIZE * BLOCKSIZE;
const int TargetSIze = 2 * BLOCKSIZE;
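// The kernels below index with threadIdx.x only and give each thread a GRIDSIZE-element
// slice, so the intended launch appears to be a single block of BLOCKSIZE threads
// (assumption: no host code is present in this file). A sketch, with dData, dAnswer and
// target standing in for caller-provided device pointers and the search value:
//   kernel<<<1, BLOCKSIZE>>>(dData, dAnswer, target);
// The binary-search variants additionally assume pData is sorted in ascending order.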
__global__ void kernel(unsigned* pData, unsigned* pAnswer, unsigned target) {
//each thread loads multiple element from global to shared memory
register unsigned tid = threadIdx.x;
if (tid == 0) {
*pAnswer = TOTALSIZE;
}
__syncthreads();
register unsigned start = tid * GRIDSIZE;
register unsigned end = (tid + 1) * GRIDSIZE;
register unsigned index;
for (index = start; index < end; index++) {
register unsigned value = pData[index];
if (value == target) {
atomicMin(pAnswer, index);
}
}
}
//Better memory access (coalesced); renamed so it does not redefine kernel() above
__global__ void kernel_coalesced(unsigned* pData, unsigned* pAnswer, unsigned target) {
//each thread loads multiple element from global to shared memory
register unsigned tid = threadIdx.x;
register unsigned i;
if (tid == 0) {
*pAnswer = TOTALSIZE;
}
__syncthreads();
for (i = 0; i < GRIDSIZE; i++) {
register unsigned index = tid + i * BLOCKSIZE;
register unsigned value = pData[index];
if (value == target) {
atomicMin(pAnswer, index);
}
}
}
__global__ void kernel_binSearch(unsigned* pData, unsigned* pAnswer, unsigned target) {
register unsigned tid = threadIdx.x;
register unsigned first = tid * GRIDSIZE;
register unsigned last = (tid + 1) * GRIDSIZE;
while (first < last) {
register unsigned mid = (first + last) / 2;
if (target == pData[mid]) {
atomicMin(pAnswer, mid);
last = first;
}
else if (target < pData[mid]) {
last = mid - 1;
}
else {
first = mid + 1;
}
}
}
//early cutoff BinSearch; renamed so it does not redefine kernel_binSearch() above
__global__ void kernel_binSearch_cutoff(unsigned* pData, unsigned* pAnswer, unsigned target) {
register unsigned tid = threadIdx.x;
register unsigned first = tid * GRIDSIZE;
register unsigned last = (tid + 1) * GRIDSIZE;
if (pData[first] <= target && target <= pData[last - 1]) {
while (first < last) {
register unsigned mid = (first + last) / 2;
if (target == pData[mid]) {
atomicMin(pAnswer, mid);
last = first;
}
else if (target < pData[mid]) {
last = mid - 1;
}
else {
first = mid + 1;
}
}
}
} |
695b09d99d0fef45df4dc5e9aed8cc0c156e6eaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "whattotest.cuh"
#define N 10
__global__ void add( int *a, int *b, int *c ) {
//int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
//tid += blockDim.x * gridDim.x;
}
int addKernelWrapper(int *a, int *b, int *c) {
//int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, N * sizeof(int));
hipMalloc( (void**)&dev_b, N * sizeof(int));
hipMalloc( (void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
// for (int i=0; i<N; i++) {
// a[i] = -i;
// b[i] = i * i;
// }
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice );
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
}
| 695b09d99d0fef45df4dc5e9aed8cc0c156e6eaa.cu | #include "whattotest.cuh"
#define N 10
__global__ void add( int *a, int *b, int *c ) {
//int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tid = blockIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
//tid += blockDim.x * gridDim.x;
}
int addKernelWrapper(int *a, int *b, int *c) {
//int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int));
cudaMalloc( (void**)&dev_b, N * sizeof(int));
cudaMalloc( (void**)&dev_c, N * sizeof(int));
// fill the arrays 'a' and 'b' on the CPU
// for (int i=0; i<N; i++) {
// a[i] = -i;
// b[i] = i * i;
// }
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice );
add<<<N,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost );
// display the results
for (int i=0; i<N; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
}
|
c7e0e6bbc711b91f0be26341028f476baa33918a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "myers-common.h"
#define NUM_BITS 4
#define NUM_BASES 5
#define BASES_PER_THREAD 128
#define BASES_PER_ENTRY 8
#define SIZE_GPU_HW_WORD 32
#define SIZE_WARP 32
#define HIGH_MASK_32 0x80000000
#define LOW_MASK_32 0x00000001
#define MAX_VALUE 0xFFFFFFFF
#ifndef DEVICE
#define DEVICE 0
#endif
#ifndef CUDA_NUM_THREADS
#define CUDA_NUM_THREADS 128
#endif
// add and set the carry flag (no carry in)
#define UADD__CARRY_OUT(c, a, b) \
asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
// add with carry in and set the carry flag
#define UADD__IN_CARRY_OUT(c, a, b) \
asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
// add with carry in only (carry flag is not updated)
#define UADD__IN_CARRY(c, a, b) \
asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
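// Chained together, these macros add multi-word integers through the carry flag, e.g. a
// 128-bit sum c = a + b split across four 32-bit words:
//   UADD__CARRY_OUT   (c_A, a_A, b_A)   // lowest word: add, set carry flag
//   UADD__IN_CARRY_OUT(c_B, a_B, b_B)   // middle words: add with carry, set carry flag
//   UADD__IN_CARRY_OUT(c_C, a_C, b_C)
//   UADD__IN_CARRY    (c_D, a_D, b_D)   // top word: add with carry, discard overflow
// (the kernels below instead keep the top carry via an extra UADD__IN_CARRY(carry, 0, 0)).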
inline __device__ void shared_collaborative_shift_CC20(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D,
const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff,
uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D)
{
uint32_t carry;
interBuff[intraWarpIdx + 1] = value_D;
carry = interBuff[intraWarpIdx];
carry = (localThreadIdx) ? carry : 0;
value_D = (value_C >> 31) | (value_D << 1);
value_C = (value_B >> 31) | (value_C << 1);
value_B = (value_A >> 31) | (value_B << 1);
value_A = (carry >> 31) | (value_A << 1);
(* res_A) = value_A;
(* res_B) = value_B;
(* res_C) = value_C;
(* res_D) = value_D;
}
inline __device__ void shared_collaborative_sum_CC20(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D,
const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D,
const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff,
uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D)
{
uint32_t carry, c_A, c_B, c_C, c_D;
UADD__CARRY_OUT (c_A, a_A, b_A)
UADD__IN_CARRY_OUT(c_B, a_B, b_B)
UADD__IN_CARRY_OUT(c_C, a_C, b_C)
UADD__IN_CARRY_OUT(c_D, a_D, b_D)
UADD__IN_CARRY (carry, 0, 0)
while(__any(carry)){
interBuff[intraWarpIdx + 1] = carry;
carry = interBuff[intraWarpIdx];
carry = (localThreadIdx) ? carry : 0;
UADD__CARRY_OUT (c_A, c_A, carry)
UADD__IN_CARRY_OUT(c_B, c_B, 0)
UADD__IN_CARRY_OUT(c_C, c_C, 0)
UADD__IN_CARRY_OUT(c_D, c_D, 0)
UADD__IN_CARRY (carry, 0, 0)
}
(* sum_A) = c_A;
(* sum_B) = c_B;
(* sum_C) = c_C;
(* sum_D) = c_D;
}
inline __device__ uint32_t selectEq_CC20(const uint32_t indexBase,
const uint32_t Eq0, const uint32_t Eq1,
const uint32_t Eq2, const uint32_t Eq3,
const uint32_t Eq4)
{
uint32_t Eq = Eq0;
Eq = (indexBase == 1) ? Eq1 : Eq;
Eq = (indexBase == 2) ? Eq2 : Eq;
Eq = (indexBase == 3) ? Eq3 : Eq;
Eq = (indexBase == 4) ? Eq4 : Eq;
return Eq;
}
inline __device__ uint32_t select_CC20(const uint32_t indexWord,
const uint32_t A, const uint32_t B,
const uint32_t C, const uint32_t D)
{
uint32_t value = A;
value = (indexWord == 1) ? B : value;
value = (indexWord == 2) ? C : value;
value = (indexWord == 3) ? D : value;
return value;
}
__device__ void myerslocalFermiKernel_CC20( const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates,
const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo,
const uint32_t idCandidate, const uint32_t sizeRef, const uint32_t numReorderedResults,
const float distance, const uint32_t intraQueryThreadIdx, const uint32_t threadsPerQuery)
{
if (idCandidate < numReorderedResults){
const uint32_t * localCandidate;
uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A;
uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B;
uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C;
uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D;
uint4 Eq0, Eq1, Eq2, Eq3, Eq4;
uint32_t PH, MH, indexWord;
uint32_t sum_A, sum_B, sum_C, sum_D;
const uint32_t originalCandidate = d_reorderBuffer[idCandidate];
const uint64_t positionRef = d_candidates[originalCandidate].position;
const uint32_t sizeQuery = d_qinfo[d_candidates[originalCandidate].query].size;
const uint32_t entry = d_qinfo[d_candidates[originalCandidate].query].posEntry + intraQueryThreadIdx;
const uint32_t sizeCandidate = sizeQuery * (1 + 2 * distance);
const uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 2 : 1);
uint32_t candidate;
const uint32_t mask = ((sizeQuery % SIZE_GPU_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQuery % SIZE_GPU_HW_WORD) - 1);
int32_t score = sizeQuery, minScore = sizeQuery;
uint32_t idColumn = 0, minColumn = 0, indexBase;
uint32_t intraBase, idEntry;
__shared__
uint32_t globalInterBuff[(SIZE_WARP + 1) * (CUDA_NUM_THREADS/SIZE_WARP)];
uint32_t *localInterBuff = globalInterBuff + ((threadIdx.x/SIZE_WARP) * (SIZE_WARP + 1));
uint32_t intraWarpIdx = threadIdx.x % SIZE_WARP;
indexWord = ((sizeQuery - 1) & (BASES_PER_THREAD - 1)) / SIZE_GPU_HW_WORD;
if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){
localCandidate = d_reference + (positionRef / BASES_PER_ENTRY);
Pv_A = MAX_VALUE;
Mv_A = 0;
Pv_B = MAX_VALUE;
Mv_B = 0;
Pv_C = MAX_VALUE;
Mv_C = 0;
Pv_D = MAX_VALUE;
Mv_D = 0;
Eq0 = d_queries[entry].bitmap[0];
Eq1 = d_queries[entry].bitmap[1];
Eq2 = d_queries[entry].bitmap[2];
Eq3 = d_queries[entry].bitmap[3];
Eq4 = d_queries[entry].bitmap[4];
for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){
candidate = localCandidate[idEntry];
for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){
indexBase = candidate & 0x07;
Eq_A = selectEq_CC20(indexBase, Eq0.x, Eq1.x, Eq2.x, Eq3.x, Eq4.x);
Eq_B = selectEq_CC20(indexBase, Eq0.y, Eq1.y, Eq2.y, Eq3.y, Eq4.y);
Eq_C = selectEq_CC20(indexBase, Eq0.z, Eq1.z, Eq2.z, Eq3.z, Eq4.z);
Eq_D = selectEq_CC20(indexBase, Eq0.w, Eq1.w, Eq2.w, Eq3.w, Eq4.w);
Xv_A = Eq_A | Mv_A;
Xv_B = Eq_B | Mv_B;
Xv_C = Eq_C | Mv_C;
Xv_D = Eq_D | Mv_D;
tEq_A = Eq_A & Pv_A;
tEq_B = Eq_B & Pv_B;
tEq_C = Eq_C & Pv_C;
tEq_D = Eq_D & Pv_D;
shared_collaborative_sum_CC20(tEq_A, tEq_B, tEq_C, tEq_D, Pv_A, Pv_B, Pv_C, Pv_D,
intraQueryThreadIdx, intraWarpIdx, localInterBuff,
&sum_A, &sum_B, &sum_C, &sum_D);
Xh_A = (sum_A ^ Pv_A) | Eq_A;
Xh_B = (sum_B ^ Pv_B) | Eq_B;
Xh_C = (sum_C ^ Pv_C) | Eq_C;
Xh_D = (sum_D ^ Pv_D) | Eq_D;
Ph_A = Mv_A | ~(Xh_A | Pv_A);
Ph_B = Mv_B | ~(Xh_B | Pv_B);
Ph_C = Mv_C | ~(Xh_C | Pv_C);
Ph_D = Mv_D | ~(Xh_D | Pv_D);
Mh_A = Pv_A & Xh_A;
Mh_B = Pv_B & Xh_B;
Mh_C = Pv_C & Xh_C;
Mh_D = Pv_D & Xh_D;
PH = select_CC20(indexWord, Ph_A, Ph_B, Ph_C, Ph_D);
MH = select_CC20(indexWord, Mh_A, Mh_B, Mh_C, Mh_D);
score += (((PH & mask) != 0) - ((MH & mask) != 0));
shared_collaborative_shift_CC20(Ph_A, Ph_B, Ph_C, Ph_D, intraQueryThreadIdx,
intraWarpIdx, localInterBuff,
&Ph_A, &Ph_B, &Ph_C, &Ph_D);
shared_collaborative_shift_CC20(Mh_A, Mh_B, Mh_C, Mh_D, intraQueryThreadIdx,
intraWarpIdx, localInterBuff,
&Mh_A, &Mh_B, &Mh_C, &Mh_D);
Pv_A = Mh_A | ~(Xv_A | Ph_A);
Pv_B = Mh_B | ~(Xv_B | Ph_B);
Pv_C = Mh_C | ~(Xv_C | Ph_C);
Pv_D = Mh_D | ~(Xv_D | Ph_D);
Mv_A = Ph_A & Xv_A;
Mv_B = Ph_B & Xv_B;
Mv_C = Ph_C & Xv_C;
Mv_D = Ph_D & Xv_D;
candidate >>= NUM_BITS;
minColumn = (score < minScore) ? idColumn : minColumn;
minScore = (score < minScore) ? score : minScore;
if(intraQueryThreadIdx == (threadsPerQuery - 1))
idColumn++;
}
}
if(intraQueryThreadIdx == (threadsPerQuery - 1)){
d_reorderResults[idCandidate].column = minColumn/* - (positionRef % BASES_PER_ENTRY)*/;
d_reorderResults[idCandidate].score = minScore;
}
}
}
}
__global__ void myersFermiKernel_CC20(const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer,
resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t sizeRef, const uint32_t numReorderedResults,
const float distance, uint32_t *d_initPosPerBucket, uint32_t *d_initWarpPerBucket, uint32_t numWarps)
{
uint32_t bucketIdx = 0;
uint32_t globalThreadIdx = blockIdx.y * blockDim.y + (blockIdx.x * blockDim.x + threadIdx.x);
uint32_t globalWarpIdx = globalThreadIdx / SIZE_WARP;
uint32_t localThreadInTheBucket, idCandidate, intraQueryThreadIdx, threadsPerQuery, queriesPerWarp, localIdCandidateInTheBucket;
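	// Candidates are grouped into buckets by the number of threads needed per query;
	// locate the bucket assigned to this warp, then derive the candidate index and the
	// lane-within-query for this thread before calling the per-candidate kernel body.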
while((bucketIdx != (SIZE_WARP + 1)) && (d_initWarpPerBucket[bucketIdx] <= globalWarpIdx)){
bucketIdx++;
}
bucketIdx--;
localThreadInTheBucket = globalThreadIdx - (d_initWarpPerBucket[bucketIdx] * SIZE_WARP);
threadsPerQuery = bucketIdx + 1;
queriesPerWarp = SIZE_WARP / threadsPerQuery;
localIdCandidateInTheBucket = ((localThreadInTheBucket / SIZE_WARP) * queriesPerWarp) + ((threadIdx.x % SIZE_WARP) / threadsPerQuery);
idCandidate = d_initPosPerBucket[bucketIdx] + localIdCandidateInTheBucket;
intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % threadsPerQuery;
myerslocalFermiKernel_CC20(d_queries, d_reference, d_candidates, d_reorderBuffer, d_reorderResults, d_qinfo,
idCandidate, sizeRef, numReorderedResults, distance, intraQueryThreadIdx, threadsPerQuery);
}
extern "C"
myersError_t processMyersBufferOnFermi(buffer_t *mBuff)
{
reference_buffer_t *ref = mBuff->reference;
queries_buffer_t *qry = mBuff->queries;
candidates_buffer_t *cand = mBuff->candidates;
reorder_buffer_t *rebuff = mBuff->reorderBuffer;
results_buffer_t *res = mBuff->results;
hipStream_t idStream = mBuff->idStream;
//We use 2-Dimensional Grid (because Fermi is limited to 65535 Blocks per dim)
uint32_t threadsPerBlock = CUDA_NUM_THREADS;
uint32_t maxBlocksPerRow = 65535;
uint32_t numThreads = rebuff->numWarps * SIZE_WARP;
uint32_t numBlocks = (numThreads / threadsPerBlock) + ((numThreads % threadsPerBlock) ? 1 : 0);
uint32_t rowsPerGrid = (numBlocks / maxBlocksPerRow) + ((numBlocks % maxBlocksPerRow) ? 1 : 0);
uint32_t blocksPerRow = (rowsPerGrid > 1) ? maxBlocksPerRow : numBlocks;
dim3 blocksPerGrid;
blocksPerGrid.x = blocksPerRow;
blocksPerGrid.y = rowsPerGrid;
if(DEVICE == 0){
//printf("FERMI: LAUNCH KERNEL 0 -- blocksPerRow: %d - rowsPerGrid %d - threadsPerBlock %d\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock);
hipLaunchKernelGGL(( myersFermiKernel_CC20), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, idStream, (d_qryEntry_t *)qry->d_queries, ref->d_reference, cand->d_candidates, rebuff->d_reorderBuffer,
res->d_reorderResults, qry->d_qinfo, ref->size, res->numReorderedResults,
qry->distance, rebuff->d_initPosPerBucket, rebuff->d_initWarpPerBucket,
rebuff->numWarps);
}
return(SUCCESS);
}
| c7e0e6bbc711b91f0be26341028f476baa33918a.cu | #include <stdio.h>
#include "myers-common.h"
#define NUM_BITS 4
#define NUM_BASES 5
#define BASES_PER_THREAD 128
#define BASES_PER_ENTRY 8
#define SIZE_GPU_HW_WORD 32
#define SIZE_WARP 32
#define HIGH_MASK_32 0x80000000
#define LOW_MASK_32 0x00000001
#define MAX_VALUE 0xFFFFFFFF
#ifndef DEVICE
#define DEVICE 0
#endif
#ifndef CUDA_NUM_THREADS
#define CUDA_NUM_THREADS 128
#endif
// output temporal carry in internal register
#define UADD__CARRY_OUT(c, a, b) \
asm volatile("add.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
// add & output with temporal carry of internal register
#define UADD__IN_CARRY_OUT(c, a, b) \
asm volatile("addc.cc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
// add with temporal carry of internal register
#define UADD__IN_CARRY(c, a, b) \
asm volatile("addc.u32 %0, %1, %2;" : "=r"(c) : "r"(a) , "r"(b));
inline __device__ void shared_collaborative_shift_CC20(uint32_t value_A, uint32_t value_B, uint32_t value_C, uint32_t value_D,
const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff,
uint32_t* res_A, uint32_t* res_B, uint32_t* res_C, uint32_t* res_D)
{
uint32_t carry;
interBuff[intraWarpIdx + 1] = value_D;
carry = interBuff[intraWarpIdx];
carry = (localThreadIdx) ? carry : 0;
value_D = (value_C >> 31) | (value_D << 1);
value_C = (value_B >> 31) | (value_C << 1);
value_B = (value_A >> 31) | (value_B << 1);
value_A = (carry >> 31) | (value_A << 1);
(* res_A) = value_A;
(* res_B) = value_B;
(* res_C) = value_C;
(* res_D) = value_D;
}
inline __device__ void shared_collaborative_sum_CC20(const uint32_t a_A, const uint32_t a_B, const uint32_t a_C, const uint32_t a_D,
const uint32_t b_A, const uint32_t b_B, const uint32_t b_C, const uint32_t b_D,
const uint32_t localThreadIdx, const uint32_t intraWarpIdx, volatile uint32_t *interBuff,
uint32_t* sum_A, uint32_t* sum_B, uint32_t* sum_C, uint32_t* sum_D)
{
uint32_t carry, c_A, c_B, c_C, c_D;
UADD__CARRY_OUT (c_A, a_A, b_A)
UADD__IN_CARRY_OUT(c_B, a_B, b_B)
UADD__IN_CARRY_OUT(c_C, a_C, b_C)
UADD__IN_CARRY_OUT(c_D, a_D, b_D)
UADD__IN_CARRY (carry, 0, 0)
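	// Propagate carries across lanes: each lane hands its carry word to the next lane
	// through shared memory and re-adds until no lane still has a pending carry.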
while(__any(carry)){
interBuff[intraWarpIdx + 1] = carry;
carry = interBuff[intraWarpIdx];
carry = (localThreadIdx) ? carry : 0;
UADD__CARRY_OUT (c_A, c_A, carry)
UADD__IN_CARRY_OUT(c_B, c_B, 0)
UADD__IN_CARRY_OUT(c_C, c_C, 0)
UADD__IN_CARRY_OUT(c_D, c_D, 0)
UADD__IN_CARRY (carry, 0, 0)
}
(* sum_A) = c_A;
(* sum_B) = c_B;
(* sum_C) = c_C;
(* sum_D) = c_D;
}
inline __device__ uint32_t selectEq_CC20(const uint32_t indexBase,
const uint32_t Eq0, const uint32_t Eq1,
const uint32_t Eq2, const uint32_t Eq3,
const uint32_t Eq4)
{
uint32_t Eq = Eq0;
Eq = (indexBase == 1) ? Eq1 : Eq;
Eq = (indexBase == 2) ? Eq2 : Eq;
Eq = (indexBase == 3) ? Eq3 : Eq;
Eq = (indexBase == 4) ? Eq4 : Eq;
return Eq;
}
inline __device__ uint32_t select_CC20(const uint32_t indexWord,
const uint32_t A, const uint32_t B,
const uint32_t C, const uint32_t D)
{
uint32_t value = A;
value = (indexWord == 1) ? B : value;
value = (indexWord == 2) ? C : value;
value = (indexWord == 3) ? D : value;
return value;
}
__device__ void myerslocalFermiKernel_CC20( const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates,
const uint32_t *d_reorderBuffer, resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo,
const uint32_t idCandidate, const uint32_t sizeRef, const uint32_t numReorderedResults,
const float distance, const uint32_t intraQueryThreadIdx, const uint32_t threadsPerQuery)
{
if (idCandidate < numReorderedResults){
const uint32_t * localCandidate;
uint32_t Ph_A, Mh_A, Pv_A, Mv_A, Xv_A, Xh_A, Eq_A, tEq_A;
uint32_t Ph_B, Mh_B, Pv_B, Mv_B, Xv_B, Xh_B, Eq_B, tEq_B;
uint32_t Ph_C, Mh_C, Pv_C, Mv_C, Xv_C, Xh_C, Eq_C, tEq_C;
uint32_t Ph_D, Mh_D, Pv_D, Mv_D, Xv_D, Xh_D, Eq_D, tEq_D;
uint4 Eq0, Eq1, Eq2, Eq3, Eq4;
uint32_t PH, MH, indexWord;
uint32_t sum_A, sum_B, sum_C, sum_D;
const uint32_t originalCandidate = d_reorderBuffer[idCandidate];
const uint64_t positionRef = d_candidates[originalCandidate].position;
const uint32_t sizeQuery = d_qinfo[d_candidates[originalCandidate].query].size;
const uint32_t entry = d_qinfo[d_candidates[originalCandidate].query].posEntry + intraQueryThreadIdx;
const uint32_t sizeCandidate = sizeQuery * (1 + 2 * distance);
const uint32_t numEntriesPerCandidate = (sizeCandidate / BASES_PER_ENTRY) + ((sizeCandidate % BASES_PER_ENTRY) ? 2 : 1);
uint32_t candidate;
const uint32_t mask = ((sizeQuery % SIZE_GPU_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQuery % SIZE_GPU_HW_WORD) - 1);
int32_t score = sizeQuery, minScore = sizeQuery;
uint32_t idColumn = 0, minColumn = 0, indexBase;
uint32_t intraBase, idEntry;
__shared__
uint32_t globalInterBuff[(SIZE_WARP + 1) * (CUDA_NUM_THREADS/SIZE_WARP)];
uint32_t *localInterBuff = globalInterBuff + ((threadIdx.x/SIZE_WARP) * (SIZE_WARP + 1));
uint32_t intraWarpIdx = threadIdx.x % SIZE_WARP;
indexWord = ((sizeQuery - 1) & (BASES_PER_THREAD - 1)) / SIZE_GPU_HW_WORD;
if((positionRef < sizeRef) && ((sizeRef - positionRef) > sizeCandidate)){
localCandidate = d_reference + (positionRef / BASES_PER_ENTRY);
Pv_A = MAX_VALUE;
Mv_A = 0;
Pv_B = MAX_VALUE;
Mv_B = 0;
Pv_C = MAX_VALUE;
Mv_C = 0;
Pv_D = MAX_VALUE;
Mv_D = 0;
Eq0 = d_queries[entry].bitmap[0];
Eq1 = d_queries[entry].bitmap[1];
Eq2 = d_queries[entry].bitmap[2];
Eq3 = d_queries[entry].bitmap[3];
Eq4 = d_queries[entry].bitmap[4];
for(idEntry = 0; idEntry < numEntriesPerCandidate; idEntry++){
candidate = localCandidate[idEntry];
for(intraBase = 0; intraBase < BASES_PER_ENTRY; intraBase++){
indexBase = candidate & 0x07;
Eq_A = selectEq_CC20(indexBase, Eq0.x, Eq1.x, Eq2.x, Eq3.x, Eq4.x);
Eq_B = selectEq_CC20(indexBase, Eq0.y, Eq1.y, Eq2.y, Eq3.y, Eq4.y);
Eq_C = selectEq_CC20(indexBase, Eq0.z, Eq1.z, Eq2.z, Eq3.z, Eq4.z);
Eq_D = selectEq_CC20(indexBase, Eq0.w, Eq1.w, Eq2.w, Eq3.w, Eq4.w);
Xv_A = Eq_A | Mv_A;
Xv_B = Eq_B | Mv_B;
Xv_C = Eq_C | Mv_C;
Xv_D = Eq_D | Mv_D;
tEq_A = Eq_A & Pv_A;
tEq_B = Eq_B & Pv_B;
tEq_C = Eq_C & Pv_C;
tEq_D = Eq_D & Pv_D;
shared_collaborative_sum_CC20(tEq_A, tEq_B, tEq_C, tEq_D, Pv_A, Pv_B, Pv_C, Pv_D,
intraQueryThreadIdx, intraWarpIdx, localInterBuff,
&sum_A, &sum_B, &sum_C, &sum_D);
Xh_A = (sum_A ^ Pv_A) | Eq_A;
Xh_B = (sum_B ^ Pv_B) | Eq_B;
Xh_C = (sum_C ^ Pv_C) | Eq_C;
Xh_D = (sum_D ^ Pv_D) | Eq_D;
Ph_A = Mv_A | ~(Xh_A | Pv_A);
Ph_B = Mv_B | ~(Xh_B | Pv_B);
Ph_C = Mv_C | ~(Xh_C | Pv_C);
Ph_D = Mv_D | ~(Xh_D | Pv_D);
Mh_A = Pv_A & Xh_A;
Mh_B = Pv_B & Xh_B;
Mh_C = Pv_C & Xh_C;
Mh_D = Pv_D & Xh_D;
PH = select_CC20(indexWord, Ph_A, Ph_B, Ph_C, Ph_D);
MH = select_CC20(indexWord, Mh_A, Mh_B, Mh_C, Mh_D);
score += (((PH & mask) != 0) - ((MH & mask) != 0));
shared_collaborative_shift_CC20(Ph_A, Ph_B, Ph_C, Ph_D, intraQueryThreadIdx,
intraWarpIdx, localInterBuff,
&Ph_A, &Ph_B, &Ph_C, &Ph_D);
shared_collaborative_shift_CC20(Mh_A, Mh_B, Mh_C, Mh_D, intraQueryThreadIdx,
intraWarpIdx, localInterBuff,
&Mh_A, &Mh_B, &Mh_C, &Mh_D);
Pv_A = Mh_A | ~(Xv_A | Ph_A);
Pv_B = Mh_B | ~(Xv_B | Ph_B);
Pv_C = Mh_C | ~(Xv_C | Ph_C);
Pv_D = Mh_D | ~(Xv_D | Ph_D);
Mv_A = Ph_A & Xv_A;
Mv_B = Ph_B & Xv_B;
Mv_C = Ph_C & Xv_C;
Mv_D = Ph_D & Xv_D;
candidate >>= NUM_BITS;
minColumn = (score < minScore) ? idColumn : minColumn;
minScore = (score < minScore) ? score : minScore;
if(intraQueryThreadIdx == (threadsPerQuery - 1))
idColumn++;
}
}
if(intraQueryThreadIdx == (threadsPerQuery - 1)){
d_reorderResults[idCandidate].column = minColumn/* - (positionRef % BASES_PER_ENTRY)*/;
d_reorderResults[idCandidate].score = minScore;
}
}
}
}
__global__ void myersFermiKernel_CC20(const d_qryEntry_t *d_queries, const uint32_t * d_reference, const candInfo_t *d_candidates, const uint32_t *d_reorderBuffer,
resEntry_t *d_reorderResults, const qryInfo_t *d_qinfo, const uint32_t sizeRef, const uint32_t numReorderedResults,
const float distance, uint32_t *d_initPosPerBucket, uint32_t *d_initWarpPerBucket, uint32_t numWarps)
{
uint32_t bucketIdx = 0;
uint32_t globalThreadIdx = blockIdx.y * blockDim.y + (blockIdx.x * blockDim.x + threadIdx.x);
uint32_t globalWarpIdx = globalThreadIdx / SIZE_WARP;
uint32_t localThreadInTheBucket, idCandidate, intraQueryThreadIdx, threadsPerQuery, queriesPerWarp, localIdCandidateInTheBucket;
while((bucketIdx != (SIZE_WARP + 1)) && (d_initWarpPerBucket[bucketIdx] <= globalWarpIdx)){
bucketIdx++;
}
bucketIdx--;
localThreadInTheBucket = globalThreadIdx - (d_initWarpPerBucket[bucketIdx] * SIZE_WARP);
threadsPerQuery = bucketIdx + 1;
queriesPerWarp = SIZE_WARP / threadsPerQuery;
localIdCandidateInTheBucket = ((localThreadInTheBucket / SIZE_WARP) * queriesPerWarp) + ((threadIdx.x % SIZE_WARP) / threadsPerQuery);
idCandidate = d_initPosPerBucket[bucketIdx] + localIdCandidateInTheBucket;
intraQueryThreadIdx = (threadIdx.x % SIZE_WARP) % threadsPerQuery;
myerslocalFermiKernel_CC20(d_queries, d_reference, d_candidates, d_reorderBuffer, d_reorderResults, d_qinfo,
idCandidate, sizeRef, numReorderedResults, distance, intraQueryThreadIdx, threadsPerQuery);
}
extern "C"
myersError_t processMyersBufferOnFermi(buffer_t *mBuff)
{
reference_buffer_t *ref = mBuff->reference;
queries_buffer_t *qry = mBuff->queries;
candidates_buffer_t *cand = mBuff->candidates;
reorder_buffer_t *rebuff = mBuff->reorderBuffer;
results_buffer_t *res = mBuff->results;
cudaStream_t idStream = mBuff->idStream;
//We use 2-Dimensional Grid (because Fermi is limited to 65535 Blocks per dim)
uint32_t threadsPerBlock = CUDA_NUM_THREADS;
uint32_t maxBlocksPerRow = 65535;
uint32_t numThreads = rebuff->numWarps * SIZE_WARP;
uint32_t numBlocks = (numThreads / threadsPerBlock) + ((numThreads % threadsPerBlock) ? 1 : 0);
uint32_t rowsPerGrid = (numBlocks / maxBlocksPerRow) + ((numBlocks % maxBlocksPerRow) ? 1 : 0);
uint32_t blocksPerRow = (rowsPerGrid > 1) ? maxBlocksPerRow : numBlocks;
dim3 blocksPerGrid;
blocksPerGrid.x = blocksPerRow;
blocksPerGrid.y = rowsPerGrid;
if(DEVICE == 0){
//printf("FERMI: LAUNCH KERNEL 0 -- blocksPerRow: %d - rowsPerGrid %d - threadsPerBlock %d\n", blocksPerGrid.x, blocksPerGrid.y, threadsPerBlock);
myersFermiKernel_CC20<<<blocksPerGrid, threadsPerBlock, 0, idStream>>>((d_qryEntry_t *)qry->d_queries, ref->d_reference, cand->d_candidates, rebuff->d_reorderBuffer,
res->d_reorderResults, qry->d_qinfo, ref->size, res->numReorderedResults,
qry->distance, rebuff->d_initPosPerBucket, rebuff->d_initWarpPerBucket,
rebuff->numWarps);
}
return(SUCCESS);
}
|
7d9b5fd0163a775a0b23410700b2261f46fc6974.hip | // !!! This is a file automatically generated by hipify!!!
/* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.hip"
#include "assist.h"
#define ERROR_CHECK { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
bool if_quiet = true;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
hipEvent_t start, stop;
float timer_compute, timer_memory_in, timer_memory_out;
// Create CUDA events for measuring execution times
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab2.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab2.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = 2; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
if_quiet = false; // If not display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
if_quiet = true; // If not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
// Start measuring transfer times from CPU to GPU
hipEventRecord(start, NULL);
printf(" Allocate device memory.\n");
hipMalloc((void**) &deviceM, mem_size_M);
hipMalloc((void**) &deviceN, mem_size_N);
printf(" Copy host memory data to device.\n");
hipMemcpy(deviceM, hostM, mem_size_M, hipMemcpyHostToDevice);
hipMemcpy(deviceN, hostN, mem_size_N, hipMemcpyHostToDevice);
printf(" Allocate device memory for results and clean it.\n");
hipMalloc((void**) &deviceP, mem_size_P);
hipMemset(deviceP, 0, mem_size_P);
// Stop measuring transfer times from CPU to GPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_memory_in, start, stop);
// ===================================================================
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Different ways of declarations
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start measuring the computation time for the CUDA kernel
hipEventRecord(start, NULL);
// Invoke the CUDA kernel here
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block), 0, 0, deviceP, deviceM, deviceN, Mw, Nw);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
hipDeviceSynchronize();
// Stop measuring the computation time for the CUDA kernel
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_compute, start, stop);
// ===================================================================
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
// Start measuring the transfer time back from the GPU to the CPU
hipEventRecord(start, NULL);
hipMemcpy(hostP, deviceP, mem_size_P, hipMemcpyDeviceToHost);
// Stop measuring the transfer time back from the GPU to the CPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_memory_out, start, stop);
// ===================================================================
// ================================================
// Show timing information
// ================================================
printf("Memory transfer time: %.4f ms.\n", timer_memory_in+timer_memory_out);
printf("GPU computation time: %.4f ms.\n", timer_compute);
printf("Total GPU processing time: %.4f ms.\n", timer_memory_in+timer_compute+timer_memory_out);
// ================================================
// Do comparison
// ================================================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
// Start measuring the computation time for the CPU
hipEventRecord(start, NULL);
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
// Stop measuring the computation time for the CPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_compute, start, stop);
printf(" CPU Processing time : %.4f ms.\n\n", timer_compute);
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab1.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab1.1-matrixmul.bin", "lab1.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
hipFree(deviceM);
hipFree(deviceN);
hipFree(deviceP);
// ===================================================================
}
| 7d9b5fd0163a775a0b23410700b2261f46fc6974.cu | /* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.cu"
#include "assist.h"
#define ERROR_CHECK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
bool if_quiet = true;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
cudaEvent_t start, stop;
float timer_compute, timer_memory_in, timer_memory_out;
// Create CUDA events for measuring execution times
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab2.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab2.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = 2; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
if_quiet = false; // If not display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
if_quiet = true; // If not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
// -----------------------------------------------------------------------
// Setup host side
// -----------------------------------------------------------------------
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
// Start measuring transfer times from CPU to GPU
cudaEventRecord(start, NULL);
printf(" Allocate device memory.\n");
cudaMalloc((void**) &deviceM, mem_size_M);
cudaMalloc((void**) &deviceN, mem_size_N);
printf(" Copy host memory data to device.\n");
cudaMemcpy(deviceM, hostM, mem_size_M, cudaMemcpyHostToDevice);
cudaMemcpy(deviceN, hostN, mem_size_N, cudaMemcpyHostToDevice);
printf(" Allocate device memory for results and clean it.\n");
cudaMalloc((void**) &deviceP, mem_size_P);
cudaMemset(deviceP, 0, mem_size_P);
// Stop measuring transfer times from CPU to GPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_memory_in, start, stop);
// ===================================================================
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Different ways of declarations
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
// ================================================
// Initialize the block and grid dimensions here
// ================================================
printf(" Executing the kernel...\n");
// Start measuring the computation time for the CUDA kernel
cudaEventRecord(start, NULL);
// Invoke the CUDA kernel here
matrixMul<<<grid, block>>> (deviceP, deviceM, deviceN, Mw, Nw);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
    cudaDeviceSynchronize();
// Stop measuring the computation time for the CUDA kernel
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_compute, start, stop);
// ===================================================================
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
// Start measuring the transfer time back from the GPU to the CPU
cudaEventRecord(start, NULL);
cudaMemcpy(hostP, deviceP, mem_size_P, cudaMemcpyDeviceToHost);
// Stop measuring the transfer time back from the GPU to the CPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_memory_out, start, stop);
// ===================================================================
// ================================================
// Show timing information
// ================================================
printf("Memory transfer time: %.4f ms.\n", timer_memory_in+timer_memory_out);
printf("GPU computation time: %.4f ms.\n", timer_compute);
printf("Total GPU processing time: %.4f ms.\n", timer_memory_in+timer_compute+timer_memory_out);
// ================================================
// Do comparison
// ================================================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
// Start measuring the computation time for the CPU
cudaEventRecord(start, NULL);
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
// Stop measuring the computation time for the CPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_compute, start, stop);
printf(" CPU Processing time : %.4f ms.\n\n", timer_compute);
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab1.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab1.1-matrixmul.bin", "lab1.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
cudaFree(deviceM);
cudaFree(deviceN);
cudaFree(deviceP);
// ===================================================================
}
|
417401ffcf3ee87b97266c39118423b462c91222.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_test5_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
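// For every matrix size and block shape listed above, the loop below warms the kernel
// up, then times 1000 back-to-back launches and prints the elapsed microseconds.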
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *_ptr = NULL;
hipMalloc(&_ptr, XSIZE*YSIZE);
char *end_ptr = NULL;
hipMalloc(&end_ptr, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernel_test5_init, dim3(gridBlock), dim3(threadBlock), 0, 0, _ptr, end_ptr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
	hipLaunchKernelGGL(kernel_test5_init, dim3(gridBlock), dim3(threadBlock), 0, 0, _ptr, end_ptr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
	hipLaunchKernelGGL(kernel_test5_init, dim3(gridBlock), dim3(threadBlock), 0, 0, _ptr, end_ptr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 417401ffcf3ee87b97266c39118423b462c91222.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_test5_init.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *_ptr = NULL;
cudaMalloc(&_ptr, XSIZE*YSIZE);
char *end_ptr = NULL;
cudaMalloc(&end_ptr, XSIZE*YSIZE);
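			// Round the matrix dimensions up to multiples of the block shape so the grid covers them fully.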
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_test5_init<<<gridBlock,threadBlock>>>(_ptr,end_ptr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_test5_init<<<gridBlock,threadBlock>>>(_ptr,end_ptr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_test5_init<<<gridBlock,threadBlock>>>(_ptr,end_ptr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d50a7ce79978f7cfe98c0c997395be395139d5f2.hip | // !!! This is a file automatically generated by hipify!!!
//Inputs:
// Number of bodies
// Max sphere size
// AVG temperature
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_BLOCKS 2496
#define SQUARED(a) a*a
typedef struct Particles{
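	// Structure-of-arrays particle store: one device array per coordinate/velocity
	// component, plus the species tag and a per-particle grain-intersection counter.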
float * x;
float * y;
float * z;
float * v_x;
float * v_y;
float * v_z;
short * intersects; //Number of times intersected the dust grain
unsigned char * pType;
int numBodies;
	Particles(int nBodies){
		hipMalloc((void**)&x, nBodies * sizeof(float));
		hipMalloc((void**)&y, nBodies * sizeof(float));
		hipMalloc((void**)&z, nBodies * sizeof(float));
		hipMalloc((void**)&v_x, nBodies * sizeof(float));
		hipMalloc((void**)&v_y, nBodies * sizeof(float));
		hipMalloc((void**)&v_z, nBodies * sizeof(float));
		hipMalloc((void**)&pType, nBodies * sizeof(unsigned char));
		hipMalloc((void**)&intersects, nBodies * sizeof(short));
		numBodies = nBodies;
	};
void free(){
hipFree(x);
hipFree(y);
hipFree(z);
hipFree(v_x);
hipFree(v_y);
hipFree(v_z);
hipFree(intersects);
hipFree(pType);
}
} Particles;
enum DiagnosticsType{
	InitTime = 0,
	StepTime = 1
};
enum ParticleType{
	Electron = 0,
	Ion = 1,
	Grain = 2
};
enum DiagnosticsType diag_id;
//Potentially pass into command whether or not to reallocate memory for particles
__device__ Particles * ParticleCollection = NULL;
// Diagnostics:
// 1. science charging, debye size and shape, plasma condx, etc
// 2. simulation: energy conservation, etc
// 3. cuda: time, memory, etc
// TODO:
// 1. only prepare if asked for.
__device__ void command(int nBodies, float radius, float temp, bool reallocate, int* diag_id, bool* diag_dataAvailable, int* diag_length, void* diag_data)
{
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if (reallocate && (ParticleCollection!=NULL) )
{
		ParticleCollection->free();
hipFree(ParticleCollection);
ParticleCollection=NULL;
}
if(ParticleCollection==NULL)
{
ParticleCollection = Particles(nBodies); //dynamically allocate the required memory for n bodies.
}
int blocks,threads;
if(nBodies<MAX_BLOCKS)
{
blocks = nBodies;
threads = 1;
} else
{
		blocks = MAX_BLOCKS;
threads = (nBodies/MAX_BLOCKS)+1; //truncate and add one just to be sure. Okay if falls off, that is checked.
}
hipEventRecord(start,0);
//Generate initial plasma, and time it.
	hipLaunchKernelGGL(prepare, dim3(blocks), dim3(threads), 0, 0, ParticleCollection, radius, temp);
	hipEventRecord(stop,0);
	hipEventSynchronize(stop);
	float seconds;
	hipEventElapsedTime(&seconds, start, stop);
//Submit plasma generation phase timing results.
	pushDiagnostics(InitTime, &seconds, sizeof(float), diag_id, diag_dataAvailable, diag_length, diag_data);
/*for (i=0, i<numSteps, i++)
{
forceAccum();
// time diagnostic on force accum
advanceOrbit();
// time diagnostic on advance orbit
boundarCheck();
// time diagnostic on aboundary check
grainColelct();
// time diagnostic on ocllctChecke
pushDiagnostics(DiagnosticsType.StepTime, ..., sizeof(float), diag_id, diag_dataAvailable, diag_length, diag_data)
} */
}
// TODO: Make equal parts ions and electrons, don't forget to make the dust grain
__device__ void prepare(Particles ParticleCollection, float radius, float temp) //Generate inital plasma
{
//(https://code.google.com/p/stanford-cs193g-sp2010/wiki/TutorialMultidimensionalKernelLaunch)
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
if(index<ParticleCollection.numBodies)
{
float x,y,z,v_x,v_y,v_z;
//thermalize(&x,&y,&z,&v_x,&v_y,&v_z);
int sqareroot = sqrt(ParticleCollection.numBodies)+1;
randomize(&x,&y,&z,&v_x,&v_y,&v_z,index,sqareroot);
ParticleCollection.x[index] = x;
ParticleCollection.y[index] = y;
ParticleCollection.z[index] = z;
ParticleCollection.v_x[index] = v_x;
ParticleCollection.v_y[index] = v_y;
ParticleCollection.v_z[index] = v_z;
ParticleCollection.intersects[index] = 0;
if(index==0){
ParticleCollection.pType[index] = ParticleType.Grain;
} else if(index%2==0){
ParticleCollection.pType[index] = ParticleType.Electron;
} else{
ParticleCollection.pType[index] = ParticleType.Ion;
}
}
}
__device__ void randomize(float * x, float * y, float * z, float * v_x, float * v_y, float * v_z, int index, int sqareroot)
{
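	// Despite its name this places particles on a regular lattice: the linear index is
	// decomposed into (x,y,z) on a cube of side 'sqareroot', with zero initial velocity.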
*z = index % sqareroot;
*y = (index/sqareroot) % sqareroot;
*x = index/(sqareroot*sqareroot);
*v_x = 0;
*v_y = 0;
*v_z = 0;
}
//Watch out when block queueing happens, might be off by max one time step.
__device__ void collectAndIntegrate(Particles ParticleCollection)
{
//(https://code.google.com/p/stanford-cs193g-sp2010/wiki/TutorialMultidimensionalKernelLaunch)
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
float fx=0,fy=0,fz=0;
float k = 3.1415926; // TODO: cmon, fix this
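		// Brute-force O(N^2) pass: accumulate on particle 'index' the Coulomb-style
		// contribution (charge_j / r^3 times the displacement) from every other particle.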
if(index<ParticleCollection.numBodies)
{
float i_x,i_y,i_z,i_type,i_charge,i_v_x,i_v_y,i_v_z;
float j_x,j_y,j_z,j_type,j_charge;//,j_v_x,j_v_y,j_v_z;
float rsq,r;
i_x = ParticleCollection.x[index];
i_y = ParticleCollection.y[index];
i_z = ParticleCollection.z[index];
i_type = ParticleCollection.pType[index];
i_v_x = ParticleCollection.v_x[index];
i_v_y = ParticleCollection.v_y[index];
i_v_z = ParticleCollection.v_z[index];
// TODO: dimensional scaling will come later
i_charge = 0;
if(i_type==ParticleType.Electron){ //electron
i_charge = -1.0;
}
else if(i_type==ParticleType.Ion){ //ion
i_charge = 1.0;
}
		for(int j = 0; j < ParticleCollection.numBodies; j++)
{
if(j!=index)
{
//TODO: Potentially memcpy 6 floats and work directly etc.
j_x = ParticleCollection.x[j];
j_y = ParticleCollection.y[j];
j_z = ParticleCollection.z[j];
j_type = ParticleCollection.pType[j];
//j_v_x = ParticleCollection.v_x[index];
//j_v_y = ParticleCollection.v_y[index];
//j_v_z = ParticleCollection.v_z[index];
j_charge = 0;
if(j_type==0){ //electron
j_charge = -1.0;
}
else if(j_type==1){ //ion
j_charge = 1.0;
}
rsq = ((i_x*i_x) - (2*i_x*j_x) + (j_x*j_x))+
((i_y*i_y) - (2*i_y*j_y) + (j_y*j_y))+
((i_z*i_z) - (2*i_z*j_z) + (j_z*j_z));
				r = sqrtf(rsq);
				float rcb = rsq*r;
fx += - j_charge/rcb * (j_x-i_x);
fy += - j_charge/rcb * (j_y-i_y);
fz += - j_charge/rcb * (j_z-i_z);
			}
		}
		fx = k * i_charge * fx;
		fy = k * i_charge * fy;
		fz = k * i_charge * fz;
//INTEGRATE MEEEEEEE
}
}
__device__ void gatherBlaDiagnostics(Particles ParticleCollection)
{
}
__device__ void pushDiagnostics(DiagnosticsType type, void * data, int length, int* diag_id, bool* diag_dataAvailable, int* diag_length, void* diag_data)
{
while(!*diag_dataAvailable){
*diag_id = type;
*diag_length = length;
hipMemcpy(diag_data,data,length,hipMemcpyDefault); //hipMemcpyDeviceToHost??
*diag_dataAvailable = true;
}
}
//__device__ void queuePush();
//__device__ void handlePushQueue();
//Particles parts = Particles(nBodies);
//float x,y,z,v_x,v_y,v_z;
| d50a7ce79978f7cfe98c0c997395be395139d5f2.cu | //Inputs:
// Number of bodies
// Max sphere size
// AVG temperature
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_BLOCKS 2496
#define SQUARED(a) a*a
typedef struct Particles{
float * x;
float * y;
float * z;
float * v_x;
float * v_y;
float * v_z;
short * intersects; //Number of times intersected the dust grain
unsigned char * pType;
int numBodies;
	Particles(int nBodies){
		cudaMalloc((void**)&x, nBodies * sizeof(float));
		cudaMalloc((void**)&y, nBodies * sizeof(float));
		cudaMalloc((void**)&z, nBodies * sizeof(float));
		cudaMalloc((void**)&v_x, nBodies * sizeof(float));
		cudaMalloc((void**)&v_y, nBodies * sizeof(float));
		cudaMalloc((void**)&v_z, nBodies * sizeof(float));
		cudaMalloc((void**)&pType, nBodies * sizeof(unsigned char));
		cudaMalloc((void**)&intersects, nBodies * sizeof(short));
		numBodies = nBodies;
	};
void free(){
cudaFree(x);
cudaFree(y);
cudaFree(z);
cudaFree(v_x);
cudaFree(v_y);
cudaFree(v_z);
cudaFree(intersects);
cudaFree(pType);
}
} Particles;
enum DiagnosticsType{
	InitTime = 0,
	StepTime = 1
};
enum ParticleType{
	Electron = 0,
	Ion = 1,
	Grain = 2
};
enum DiagnosticsType diag_id;
//Potentially pass into command whether or not to reallocate memory for particles
__device__ Particles * ParticleCollection = NULL;
// Diagnostics:
// 1. science charging, debye size and shape, plasma condx, etc
// 2. simulation: energy conservation, etc
// 3. cuda: time, memory, etc
// TODO:
// 1. only prepare if asked for.
__device__ void command(int nBodies, float radius, float temp, bool reallocate, int* diag_id, bool* diag_dataAvailable, int* diag_length, void* diag_data)
{
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (reallocate && (ParticleCollection!=NULL) )
{
		ParticleCollection->free();
cudaFree(ParticleCollection);
ParticleCollection=NULL;
}
if(ParticleCollection==NULL)
{
ParticleCollection = Particles(nBodies); //dynamically allocate the required memory for n bodies.
}
int blocks,threads;
if(nBodies<MAX_BLOCKS)
{
blocks = nBodies;
threads = 1;
} else
{
		blocks = MAX_BLOCKS;
threads = (nBodies/MAX_BLOCKS)+1; //truncate and add one just to be sure. Okay if falls off, that is checked.
}
cudaEventRecord(start,0);
//Generate initial plasma, and time it.
	prepare<<<blocks,threads>>>(ParticleCollection, radius, temp);
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	float seconds;
	cudaEventElapsedTime(&seconds, start, stop);
//Submit plasma generation phase timing results.
	pushDiagnostics(InitTime, &seconds, sizeof(float), diag_id, diag_dataAvailable, diag_length, diag_data);
/*for (i=0, i<numSteps, i++)
{
forceAccum();
// time diagnostic on force accum
advanceOrbit();
// time diagnostic on advance orbit
boundarCheck();
// time diagnostic on aboundary check
grainColelct();
// time diagnostic on ocllctChecke
pushDiagnostics(DiagnosticsType.StepTime, ..., sizeof(float), diag_id, diag_dataAvailable, diag_length, diag_data)
} */
}
// TODO: Make equal parts ions and electrons, don't forget to make the dust grain
__device__ void prepare(Particles ParticleCollection, float radius, float temp) //Generate inital plasma
{
//(https://code.google.com/p/stanford-cs193g-sp2010/wiki/TutorialMultidimensionalKernelLaunch)
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
if(index<ParticleCollection.numBodies)
{
float x,y,z,v_x,v_y,v_z;
//thermalize(&x,&y,&z,&v_x,&v_y,&v_z);
int sqareroot = sqrt(ParticleCollection.numBodies)+1;
randomize(&x,&y,&z,&v_x,&v_y,&v_z,index,sqareroot);
ParticleCollection.x[index] = x;
ParticleCollection.y[index] = y;
ParticleCollection.z[index] = z;
ParticleCollection.v_x[index] = v_x;
ParticleCollection.v_y[index] = v_y;
ParticleCollection.v_z[index] = v_z;
ParticleCollection.intersects[index] = 0;
if(index==0){
ParticleCollection.pType[index] = ParticleType.Grain;
} else if(index%2==0){
ParticleCollection.pType[index] = ParticleType.Electron;
} else{
ParticleCollection.pType[index] = ParticleType.Ion;
}
}
}
__device__ void randomize(float * x, float * y, float * z, float * v_x, float * v_y, float * v_z, int index, int sqareroot)
{
*z = index % sqareroot;
*y = (index/sqareroot) % sqareroot;
*x = index/(sqareroot*sqareroot);
*v_x = 0;
*v_y = 0;
*v_z = 0;
}
//Watch out when block queueing happens, might be off by max one time step.
__device__ void collectAndIntegrate(Particles ParticleCollection)
{
//(https://code.google.com/p/stanford-cs193g-sp2010/wiki/TutorialMultidimensionalKernelLaunch)
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
// map the two 2D indices to a single linear, 1D index
int grid_width = gridDim.x * blockDim.x;
int index = index_y * grid_width + index_x;
float fx=0,fy=0,fz=0;
float k = 3.1415926; // TODO: cmon, fix this
if(index<ParticleCollection.numBodies)
{
float i_x,i_y,i_z,i_type,i_charge,i_v_x,i_v_y,i_v_z;
float j_x,j_y,j_z,j_type,j_charge;//,j_v_x,j_v_y,j_v_z;
float rsq,r;
i_x = ParticleCollection.x[index];
i_y = ParticleCollection.y[index];
i_z = ParticleCollection.z[index];
i_type = ParticleCollection.pType[index];
i_v_x = ParticleCollection.v_x[index];
i_v_y = ParticleCollection.v_y[index];
i_v_z = ParticleCollection.v_z[index];
// TODO: dimensional scaling will come later
i_charge = 0;
if(i_type==ParticleType.Electron){ //electron
i_charge = -1.0;
}
else if(i_type==ParticleType.Ion){ //ion
i_charge = 1.0;
}
		for(int j = 0; j < ParticleCollection.numBodies; j++)
{
if(j!=index)
{
//TODO: Potentially memcpy 6 floats and work directly etc.
j_x = ParticleCollection.x[j];
j_y = ParticleCollection.y[j];
j_z = ParticleCollection.z[j];
j_type = ParticleCollection.pType[j];
//j_v_x = ParticleCollection.v_x[index];
//j_v_y = ParticleCollection.v_y[index];
//j_v_z = ParticleCollection.v_z[index];
j_charge = 0;
if(j_type==0){ //electron
j_charge = -1.0;
}
else if(j_type==1){ //ion
j_charge = 1.0;
}
rsq = ((i_x*i_x) - (2*i_x*j_x) + (j_x*j_x))+
((i_y*i_y) - (2*i_y*j_y) + (j_y*j_y))+
((i_z*i_z) - (2*i_z*j_z) + (j_z*j_z));
				r = sqrtf(rsq);
				float rcb = rsq*r;
fx += - j_charge/rcb * (j_x-i_x);
fy += - j_charge/rcb * (j_y-i_y);
fz += - j_charge/rcb * (j_z-i_z);
			}
		}
		fx = k * i_charge * fx;
		fy = k * i_charge * fy;
		fz = k * i_charge * fz;
//INTEGRATE MEEEEEEE
}
}
__device__ void gatherBlaDiagnostics(Particles ParticleCollection)
{
}
__device__ void pushDiagnostics(DiagnosticsType type, void * data, int length, int* diag_id, bool* diag_dataAvailable, int* diag_length, void* diag_data)
{
while(!*diag_dataAvailable){
*diag_id = type;
*diag_length = length;
cudaMemcpy(diag_data,data,length,cudaMemcpyDefault); //cudaMemcpyDeviceToHost??
*diag_dataAvailable = true;
}
}
//__device__ void queuePush();
//__device__ void handlePushQueue();
//Particles parts = Particles(nBodies);
//float x,y,z,v_x,v_y,v_z;
|
b730b3bcb44990c19b1b9e53d04af4daf9ed0635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void nekbone(double *w, const double *u, const double *g, const double *d, const int N, const float RN) {
const int e_size = N * N * N;
const int e_offset = e_size * blockIdx.x;
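	// One thread block per spectral element: u is staged in shared memory, the 1-D
	// derivative matrix d is applied along the three local directions to form the
	// gradients (ur,us,ut), the six geometric factors g0..g5 are folded in, and the
	// transposed derivative pass accumulates the result into w.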
__shared__ double ur[1024];
__shared__ double us[1024];
__shared__ double ut[1024];
__shared__ double ul[1024];
__shared__ double d_s[128];
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
ul[it] = u[e_offset + it];
}
if (threadIdx.x < 128) {
d_s[threadIdx.x] = d[threadIdx.x];
}
__syncthreads();
int i, j, k;
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
double g0 = __ldg(&g[6 * e_offset + 0 * e_size + it]);
double g1 = __ldg(&g[6 * e_offset + 1 * e_size + it]);
double g2 = __ldg(&g[6 * e_offset + 2 * e_size + it]);
double g3 = __ldg(&g[6 * e_offset + 3 * e_size + it]);
double g4 = __ldg(&g[6 * e_offset + 4 * e_size + it]);
double g5 = __ldg(&g[6 * e_offset + 5 * e_size + it]);
j = it * RN;
i = it - j * N;
k = j * RN;
j -= k * N;
double wr = 0.0;
double ws = 0.0;
double wt = 0.0;
for (int n = 0; n < N; ++n) {
wr += d_s[n * N + i] * ul[N * (j + k * N) + n];
ws += d_s[n * N + j] * ul[N * (n + k * N) + i];
wt += d_s[n * N + k] * ul[N * (j + n * N) + i];
}
ur[it] = g0 * wr + g1 * ws + g2 * wt;
us[it] = g1 * wr + g3 * ws + g4 * wt;
ut[it] = g2 * wr + g4 * ws + g5 * wt;
}
__syncthreads();
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
j = it * RN;
i = it - j * N;
k = j * RN;
j -= k * N;
double s = 0.0;
for (int n = 0; n < N; ++n) {
s += d_s[i * N + n] * ur[N * (j + N * k) + n] +
d_s[j * N + n] * us[N * (n + N * k) + i] +
d_s[k * N + n] * ut[N * (j + N * n) + i];
}
w[e_offset + it] = s;
}
}
| b730b3bcb44990c19b1b9e53d04af4daf9ed0635.cu | __global__
void nekbone(double *w, const double *u, const double *g, const double *d, const int N, const float RN) {
const int e_size = N * N * N;
const int e_offset = e_size * blockIdx.x;
__shared__ double ur[1024];
__shared__ double us[1024];
__shared__ double ut[1024];
__shared__ double ul[1024];
__shared__ double d_s[128];
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
ul[it] = u[e_offset + it];
}
if (threadIdx.x < 128) {
d_s[threadIdx.x] = d[threadIdx.x];
}
__syncthreads();
int i, j, k;
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
double g0 = __ldg(&g[6 * e_offset + 0 * e_size + it]);
double g1 = __ldg(&g[6 * e_offset + 1 * e_size + it]);
double g2 = __ldg(&g[6 * e_offset + 2 * e_size + it]);
double g3 = __ldg(&g[6 * e_offset + 3 * e_size + it]);
double g4 = __ldg(&g[6 * e_offset + 4 * e_size + it]);
double g5 = __ldg(&g[6 * e_offset + 5 * e_size + it]);
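		// Decompose the linear index into (i,j,k); RN is presumably 1.0f/N, so the integer
		// divisions can be done as float multiplies followed by truncation.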
j = it * RN;
i = it - j * N;
k = j * RN;
j -= k * N;
double wr = 0.0;
double ws = 0.0;
double wt = 0.0;
for (int n = 0; n < N; ++n) {
wr += d_s[n * N + i] * ul[N * (j + k * N) + n];
ws += d_s[n * N + j] * ul[N * (n + k * N) + i];
wt += d_s[n * N + k] * ul[N * (j + n * N) + i];
}
ur[it] = g0 * wr + g1 * ws + g2 * wt;
us[it] = g1 * wr + g3 * ws + g4 * wt;
ut[it] = g2 * wr + g4 * ws + g5 * wt;
}
__syncthreads();
for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
j = it * RN;
i = it - j * N;
k = j * RN;
j -= k * N;
double s = 0.0;
for (int n = 0; n < N; ++n) {
s += d_s[i * N + n] * ur[N * (j + N * k) + n] +
d_s[j * N + n] * us[N * (n + N * k) + i] +
d_s[k * N + n] * ut[N * (j + N * n) + i];
}
w[e_offset + it] = s;
}
}
|
d34c71b9fc66478ee87da9b4eb49d6037605315d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define TOTAL_ROWS 1000U
#define TOTAL_COLS 2000U
__global__
void init_matrix(float *matrix, int width, int height, float val) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
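	// Grid-stride loop: each thread touches every (gridDim.x * blockDim.x)-th element,
	// so the kernel handles any matrix size regardless of the launch configuration.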
for (int i = idx; i < width * height; i += gridDim.x * blockDim.x) {
matrix[i]=val;
}
}
__global__
void add_matrices(float * mat_A_arr, float * mat_B_arr, float * mat_C_arr,
int num_cols, int num_rows) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if(row < num_rows && col < num_cols) {
mat_C_arr[row*num_cols + col] = mat_A_arr[row*num_cols + col] +
mat_B_arr[row*num_cols + col];
}
}
int main() {
hipError_t err = hipSuccess;
float *mat_A, *mat_B, *mat_C;
size_t memsize = TOTAL_COLS * TOTAL_ROWS * sizeof(float);
/* Allocate memories for the matrices*/
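	/* The buffers use managed (unified) memory, so no explicit host<->device copies are
	   needed around the kernels below. */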
err = hipMallocManaged(&mat_A, memsize);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix A (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMallocManaged(&mat_B, memsize);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix B (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMallocManaged(&mat_C, memsize);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix C (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Initialize matrices A and B */
int blocksize_for_init = 256;
int blocks_for_init = (TOTAL_ROWS*TOTAL_COLS + blocksize_for_init - 1)
/ (blocksize_for_init);
hipLaunchKernelGGL(( init_matrix), dim3(blocks_for_init), dim3(blocksize_for_init), 0, 0, mat_A, TOTAL_COLS, TOTAL_ROWS, 1);
hipLaunchKernelGGL(( init_matrix), dim3(blocks_for_init), dim3(blocksize_for_init), 0, 0, mat_B, TOTAL_COLS, TOTAL_ROWS, 2);
err = hipGetLastError();
if( err != hipSuccess) {
fprintf(stderr, "Failed to initialize matrix (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Do the matrix addition */
size_t blocksizeX = 16;
size_t blocksizeY = 16;
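    // Ceiling division: enough 16x16 blocks to tile the full TOTAL_COLS x TOTAL_ROWS matrix.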
dim3 DimGrid( (TOTAL_COLS-1)/blocksizeX + 1, (TOTAL_ROWS-1)/blocksizeY + 1);
dim3 DimBlock( blocksizeX, blocksizeY);
hipLaunchKernelGGL(( add_matrices), dim3(DimGrid), dim3(DimBlock), 0, 0, mat_A, mat_B, mat_C, TOTAL_COLS, TOTAL_ROWS);
err = hipGetLastError();
if( err != hipSuccess) {
fprintf(stderr, "Failed to perform matrix addition (error code %s)!\n",
hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < (TOTAL_ROWS*TOTAL_COLS); i++)
maxError = fmax(maxError, fabs(mat_C[i]-3.0f));
printf("Max error: %f\n", maxError);
return EXIT_SUCCESS;
} | d34c71b9fc66478ee87da9b4eb49d6037605315d.cu | // Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define TOTAL_ROWS 1000U
#define TOTAL_COLS 2000U
__global__
void init_matrix(float *matrix, int width, int height, float val) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = idx; i < width * height; i += gridDim.x * blockDim.x) {
matrix[i]=val;
}
}
__global__
void add_matrices(float * mat_A_arr, float * mat_B_arr, float * mat_C_arr,
int num_cols, int num_rows) {
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = threadIdx.x + blockIdx.x * blockDim.x;
if(row < num_rows && col < num_cols) {
mat_C_arr[row*num_cols + col] = mat_A_arr[row*num_cols + col] +
mat_B_arr[row*num_cols + col];
}
}
int main() {
cudaError_t err = cudaSuccess;
float *mat_A, *mat_B, *mat_C;
size_t memsize = TOTAL_COLS * TOTAL_ROWS * sizeof(float);
/* Allocate memories for the matrices*/
err = cudaMallocManaged(&mat_A, memsize);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix A (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMallocManaged(&mat_B, memsize);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix B (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMallocManaged(&mat_C, memsize);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate memory for matrix C (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Initialize matrices A and B */
int blocksize_for_init = 256;
int blocks_for_init = (TOTAL_ROWS*TOTAL_COLS + blocksize_for_init - 1)
/ (blocksize_for_init);
init_matrix<<<blocks_for_init, blocksize_for_init>>>(mat_A, TOTAL_COLS, TOTAL_ROWS, 1);
init_matrix<<<blocks_for_init, blocksize_for_init>>>(mat_B, TOTAL_COLS, TOTAL_ROWS, 2);
err = cudaGetLastError();
if( err != cudaSuccess) {
fprintf(stderr, "Failed to initialize matrix (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/* Do the matrix addition */
size_t blocksizeX = 16;
size_t blocksizeY = 16;
dim3 DimGrid( (TOTAL_COLS-1)/blocksizeX + 1, (TOTAL_ROWS-1)/blocksizeY + 1);
dim3 DimBlock( blocksizeX, blocksizeY);
add_matrices<<<DimGrid, DimBlock>>>(mat_A, mat_B, mat_C, TOTAL_COLS, TOTAL_ROWS);
err = cudaGetLastError();
if( err != cudaSuccess) {
fprintf(stderr, "Failed to perform matrix addition (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < (TOTAL_ROWS*TOTAL_COLS); i++)
maxError = fmax(maxError, fabs(mat_C[i]-3.0f));
printf("Max error: %f\n", maxError);
return EXIT_SUCCESS;
} |
37803041b50d2784f6635518fd4391fa3e40edaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 32
void printMatrix (unsigned* matrix) {
for (unsigned i = 0; i < N * N; i++) {
printf(" %u ", matrix[i]);
if (i % N == (N-1)) {
printf("\n");
}
}
}
void createMatrix(unsigned* matrix) {
for (unsigned i = 0; i < N; i++) {
for (unsigned j = 0; j < N; j++) {
if (i == j) {
matrix[i * N + j] = i + 1;
} else {
matrix[i * N + j] = 0;
}
}
}
}
__global__ void square (unsigned* matrix, unsigned* result, unsigned matrixSize) {
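    // One thread per output row: thread `id` accumulates row `id` of matrix * matrix.
    // Note the += below: the output buffer must be zeroed before the launch.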
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned jj = 0; jj < matrixSize; jj++) {
for (unsigned kk = 0; kk < matrixSize; kk++) {
result[id * matrixSize + jj] += matrix[id * matrixSize + kk] * matrix[kk * matrixSize + jj];
}
}
}
__host__ void call_sqr (unsigned* h_in_matrix, unsigned* h_out_matrix) {
// unsigned n = N;
unsigned *d_in_matrix, *d_out_matrix;
hipMalloc((void **) &d_in_matrix, N * N * sizeof(unsigned));
    hipMalloc((void **) &d_out_matrix, N * N * sizeof(unsigned));
    hipMemset(d_out_matrix, 0, N * N * sizeof(unsigned)); // the kernel accumulates with +=, so the output must start zeroed
hipMemcpy(d_in_matrix, h_in_matrix, N * N * sizeof(unsigned), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( square), dim3(1), dim3(N), 0, 0, d_in_matrix, d_out_matrix, N);
hipMemcpy(h_out_matrix, d_out_matrix, N * N * sizeof(unsigned), hipMemcpyDeviceToHost);
hipFree(d_in_matrix);
hipFree(d_out_matrix);
}
int main() {
unsigned* matrix;
unsigned* result;
matrix = (unsigned*)malloc(N * N * sizeof(unsigned));
result = (unsigned*)malloc(N * N * sizeof(unsigned));
createMatrix(matrix);
call_sqr (matrix, result);
printMatrix(result);
free(matrix);
free(result);
return 0;
}
| 37803041b50d2784f6635518fd4391fa3e40edaa.cu | #include <cuda.h>
#include <stdio.h>
#define N 32
void printMatrix (unsigned* matrix) {
for (unsigned i = 0; i < N * N; i++) {
printf(" %u ", matrix[i]);
if (i % N == (N-1)) {
printf("\n");
}
}
}
void createMatrix(unsigned* matrix) {
for (unsigned i = 0; i < N; i++) {
for (unsigned j = 0; j < N; j++) {
if (i == j) {
matrix[i * N + j] = i + 1;
} else {
matrix[i * N + j] = 0;
}
}
}
}
__global__ void square (unsigned* matrix, unsigned* result, unsigned matrixSize) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned jj = 0; jj < matrixSize; jj++) {
for (unsigned kk = 0; kk < matrixSize; kk++) {
result[id * matrixSize + jj] += matrix[id * matrixSize + kk] * matrix[kk * matrixSize + jj];
}
}
}
__host__ void call_sqr (unsigned* h_in_matrix, unsigned* h_out_matrix) {
// unsigned n = N;
unsigned *d_in_matrix, *d_out_matrix;
cudaMalloc((void **) &d_in_matrix, N * N * sizeof(unsigned));
    cudaMalloc((void **) &d_out_matrix, N * N * sizeof(unsigned));
    cudaMemset(d_out_matrix, 0, N * N * sizeof(unsigned)); // the kernel accumulates with +=, so the output must start zeroed
cudaMemcpy(d_in_matrix, h_in_matrix, N * N * sizeof(unsigned), cudaMemcpyHostToDevice);
square<<<1, N>>>(d_in_matrix, d_out_matrix, N);
cudaMemcpy(h_out_matrix, d_out_matrix, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost);
cudaFree(d_in_matrix);
cudaFree(d_out_matrix);
}
int main() {
unsigned* matrix;
unsigned* result;
matrix = (unsigned*)malloc(N * N * sizeof(unsigned));
result = (unsigned*)malloc(N * N * sizeof(unsigned));
createMatrix(matrix);
call_sqr (matrix, result);
printMatrix(result);
free(matrix);
free(result);
return 0;
}
|
e46e6c552d3461b227d5470f9a266cc62005e525.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
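    // "Mixed" indexing: threads and blocks cover the x dimension, while blockIdx.y
    // alone selects the row, so the launch must use a grid with ny blocks in y.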
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
} | e46e6c552d3461b227d5470f9a266cc62005e525.cu | #include "includes.h"
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
unsigned int idx = iy * nx + ix;
if (ix < nx && iy < ny)
MatC[idx] = MatA[idx] + MatB[idx];
} |
ad27c0e99ec344e038ac9a85a0eda507fa030c9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "aarect.h"
#include "box.h"
#include "bvh.h"
#include "camera.h"
#include "constant_medium.h"
#include "cuda_utils.h"
#include "hittable_list.h"
#include "material.h"
#include "moving_sphere.h"
#include "ray.h"
#include "sphere.h"
#include "triangle.h"
#include "vec3.h"
#include <hiprand/hiprand_kernel.h>
#include <float.h>
#include <iostream>
#include <time.h>
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 get_color(const ray &r, hittable **world,
hiprandState_t *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation(1.0f, 1.0f, 1.0f);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec, local_rand_state)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
} else {
return vec3(0.0, 0.0, 0.0);
}
} else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__device__ vec3 get_color(const ray &r, color **background, hittable **world,
hiprandState_t *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation(1.0f, 1.0f, 1.0f);
const int depth = 50;
vec3 emitted_rec[depth];
vec3 attenuation_rec[depth];
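  // Record the emitted light and attenuation of every bounce, then unwind below with
  // L_i = emitted_i + attenuation_i * L_(i+1), starting from the background color or
  // the final emitted term.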
for (int i = 0; i < depth; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec, local_rand_state)) {
ray scattered;
vec3 attenuation;
color emitted = rec.mat_ptr->emitted(rec.u, rec.v, rec.p);
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
// scattered
// cur_attenuation *= attenuation;
// cur_attenuation += emitted;
// cur_attenuation *= (attenuation + emitted);
emitted_rec[i] = emitted;
attenuation_rec[i] = attenuation;
cur_ray = scattered;
} else {
// no scatter
// no attenuation
// no background light
// but we have emitted
cur_attenuation *= emitted;
while (i-- > 0) {
cur_attenuation =
emitted_rec[i] + cur_attenuation * attenuation_rec[i];
}
return cur_attenuation;
}
} else {
// no hit
// only have background
cur_attenuation *= **background;
while (i-- > 0) {
cur_attenuation = emitted_rec[i] + cur_attenuation * attenuation_rec[i];
}
return cur_attenuation;
}
}
return **background; // exceeded recursion
}
__global__ void rand_init(hiprandState_t *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
  // Original: each thread gets the same seed, a different sequence number, and no offset:
  //   hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
  // BUGFIX (see Issue #2): each thread gets a different seed and the same sequence,
  // for a performance improvement of about 2x:
  //   hiprand_init(1984 + pixel_index, 0, 0, &rand_state[pixel_index]);
hiprand_init(1984 + pixel_index, i, j, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam,
hittable **world, hiprandState_t *rand_state,
color **background) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
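  // Copy the RNG state into a register-local variable; it is written back after sampling.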
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += get_color(r, background, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (hiprand_uniform(&local_rand_state))
__device__ hittable *random_scene(hittable **d_list,
hiprandState_t local_rand_state) {
auto checker =
new checker_texture(color(0.2, 0.3, 0.1), color(0.9, 0.9, 0.9));
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new lambertian(checker));
// d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
// new lambertian(vec3(0.5, 0.5, 0.5)));
// d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
// make_shared<lambertian>(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
vec3 center2 = center + vec3(0, RND * 0.5f, 0);
// d_list[i++] =
// new sphere(center, 0.2,
// new lambertian(vec3(RND * RND, RND * RND, RND *
// RND)));
d_list[i++] = new moving_sphere(
center, center2, 0.0, 1.0, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
} else if (choose_mat < 0.95f) {
d_list[i++] =
new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND),
0.5f * (1.0f + RND)),
0.5f * RND));
} else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] =
new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] =
new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
return new bvh_node(d_list, 0, 22 * 22 + 1 + 3, 0.0f, 1.0f,
&local_rand_state);
}
__device__ hittable *two_spheres(hiprandState_t local_rand_state) {
auto checker =
new checker_texture(color(0.2, 0.3, 0.1), color(0.9, 0.9, 0.9));
hittable *ret[2];
ret[0] = new sphere(point3(0, -10, 0), 10, new lambertian(checker));
ret[1] = new sphere(point3(0, 10, 0), 10, new lambertian(checker));
return new bvh_node(ret, 0, 2, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *two_perlin_spheres(hiprandState_t local_rand_state) {
auto perlin_texture = new noise_texture(4, &local_rand_state);
hittable *ret[2];
ret[0] =
new sphere(point3(0, -1000, 0), 1000, new lambertian(perlin_texture));
ret[1] = new sphere(point3(0, 2, 0), 2, new lambertian(perlin_texture));
return new bvh_node(ret, 0, 2, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *earth(unsigned char *data, int w, int h,
hiprandState_t local_rand_state) {
auto earth_texture = new image_texture(data, w, h);
auto earth_surface = new lambertian(earth_texture);
hittable *ret[1];
ret[0] = new sphere(point3(0, 0, 0), 2, earth_surface);
return new bvh_node(ret, 0, 1, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *simple_light(hiprandState_t local_rand_state) {
auto perlin_texture = new noise_texture(4, &local_rand_state);
hittable *ret[4];
ret[0] =
new sphere(point3(0, -1000, 0), 1000, new lambertian(perlin_texture));
ret[1] = new sphere(point3(0, 2, 0), 2, new lambertian(perlin_texture));
auto diff_light = new diffuse_light(color(4, 4, 4));
ret[2] = new xy_rect(3, 5, 1, 2, -2, diff_light);
auto diff_light2 = new diffuse_light(color(6, 4, 4));
ret[3] = new sphere(point3(0, 6, 0), 1.5, diff_light2);
return new bvh_node(ret, 0, 4, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *cornell_box(hiprandState_t local_rand_state) {
hittable *ret[8];
auto red = new lambertian(color(.65, .05, .05));
auto white = new lambertian(color(.73, .73, .73));
auto green = new lambertian(color(.12, .45, .15));
auto light = new diffuse_light(color(15, 15, 15));
ret[0] = new yz_rect(0, 555, 0, 555, 555, green);
ret[1] = new yz_rect(0, 555, 0, 555, 0, red);
ret[2] = new xz_rect(213, 343, 227, 332, 554, light);
ret[3] = new xz_rect(0, 555, 0, 555, 0, white);
ret[4] = new xz_rect(0, 555, 0, 555, 555, white);
ret[5] = new xy_rect(0, 555, 0, 555, 555, white);
// ret[6] = new box(point3(130, 0, 65), point3(295, 165, 230), white);
// ret[7] = new box(point3(265, 0, 295), point3(430, 330, 460), white);
hittable *box1 = new box(point3(0, 0, 0), point3(165, 330, 165), white);
box1 = new rotate_y(box1, 15);
box1 = new translate(box1, vec3(265, 0, 295));
hittable *box2 = new box(point3(0, 0, 0), point3(165, 165, 165), white);
box2 = new rotate_y(box2, -18);
box2 = new translate(box2, vec3(130, 0, 65));
ret[6] = box1;
ret[7] = box2;
return new bvh_node(ret, 0, 8, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *cornell_smoke(hiprandState_t local_rand_state) {
hittable *ret[8];
auto red = new lambertian(color(.65, .05, .05));
auto white = new lambertian(color(.73, .73, .73));
auto green = new lambertian(color(.12, .45, .15));
auto light = new diffuse_light(color(15, 15, 15));
ret[0] = new yz_rect(0, 555, 0, 555, 555, green);
ret[1] = new yz_rect(0, 555, 0, 555, 0, red);
ret[2] = new xz_rect(213, 343, 227, 332, 554, light);
ret[3] = new xz_rect(0, 555, 0, 555, 0, white);
ret[4] = new xz_rect(0, 555, 0, 555, 555, white);
ret[5] = new xy_rect(0, 555, 0, 555, 555, white);
hittable *box1 = new box(point3(0, 0, 0), point3(165, 330, 165), white);
box1 = new rotate_y(box1, 15);
box1 = new translate(box1, vec3(265, 0, 295));
box1 = new constant_medium(box1, 0.01, color(0, 0, 0));
hittable *box2 = new box(point3(0, 0, 0), point3(165, 165, 165), white);
box2 = new rotate_y(box2, -18);
box2 = new translate(box2, vec3(130, 0, 65));
box2 = new constant_medium(box2, 0.01, color(1, 1, 1));
ret[6] = box1;
ret[7] = box2;
return new bvh_node(ret, 0, 8, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *rt_next_week_final_scene(unsigned char *data, int w, int h,
hiprandState_t local_rand_state) {
const int boxes_per_side = 20;
const int num_obj = boxes_per_side * boxes_per_side + 10;
hittable *ret[num_obj];
// ground
auto ground = new lambertian(color(0.48, 0.83, 0.53));
int index = 0;
for (int i = 0; i < boxes_per_side; i++) {
for (int j = 0; j < boxes_per_side; j++) {
float w = 100.0;
float x0 = -1000.0f + i * w;
float z0 = -1000.0f + j * w;
float y0 = 0.0;
float x1 = x0 + w;
float y1 = random_float(1, 101, &local_rand_state);
float z1 = z0 + w;
ret[index++] = new box(point3(x0, y0, z0), point3(x1, y1, z1), ground);
}
}
// light
auto light = new diffuse_light(color(7, 7, 7));
ret[index++] = new xz_rect(123, 423, 147, 412, 554, light);
// moving sphere
auto center1 = point3(400, 400, 200);
auto center2 = center1 + vec3(30, 0, 0);
auto moving_sphere_material = new lambertian(color(0.7, 0.3, 0.1));
ret[index++] =
new moving_sphere(center1, center2, 0, 1, 50, moving_sphere_material);
ret[index++] = new sphere(point3(260, 150, 45), 50, new dielectric(1.5));
ret[index++] =
new sphere(point3(0, 150, 145), 50, new metal(color(0.8, 0.8, 0.9), 1.0));
// constant medium
auto sphere_dielectric_2 =
new sphere(point3(360, 150, 145), 70, new dielectric(1.5));
ret[index++] = sphere_dielectric_2;
ret[index++] =
new constant_medium(sphere_dielectric_2, 0.2, color(0.2, 0.4, 0.9));
// fog
auto fog = new sphere(point3(0, 0, 0), 5000, new dielectric(1.5));
ret[index++] = new constant_medium(fog, 0.0001, color(1, 1, 1));
// earth
auto earth_texture = new image_texture(data, w, h);
auto earth_surface = new lambertian(earth_texture);
ret[index++] = new sphere(point3(400, 200, 400), 100, earth_surface);
auto pertext = new noise_texture(0.1, &local_rand_state);
ret[index++] = new sphere(point3(220, 280, 300), 80, new lambertian(pertext));
auto white = new lambertian(color(.73, .73, .73));
const int ns = 1000;
hittable *cluster[ns];
for (int j = 0; j < ns; j++) {
cluster[j] = new sphere(random_vec3(0, 165, &local_rand_state), 10, white);
}
ret[index++] = new translate(
new rotate_y(new bvh_node(cluster, 0, ns, 0, 1, &local_rand_state), 15),
vec3(-100, 270, 395));
return new bvh_node(ret, 0, index, 0.0, 1.0, &local_rand_state);
}
__device__ hittable *simple_triangle(hiprandState_t local_rand_state) {
hittable *ret[3];
int index = 0;
// light
auto light = new diffuse_light(color(17, 17, 17));
ret[index++] = new xz_rect(123, 423, 147, 412, 554, light);
auto white = new lambertian(color(.073, .73, .73));
ret[index++] =
new triangle(vec3(123, 0, 150), vec3(423, 0, 150), vec3(273, 50, (500 + 150) / 2),
vec3(0, 1, 0), vec3(0, 1, 0), vec3(0, 1, 0), white);
ret[index++] = new sphere(point3(273, 100, (500 + 150) / 2), 10,
new lambertian(color(0.5, 0.5, 0.5)));
return new bvh_node(ret, 0, index, 0, 1, &local_rand_state);
}
__global__ void create_world(hittable **d_list, hittable **d_world,
camera **d_camera, int nx, int ny,
hiprandState_t *rand_state, unsigned char *data,
int w, int h, color **background) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
// float dist_to_focus = (lookfrom - lookat).length();
float aperture = 0.00;
float vfov = 40.0;
vec3 vup(0, 1, 0);
// background = new color(0, 0, 0);
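    // Hard-coded scene selector: change the literal in the switch below to pick scene 1-9.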
switch (8) {
case 1:
*d_world = random_scene(d_list, local_rand_state);
vfov = 20.0;
aperture = 0.05;
*background = new color(0.70, 0.80, 1.00);
break;
case 2:
*d_world = two_spheres(local_rand_state);
vfov = 20.0;
aperture = 0;
*background = new color(0.70, 0.80, 1.00);
break;
case 3:
*d_world = two_perlin_spheres(local_rand_state);
vfov = 20.0;
aperture = 0;
*background = new color(0.70, 0.80, 1.00);
break;
case 4:
*d_world = earth(data, w, h, local_rand_state);
*background = new color(0.70, 0.80, 1.00);
break;
case 5:
*background = new color(0.0, 0.0, 0.0);
*d_world = simple_light(local_rand_state);
lookfrom = point3(26, 3, 6);
lookat = point3(0, 2, 0);
vfov = 20.0f;
break;
case 6:
*background = new color(0.0, 0.0, 0.0);
// *background = new color(0.70, 0.80, 1.00);
*d_world = cornell_box(local_rand_state);
lookfrom = point3(278, 278, -800);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
case 7:
*background = new color(0.0, 0.0, 0.0);
*d_world = cornell_smoke(local_rand_state);
lookfrom = point3(278, 278, -800);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
case 8:
*background = new color(0.0, 0.0, 0.0);
*d_world = rt_next_week_final_scene(data, w, h, local_rand_state);
lookfrom = point3(478, 278, -600);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
default:
case 9:
// *background = new color(0.70/2, 0.80/2, 1.00/2);
*background = new color(0.0, 0.0, 0.0);
*d_world = simple_triangle(local_rand_state);
lookfrom = point3(278, 278, -600);
lookat = point3(278, 278, 0);
vfov = 50.0;
break;
}
float dist_to_focus = (lookfrom - lookat).length();
*d_camera = new camera(lookfrom, lookat, vup, vfov, float(nx) / float(ny),
aperture, dist_to_focus, 0.0f, 1.0f);
*rand_state = local_rand_state;
}
}
__global__ void free_world(hittable **d_list, hittable **d_world,
camera **d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
    // The bug is located here: the list holds both sphere and moving_sphere, but
    // only sphere is handled. A workaround is to define moving_sphere as a subclass
    // of sphere; then we can get rid of the hipFree 700 error.
    //   delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main() {
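  // Enlarge the per-thread device stack; the BVH and the nested translate/rotate/
  // constant_medium wrappers otherwise overflow the default limit.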
hipDeviceSetLimit(hipLimitStackSize, 32768ULL);
const char *filename = "earthmap.jpeg";
int width, height;
int components_per_pixel = image_texture::bytes_per_pixel;
unsigned char *data;
data = stbi_load(filename, &width, &height, &components_per_pixel,
components_per_pixel);
unsigned char *device_data;
size_t img_data_size =
components_per_pixel * width * height * sizeof(unsigned char);
checkCudaErrors(hipMallocManaged((void **)&device_data, img_data_size));
checkCudaErrors(hipMemcpy((void *)device_data, (void *)data, img_data_size,
hipMemcpyHostToDevice));
color **background_color;
checkCudaErrors(
hipMallocManaged((void **)&background_color, sizeof(color *)));
const auto aspect_ratio = 1.0; // 3.0 / 2.0;
int nx = 800; // 1200;
int ny = static_cast<int>(nx / aspect_ratio);
int ns = 100; // 500;
// int ns = 500;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns
<< " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// allocate random state
hiprandState_t *d_rand_state;
checkCudaErrors(
hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
hiprandState_t *d_rand_state2;
checkCudaErrors(hipMalloc((void **)&d_rand_state2, 1 * sizeof(hiprandState_t)));
// we need that 2nd random state to be initialized for the world creation
hipLaunchKernelGGL(( rand_init), dim3(1), dim3(1), 0, 0, d_rand_state2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
  // make our world of hittables & the camera
hittable **d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(
hipMalloc((void **)&d_list, num_hitables * sizeof(hittable *)));
hittable **d_world;
// checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hittable *)));
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(bvh_node *)));
camera **d_camera;
checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *)));
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( create_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2,
device_data, width, height, background_color);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_camera, d_world, d_rand_state,
background_color);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb[pixel_index].x());
int ig = int(255.99 * fb[pixel_index].y());
int ib = int(255.99 * fb[pixel_index].z());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( free_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(d_rand_state2));
checkCudaErrors(hipFree(fb));
hipDeviceReset();
return 0;
}
| ad27c0e99ec344e038ac9a85a0eda507fa030c9b.cu | #include "aarect.h"
#include "box.h"
#include "bvh.h"
#include "camera.h"
#include "constant_medium.h"
#include "cuda_utils.h"
#include "hittable_list.h"
#include "material.h"
#include "moving_sphere.h"
#include "ray.h"
#include "sphere.h"
#include "triangle.h"
#include "vec3.h"
#include <curand_kernel.h>
#include <float.h>
#include <iostream>
#include <time.h>
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 get_color(const ray &r, hittable **world,
curandState *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation(1.0f, 1.0f, 1.0f);
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec, local_rand_state)) {
ray scattered;
vec3 attenuation;
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
} else {
return vec3(0.0, 0.0, 0.0);
}
} else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f * (unit_direction.y() + 1.0f);
vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__device__ vec3 get_color(const ray &r, color **background, hittable **world,
curandState *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation(1.0f, 1.0f, 1.0f);
const int depth = 50;
vec3 emitted_rec[depth];
vec3 attenuation_rec[depth];
for (int i = 0; i < depth; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec, local_rand_state)) {
ray scattered;
vec3 attenuation;
color emitted = rec.mat_ptr->emitted(rec.u, rec.v, rec.p);
if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered,
local_rand_state)) {
// scattered
// cur_attenuation *= attenuation;
// cur_attenuation += emitted;
// cur_attenuation *= (attenuation + emitted);
emitted_rec[i] = emitted;
attenuation_rec[i] = attenuation;
cur_ray = scattered;
} else {
// no scatter
// no attenuation
// no background light
// but we have emitted
cur_attenuation *= emitted;
while (i-- > 0) {
cur_attenuation =
emitted_rec[i] + cur_attenuation * attenuation_rec[i];
}
return cur_attenuation;
}
} else {
// no hit
// only have background
cur_attenuation *= **background;
while (i-- > 0) {
cur_attenuation = emitted_rec[i] + cur_attenuation * attenuation_rec[i];
}
return cur_attenuation;
}
}
return **background; // exceeded recursion
}
__global__ void rand_init(curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(1984, 0, 0, rand_state);
}
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
  // Original: each thread gets the same seed, a different sequence number, and no offset:
  //   curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
  // BUGFIX (see Issue #2): each thread gets a different seed and the same sequence,
  // for a performance improvement of about 2x:
  //   curand_init(1984 + pixel_index, 0, 0, &rand_state[pixel_index]);
curand_init(1984 + pixel_index, i, j, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam,
hittable **world, curandState *rand_state,
color **background) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y))
return;
int pixel_index = j * max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += get_color(r, background, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
#define RND (curand_uniform(&local_rand_state))
__device__ hittable *random_scene(hittable **d_list,
curandState local_rand_state) {
auto checker =
new checker_texture(color(0.2, 0.3, 0.1), color(0.9, 0.9, 0.9));
d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new lambertian(checker));
// d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
// new lambertian(vec3(0.5, 0.5, 0.5)));
// d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
// make_shared<lambertian>(vec3(0.5, 0.5, 0.5)));
int i = 1;
for (int a = -11; a < 11; a++) {
for (int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a + RND, 0.2, b + RND);
if (choose_mat < 0.8f) {
vec3 center2 = center + vec3(0, RND * 0.5f, 0);
// d_list[i++] =
// new sphere(center, 0.2,
// new lambertian(vec3(RND * RND, RND * RND, RND *
// RND)));
d_list[i++] = new moving_sphere(
center, center2, 0.0, 1.0, 0.2,
new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
} else if (choose_mat < 0.95f) {
d_list[i++] =
new sphere(center, 0.2,
new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND),
0.5f * (1.0f + RND)),
0.5f * RND));
} else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
d_list[i++] =
new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] =
new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
return new bvh_node(d_list, 0, 22 * 22 + 1 + 3, 0.0f, 1.0f,
&local_rand_state);
}
__device__ hittable *two_spheres(curandState local_rand_state) {
auto checker =
new checker_texture(color(0.2, 0.3, 0.1), color(0.9, 0.9, 0.9));
hittable *ret[2];
ret[0] = new sphere(point3(0, -10, 0), 10, new lambertian(checker));
ret[1] = new sphere(point3(0, 10, 0), 10, new lambertian(checker));
return new bvh_node(ret, 0, 2, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *two_perlin_spheres(curandState local_rand_state) {
auto perlin_texture = new noise_texture(4, &local_rand_state);
hittable *ret[2];
ret[0] =
new sphere(point3(0, -1000, 0), 1000, new lambertian(perlin_texture));
ret[1] = new sphere(point3(0, 2, 0), 2, new lambertian(perlin_texture));
return new bvh_node(ret, 0, 2, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *earth(unsigned char *data, int w, int h,
curandState local_rand_state) {
auto earth_texture = new image_texture(data, w, h);
auto earth_surface = new lambertian(earth_texture);
hittable *ret[1];
ret[0] = new sphere(point3(0, 0, 0), 2, earth_surface);
return new bvh_node(ret, 0, 1, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *simple_light(curandState local_rand_state) {
auto perlin_texture = new noise_texture(4, &local_rand_state);
hittable *ret[4];
ret[0] =
new sphere(point3(0, -1000, 0), 1000, new lambertian(perlin_texture));
ret[1] = new sphere(point3(0, 2, 0), 2, new lambertian(perlin_texture));
auto diff_light = new diffuse_light(color(4, 4, 4));
ret[2] = new xy_rect(3, 5, 1, 2, -2, diff_light);
auto diff_light2 = new diffuse_light(color(6, 4, 4));
ret[3] = new sphere(point3(0, 6, 0), 1.5, diff_light2);
return new bvh_node(ret, 0, 4, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *cornell_box(curandState local_rand_state) {
hittable *ret[8];
auto red = new lambertian(color(.65, .05, .05));
auto white = new lambertian(color(.73, .73, .73));
auto green = new lambertian(color(.12, .45, .15));
auto light = new diffuse_light(color(15, 15, 15));
ret[0] = new yz_rect(0, 555, 0, 555, 555, green);
ret[1] = new yz_rect(0, 555, 0, 555, 0, red);
ret[2] = new xz_rect(213, 343, 227, 332, 554, light);
ret[3] = new xz_rect(0, 555, 0, 555, 0, white);
ret[4] = new xz_rect(0, 555, 0, 555, 555, white);
ret[5] = new xy_rect(0, 555, 0, 555, 555, white);
// ret[6] = new box(point3(130, 0, 65), point3(295, 165, 230), white);
// ret[7] = new box(point3(265, 0, 295), point3(430, 330, 460), white);
hittable *box1 = new box(point3(0, 0, 0), point3(165, 330, 165), white);
box1 = new rotate_y(box1, 15);
box1 = new translate(box1, vec3(265, 0, 295));
hittable *box2 = new box(point3(0, 0, 0), point3(165, 165, 165), white);
box2 = new rotate_y(box2, -18);
box2 = new translate(box2, vec3(130, 0, 65));
ret[6] = box1;
ret[7] = box2;
return new bvh_node(ret, 0, 8, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *cornell_smoke(curandState local_rand_state) {
hittable *ret[8];
auto red = new lambertian(color(.65, .05, .05));
auto white = new lambertian(color(.73, .73, .73));
auto green = new lambertian(color(.12, .45, .15));
auto light = new diffuse_light(color(15, 15, 15));
ret[0] = new yz_rect(0, 555, 0, 555, 555, green);
ret[1] = new yz_rect(0, 555, 0, 555, 0, red);
ret[2] = new xz_rect(213, 343, 227, 332, 554, light);
ret[3] = new xz_rect(0, 555, 0, 555, 0, white);
ret[4] = new xz_rect(0, 555, 0, 555, 555, white);
ret[5] = new xy_rect(0, 555, 0, 555, 555, white);
hittable *box1 = new box(point3(0, 0, 0), point3(165, 330, 165), white);
box1 = new rotate_y(box1, 15);
box1 = new translate(box1, vec3(265, 0, 295));
box1 = new constant_medium(box1, 0.01, color(0, 0, 0));
hittable *box2 = new box(point3(0, 0, 0), point3(165, 165, 165), white);
box2 = new rotate_y(box2, -18);
box2 = new translate(box2, vec3(130, 0, 65));
box2 = new constant_medium(box2, 0.01, color(1, 1, 1));
ret[6] = box1;
ret[7] = box2;
return new bvh_node(ret, 0, 8, 0.0f, 1.0f, &local_rand_state);
}
__device__ hittable *rt_next_week_final_scene(unsigned char *data, int w, int h,
curandState local_rand_state) {
const int boxes_per_side = 20;
const int num_obj = boxes_per_side * boxes_per_side + 10;
hittable *ret[num_obj];
// ground
auto ground = new lambertian(color(0.48, 0.83, 0.53));
int index = 0;
for (int i = 0; i < boxes_per_side; i++) {
for (int j = 0; j < boxes_per_side; j++) {
float w = 100.0;
float x0 = -1000.0f + i * w;
float z0 = -1000.0f + j * w;
float y0 = 0.0;
float x1 = x0 + w;
float y1 = random_float(1, 101, &local_rand_state);
float z1 = z0 + w;
ret[index++] = new box(point3(x0, y0, z0), point3(x1, y1, z1), ground);
}
}
// light
auto light = new diffuse_light(color(7, 7, 7));
ret[index++] = new xz_rect(123, 423, 147, 412, 554, light);
// moving sphere
auto center1 = point3(400, 400, 200);
auto center2 = center1 + vec3(30, 0, 0);
auto moving_sphere_material = new lambertian(color(0.7, 0.3, 0.1));
ret[index++] =
new moving_sphere(center1, center2, 0, 1, 50, moving_sphere_material);
ret[index++] = new sphere(point3(260, 150, 45), 50, new dielectric(1.5));
ret[index++] =
new sphere(point3(0, 150, 145), 50, new metal(color(0.8, 0.8, 0.9), 1.0));
// constant medium
auto sphere_dielectric_2 =
new sphere(point3(360, 150, 145), 70, new dielectric(1.5));
ret[index++] = sphere_dielectric_2;
ret[index++] =
new constant_medium(sphere_dielectric_2, 0.2, color(0.2, 0.4, 0.9));
// fog
auto fog = new sphere(point3(0, 0, 0), 5000, new dielectric(1.5));
ret[index++] = new constant_medium(fog, 0.0001, color(1, 1, 1));
// earth
auto earth_texture = new image_texture(data, w, h);
auto earth_surface = new lambertian(earth_texture);
ret[index++] = new sphere(point3(400, 200, 400), 100, earth_surface);
auto pertext = new noise_texture(0.1, &local_rand_state);
ret[index++] = new sphere(point3(220, 280, 300), 80, new lambertian(pertext));
auto white = new lambertian(color(.73, .73, .73));
const int ns = 1000;
hittable *cluster[ns];
for (int j = 0; j < ns; j++) {
cluster[j] = new sphere(random_vec3(0, 165, &local_rand_state), 10, white);
}
ret[index++] = new translate(
new rotate_y(new bvh_node(cluster, 0, ns, 0, 1, &local_rand_state), 15),
vec3(-100, 270, 395));
return new bvh_node(ret, 0, index, 0.0, 1.0, &local_rand_state);
}
__device__ hittable *simple_triangle(curandState local_rand_state) {
hittable *ret[3];
int index = 0;
// light
auto light = new diffuse_light(color(17, 17, 17));
ret[index++] = new xz_rect(123, 423, 147, 412, 554, light);
auto white = new lambertian(color(.073, .73, .73));
ret[index++] =
new triangle(vec3(123, 0, 150), vec3(423, 0, 150), vec3(273, 50, (500 + 150) / 2),
vec3(0, 1, 0), vec3(0, 1, 0), vec3(0, 1, 0), white);
ret[index++] = new sphere(point3(273, 100, (500 + 150) / 2), 10,
new lambertian(color(0.5, 0.5, 0.5)));
return new bvh_node(ret, 0, index, 0, 1, &local_rand_state);
}
__global__ void create_world(hittable **d_list, hittable **d_world,
camera **d_camera, int nx, int ny,
curandState *rand_state, unsigned char *data,
int w, int h, color **background) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
vec3 lookfrom(13, 2, 3);
vec3 lookat(0, 0, 0);
// float dist_to_focus = (lookfrom - lookat).length();
float aperture = 0.00;
float vfov = 40.0;
vec3 vup(0, 1, 0);
// background = new color(0, 0, 0);
switch (8) {
case 1:
*d_world = random_scene(d_list, local_rand_state);
vfov = 20.0;
aperture = 0.05;
*background = new color(0.70, 0.80, 1.00);
break;
case 2:
*d_world = two_spheres(local_rand_state);
vfov = 20.0;
aperture = 0;
*background = new color(0.70, 0.80, 1.00);
break;
case 3:
*d_world = two_perlin_spheres(local_rand_state);
vfov = 20.0;
aperture = 0;
*background = new color(0.70, 0.80, 1.00);
break;
case 4:
*d_world = earth(data, w, h, local_rand_state);
*background = new color(0.70, 0.80, 1.00);
break;
case 5:
*background = new color(0.0, 0.0, 0.0);
*d_world = simple_light(local_rand_state);
lookfrom = point3(26, 3, 6);
lookat = point3(0, 2, 0);
vfov = 20.0f;
break;
case 6:
*background = new color(0.0, 0.0, 0.0);
// *background = new color(0.70, 0.80, 1.00);
*d_world = cornell_box(local_rand_state);
lookfrom = point3(278, 278, -800);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
case 7:
*background = new color(0.0, 0.0, 0.0);
*d_world = cornell_smoke(local_rand_state);
lookfrom = point3(278, 278, -800);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
case 8:
*background = new color(0.0, 0.0, 0.0);
*d_world = rt_next_week_final_scene(data, w, h, local_rand_state);
lookfrom = point3(478, 278, -600);
lookat = point3(278, 278, 0);
vfov = 40.0;
break;
default:
case 9:
// *background = new color(0.70/2, 0.80/2, 1.00/2);
*background = new color(0.0, 0.0, 0.0);
*d_world = simple_triangle(local_rand_state);
lookfrom = point3(278, 278, -600);
lookat = point3(278, 278, 0);
vfov = 50.0;
break;
}
float dist_to_focus = (lookfrom - lookat).length();
*d_camera = new camera(lookfrom, lookat, vup, vfov, float(nx) / float(ny),
aperture, dist_to_focus, 0.0f, 1.0f);
*rand_state = local_rand_state;
}
}
__global__ void free_world(hittable **d_list, hittable **d_world,
camera **d_camera) {
for (int i = 0; i < 22 * 22 + 1 + 3; i++) {
    // The bug is located here: the list holds both sphere and moving_sphere, but
    // only sphere is handled. A workaround is to define moving_sphere as a subclass
    // of sphere; then we can get rid of the cudaFree 700 error.
    //   delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main() {
cudaDeviceSetLimit(cudaLimitStackSize, 32768ULL);
const char *filename = "earthmap.jpeg";
int width, height;
int components_per_pixel = image_texture::bytes_per_pixel;
unsigned char *data;
data = stbi_load(filename, &width, &height, &components_per_pixel,
components_per_pixel);
unsigned char *device_data;
size_t img_data_size =
components_per_pixel * width * height * sizeof(unsigned char);
checkCudaErrors(cudaMallocManaged((void **)&device_data, img_data_size));
checkCudaErrors(cudaMemcpy((void *)device_data, (void *)data, img_data_size,
cudaMemcpyHostToDevice));
color **background_color;
checkCudaErrors(
cudaMallocManaged((void **)&background_color, sizeof(color *)));
const auto aspect_ratio = 1.0; // 3.0 / 2.0;
int nx = 800; // 1200;
int ny = static_cast<int>(nx / aspect_ratio);
int ns = 100; // 500;
// int ns = 500;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns
<< " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx * ny;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// allocate random state
curandState *d_rand_state;
checkCudaErrors(
cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState)));
curandState *d_rand_state2;
checkCudaErrors(cudaMalloc((void **)&d_rand_state2, 1 * sizeof(curandState)));
// we need that 2nd random state to be initialized for the world creation
rand_init<<<1, 1>>>(d_rand_state2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
  // make our world of hittables & the camera
hittable **d_list;
int num_hitables = 22 * 22 + 1 + 3;
checkCudaErrors(
cudaMalloc((void **)&d_list, num_hitables * sizeof(hittable *)));
hittable **d_world;
// checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hittable *)));
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(bvh_node *)));
camera **d_camera;
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *)));
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
create_world<<<1, 1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2,
device_data, width, height, background_color);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state,
background_color);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j * nx + i;
int ir = int(255.99 * fb[pixel_index].x());
int ig = int(255.99 * fb[pixel_index].y());
int ib = int(255.99 * fb[pixel_index].z());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world<<<1, 1>>>(d_list, d_world, d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(d_rand_state2));
checkCudaErrors(cudaFree(fb));
cudaDeviceReset();
return 0;
}
|
08fb0185513636f4fd8d8253ac1936a99b4e1bed.hip | // !!! This is a file automatically generated by hipify!!!
/*
This version assigns one thread per 16 bytes of text (one text block).
Stores the plaintext/ciphertext in registers.
Stores the encryption keys in shared memory.
Stores the S-boxes in constant memory.
The blocksize is 256.
*/
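// A minimal sketch (not part of the original file) of the launch configuration the
// notes above imply -- names such as aria_kernel, d_text and numTextBlocks are
// hypothetical placeholders, not definitions from this file:
//
//   size_t numTextBlocks = textLengthBytes / 16;        // one thread per 16-byte block
//   dim3 block(256);                                    // "The blocksize is 256."
//   dim3 grid((numTextBlocks + block.x - 1) / block.x); // ceiling division
//   hipLaunchKernelGGL(aria_kernel, grid, block, 0, 0, d_text, d_roundKeys, numTextBlocks);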
#include <iostream>
#include <fstream>
#include <sstream>
#include <chrono>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
typedef unsigned char uint8;
enum workMode { ENCRYPTION, DECRYPTION };
//Key generation constants
uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 };
uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 };
uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e };
//Encryption round keys
uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes)
//Decyription round keys
uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes)
//S-boxes
static const uint8 SB1[256] =
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
static const uint8 SB2[256] =
{
0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1,
0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1,
0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB,
0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB,
0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD,
0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53,
0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1,
0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40,
0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC,
0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5,
0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43,
0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8,
0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA,
0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C,
0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D,
0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81
};
static const uint8 SB3[256] =
{
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
};
static const uint8 SB4[256] =
{
0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C,
0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D,
0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D,
0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED,
0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B,
0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE,
0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9,
0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41,
0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A,
0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7,
0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC,
0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5,
0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45,
0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D,
0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3,
0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60
};
//S-boxes
__constant__ uint8 SB1_dev[256] =
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
__constant__ uint8 SB2_dev[256] =
{
0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1,
0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1,
0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB,
0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB,
0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD,
0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53,
0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1,
0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40,
0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC,
0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5,
0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43,
0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8,
0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA,
0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C,
0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D,
0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81
};
__constant__ uint8 SB3_dev[256] =
{
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
};
__constant__ uint8 SB4_dev[256] =
{
0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C,
0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D,
0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D,
0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED,
0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B,
0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE,
0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9,
0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41,
0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A,
0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7,
0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC,
0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5,
0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45,
0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D,
0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3,
0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60
};
uint8 hex2dec(char ch)
{
if (ch >= '0' && ch <= '9')
return ch - '0';
else
return ch - 'a' + 10;
}
uint8 leftRotate(uint8 n, uint8 d)
{
return (n << d) | (n >> (8 - d));
}
uint8 rightRotate(uint8 n, uint8 d)
{
return (n >> d) | (n << (8 - d));
}
uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array
{
uint8* tmp = (uint8*)malloc(amount);
uint8* newArr = (uint8*)malloc(16 * sizeof(uint8));
for (int i = 0; i < amount; i++) {
tmp[i] = arr[arrSize - amount + i];
}
for (int i = arrSize - 1; i >= amount; i--) {
newArr[i] = arr[i - amount];
}
for (int i = 0; i < amount; i++) {
newArr[i] = tmp[i];
}
free(tmp);
return newArr;
}
uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array
{
uint8* tmp = (uint8*)malloc(amount);
uint8* newArr = (uint8*)malloc(16 * sizeof(uint8));
for (int i = 0; i < amount; i++) {
tmp[i] = arr[i];
}
for (int i = 0; i < arrSize - amount; i++) {
newArr[i] = arr[i + amount];
}
for (int i = 0; i < amount; i++) {
newArr[arrSize - amount + i] = tmp[i];
}
free(tmp);
return newArr;
}
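/*
ShiftArrR/ShiftArrL below treat the 16-byte array as one 128-bit value (most
significant byte at index 0) and rotate it right/left by `amount` bits: whole
bytes are rotated first, then the leftover bit-level carry wraps around from one
end of the array to the other. The key schedule uses them as 128-bit rotations,
e.g. ek[0..15] = W0 ^ (W1 rotated right by 19 bits).
*/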
uint8* ShiftArrR(uint8* originalArr, int amount)
{
int arrSize = 16;
int byteShiftAmount = amount / 8;
uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount);
amount = amount - byteShiftAmount * 8;
uint8 carryTmp, carry;
carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right
for (int i = 0; i < arrSize; i++)
{
carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right
arr[i] >>= amount;//right shift the current byte.
arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left
carry = carryTmp;
}
return arr;
}
uint8* ShiftArrL(uint8* originalArr, int amount)
{
int arrSize = 16;
int byteShiftAmount = amount / 8;
uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount);
amount = amount - byteShiftAmount * 8;
uint8 carryTmp, carry;
carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left
for (int i = arrSize - 1; i >= 0; i--)
{
carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left
arr[i] <<= amount;//left shift the current byte.
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right
carry = carryTmp;
}
return arr;
}
void XOR_16(uint8* x, uint8* y, uint8* z)
{
for (int i = 0; i < 16; i++) {
z[i] = x[i] ^ y[i];
}
}
void XOR_16wFree(uint8* x, uint8* y, uint8* z)
{
for (int i = 0; i < 16; i++) {
z[i] = x[i] ^ y[i];
}
free(y);
}
//Substitution Layer 1
void SL1(uint8* in, uint8* out)
{
out[0] = SB1[in[0]];
out[1] = SB2[in[1]];
out[2] = SB3[in[2]];
out[3] = SB4[in[3]];
out[4] = SB1[in[4]];
out[5] = SB2[in[5]];
out[6] = SB3[in[6]];
out[7] = SB4[in[7]];
out[8] = SB1[in[8]];
out[9] = SB2[in[9]];
out[10] = SB3[in[10]];
out[11] = SB4[in[11]];
out[12] = SB1[in[12]];
out[13] = SB2[in[13]];
out[14] = SB3[in[14]];
out[15] = SB4[in[15]];
}
//Substitution Layer 2 (inverse of SL1)
void SL2(uint8* in, uint8* out)
{
out[0] = SB3[in[0]];
out[1] = SB4[in[1]];
out[2] = SB1[in[2]];
out[3] = SB2[in[3]];
out[4] = SB3[in[4]];
out[5] = SB4[in[5]];
out[6] = SB1[in[6]];
out[7] = SB2[in[7]];
out[8] = SB3[in[8]];
out[9] = SB4[in[9]];
out[10] = SB1[in[10]];
out[11] = SB2[in[11]];
out[12] = SB3[in[12]];
out[13] = SB4[in[13]];
out[14] = SB1[in[14]];
out[15] = SB2[in[15]];
}
//Diffusion layer
void A(uint8* in, uint8* out)
{
out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14];
out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15];
out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15];
out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14];
out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15];
out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15];
out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13];
out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13];
out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15];
out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14];
out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15];
out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14];
out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12];
out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13];
out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14];
out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15];
}
/*Round functions (F0, FE) take 16 bytes of plaintext
and generate an intermediate value of 16 bytes
*/
//Odd Round Function
void F0(uint8* D, uint8* RK, uint8* out)
{
//res1, res2 are auxiliary arrays for storing the results of XOR_16 and SL1
uint8 res1[16];
uint8 res2[16];
XOR_16(D, RK, res1);
SL1(res1, res2);
A(res2, out);
}
//Even Round Function
void FE(uint8* D, uint8* RK, uint8* out)
{
//res1, res2 are auxiliary arrays for storing the results of XOR_16 and SL2
uint8 res1[16];
uint8 res2[16];
XOR_16(D, RK, res1);
SL2(res1, res2);
A(res2, out);
}
void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3)
{
//Producing encryption round keys
//Producing encryption round keys can be parallelized.
//However, since we do this once for all blocks, it is faster to compute on the CPU.
//ShiftArr functions return arrays allocated on the heap, which must be freed.
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]);
XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]);
XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]);
XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]);
XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]);
XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]);
XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]);
XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]);
XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]);
XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]);
XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]);
XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]);
XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]);
XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]);
XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]);
XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]);
XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]);
}
void GenerateDecRoundKeys(uint8 numOfRounds)
{
int N = numOfRounds - 1;
int k = 1;
for (int i = 0; i < 16; i++)
{
dk[i] = ek[16 * N + i];
}
for (int i = N - 1; i >= 1; i--)
{
A(&ek[i * 16], &dk[k * 16]);
k++;
}
for (int i = 0; i < 16; i++)
{
dk[k * 16 + i] = ek[i];
}
}
//Odd Round Function
__device__ void F0_d(uint8* D, const uint8* RK)
{
uint8 aux[16];//auxiliary array for keeping the results of the diffusion layer
//XOR with the round key
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = D[i] ^ RK[i];
}
//Substitution layer (SL1)
D[0] = SB1_dev[D[0]];
D[1] = SB2_dev[D[1]];
D[2] = SB3_dev[D[2]];
D[3] = SB4_dev[D[3]];
D[4] = SB1_dev[D[4]];
D[5] = SB2_dev[D[5]];
D[6] = SB3_dev[D[6]];
D[7] = SB4_dev[D[7]];
D[8] = SB1_dev[D[8]];
D[9] = SB2_dev[D[9]];
D[10] = SB3_dev[D[10]];
D[11] = SB4_dev[D[11]];
D[12] = SB1_dev[D[12]];
D[13] = SB2_dev[D[13]];
D[14] = SB3_dev[D[14]];
D[15] = SB4_dev[D[15]];
//Diffusion layer
aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14];
aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15];
aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15];
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14];
aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15];
aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15];
aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13];
aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13];
aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15];
aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14];
aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15];
aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14];
aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12];
aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13];
aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14];
aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15];
//put the result into plaintext registers
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = aux[i];
}
}
//Even Round Function
__device__ void FE_d(uint8* D, const uint8* RK)
{
uint8 aux[16];//auxiliary array for keeping the results of the diffusion layer
//XOR with the round key
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = D[i] ^ RK[i];
}
//Substitution layer (SL2)
D[0] = SB3_dev[D[0]];
D[1] = SB4_dev[D[1]];
D[2] = SB1_dev[D[2]];
D[3] = SB2_dev[D[3]];
D[4] = SB3_dev[D[4]];
D[5] = SB4_dev[D[5]];
D[6] = SB1_dev[D[6]];
D[7] = SB2_dev[D[7]];
D[8] = SB3_dev[D[8]];
D[9] = SB4_dev[D[9]];
D[10] = SB1_dev[D[10]];
D[11] = SB2_dev[D[11]];
D[12] = SB3_dev[D[12]];
D[13] = SB4_dev[D[13]];
D[14] = SB1_dev[D[14]];
D[15] = SB2_dev[D[15]];
//Diffusion layer
aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14];
aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15];
aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15];
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14];
aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15];
aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15];
aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13];
aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13];
aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15];
aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14];
aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15];
aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14];
aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12];
aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13];
aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14];
aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15];
//put the result into plaintext registers
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = aux[i];
}
}
template <unsigned int keySize>
__global__ void Encrypt(uint8* plainText, unsigned long textSize, uint8* ek)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
uint8 plainTextR[16];//registers keeping the plaintext.
__shared__ uint8 sdata[272];//each round key is 16 bytes, there are 17 round keys 272 bytes
//Put encryption round keys to shared memory.
sdata[tid] = ek[tid];
if (tid < 16) {//rest of the bytes are loaded by first 16 threads.
sdata[256 + tid] = ek[256 + tid];
}
//Load the plaintext to registers
for (int i = 0; i < 16; i++)
{
plainTextR[i] = plainText[16 * idx + i];
}
__syncthreads();
if (keySize == 16)//128-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);//...ek11
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[176 + i];//ek12
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[192 + i];//ek13
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
else if (keySize == 24)//192-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);
FE_d(plainTextR, &sdata[176]);
F0_d(plainTextR, &sdata[192]);//ek13
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[208 + i];//ek14
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[224 + i];//ek15
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
else//256-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);
FE_d(plainTextR, &sdata[176]);
F0_d(plainTextR, &sdata[192]);
FE_d(plainTextR, &sdata[208]);
F0_d(plainTextR, &sdata[224]);//ek15
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[240 + i];//ek16
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[256 + i];//ek17
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
}
int main(void)
{
/////////INPUT PART BEGIN//////////////////////
enum workMode workmode = ENCRYPTION;
//Device pointers:
uint8* deviceArr, *ek_d, *dk_d;
FILE *file;
uint8* inputText;//either Plaintext or Ciphertext based on workmode;
unsigned long int fileLen, textSize;
uint8 numOfRounds;
const uint8 keySize = 32;
uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f};
file = fopen("../input.txt", "r");
if (file)
{
char buf[3] = { 0 };//two hex digits plus a terminating null so strtol parses exactly one byte
fseek(file, 0, SEEK_END);
fileLen = ftell(file);
fseek(file, 0, SEEK_SET);
textSize = fileLen / 2;
inputText = (uint8*)malloc(textSize);
for (int i = 0; i < textSize; i++)
{
buf[0] = fgetc(file);
buf[1] = fgetc(file);
uint8 hexVal = (uint8)strtol(buf, NULL, 16);
inputText[i] = hexVal;
}
}
else
{
printf("File not found.\n");
return -1;
}
/////////INPUT PART END//////////////////////
if (keySize == 16)
numOfRounds = 13;
else if (keySize == 24)
numOfRounds = 15;
else
numOfRounds = 17;
uint8 KL[16];//KL = leftmost 16 bytes of key
uint8 KR[16];//KR = rightmost 16 bytes of key
/*
Most significant byte is stored in 0th index.
KL = leftmost 16 bytes of key
KR = rightmost 16 bytes of key
*/
for (int i = 0; i < 16; i++)
{
KL[i] = key[i];
}
for (int i = 0; i < 16; i++)
{
KR[i] = key[i + 16];
}
uint8* CK1, *CK2, *CK3;
if (keySize == 16) {
CK1 = C1;
CK2 = C2;
CK3 = C3;
}
else if (keySize == 24) {
CK1 = C2;
CK2 = C3;
CK3 = C1;
}
else {
CK1 = C3;
CK2 = C1;
CK3 = C2;
}
//Calculate round key generators W0,W1,W2,W3
uint8* W0 = KL;
uint8 W1[16];
uint8 W2[16];
uint8 W3[16];
uint8 Fres[16];//auxiliary array
/*
W0, W1, W2, W3 are calculated only once and used for all blocks.
Since the key data W0 and CK1 are small enough, these key generators are calculated on the CPU.
W1 needed for calc of W2, W2 needed for calc of W3.
F0 and FE are also used in the encryption process.
*/
F0(W0, CK1, Fres);
XOR_16(Fres, KR, W1);
FE(W1, CK2, Fres);
XOR_16(Fres, W0, W2);
F0(W2, CK3, Fres);
XOR_16(Fres, W1, W3);
GenerateRoundKeys(W0, W1, W2, W3);
/*
Because each thread will process 16 bytes we need textSize/16 threads in total.
Then the number of thread blocks is ceil(textSize/(16*blockSize)).
To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM.
Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K.
In this code 16 registers hold the plaintext and 16 are auxiliary, plus 1 more, so each thread uses 33 registers.
Then the block size must be smaller than 64K/33, and larger than 272 since the first 272 threads load the shared memory.
512, 1024 are available blockSizes.
256 can also be tried but number of threads loading the shared memory must be decreased.
Keeping the round keys in registers results in low number of warps per SM therefore poor performance.
*/
int blockSize = 256;
int numOfBlocks = ceil((float)(textSize) / (16 * blockSize));
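//e.g. a 1 MiB input (1,048,576 bytes) is 65,536 16-byte blocks -> 65,536 threads -> 256 blocks of 256 threads.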
if (workmode == ENCRYPTION)//ENCRYPT
{
uint8* resCipherText = (uint8*)malloc(textSize);
hipMalloc((void**)& deviceArr, textSize);
hipMalloc((void**)& ek_d, 272);
//START TIMER.
using namespace std::chrono;
high_resolution_clock::time_point start = high_resolution_clock::now();
hipMemcpy(deviceArr, inputText, textSize, hipMemcpyHostToDevice);
hipMemcpy(ek_d, ek, 272, hipMemcpyHostToDevice);
Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, ek_d);
hipMemcpy(resCipherText, deviceArr, textSize, hipMemcpyDeviceToHost);
//END TIMER; PRINT ELAPSED TIME.
high_resolution_clock::time_point end = high_resolution_clock::now();
duration<double> timeElapsed = duration_cast<duration<double>>(end - start);
std::cout << "Time elapsed: " << timeElapsed.count() << std::endl;
//Print/write to file
FILE *f = fopen("output.txt", "w");
for (int i = 0; i < textSize; i++) {
fprintf(f, "%02x", resCipherText[i]);
}
fclose(f);
//free
hipFree(deviceArr);
hipFree(ek_d);
free(resCipherText);
}
else //DECRYPT
{
//Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys.
GenerateDecRoundKeys(numOfRounds);
uint8* resPlainText = (uint8*)malloc(textSize);
hipMalloc((void**)& deviceArr, textSize);
hipMalloc((void**)& dk_d, 272);
hipMemcpy(deviceArr, inputText, textSize, hipMemcpyHostToDevice);
hipMemcpy(dk_d, dk, 272, hipMemcpyHostToDevice);
Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d);
hipMemcpy(resPlainText, deviceArr, textSize, hipMemcpyDeviceToHost);
//Print/write to file
FILE *f = fopen("output.txt", "w");
for (int i = 0; i < textSize; i++) {
fprintf(f, "%02x", resPlainText[i]);
}
fclose(f);
//free
hipFree(deviceArr);
hipFree(dk_d);
free(resPlainText);
}
return 0;
} | 08fb0185513636f4fd8d8253ac1936a99b4e1bed.cu | /*
This version assigns one thread per 16 bytes of text (one text block).
Stores the plaintext/ciphertext in registers.
Stores the encryption keys in shared memory.
Stores the S-boxes in constant memory.
The blocksize is 256.
*/
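/*
With this layout each 256-thread block covers 256 * 16 = 4096 bytes of text,
uses 272 bytes of shared memory for the round keys, and reads the four 256-byte
S-boxes from constant memory.
*/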
#include <iostream>
#include <fstream>
#include <sstream>
#include <chrono>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
typedef unsigned char uint8;
enum workMode { ENCRYPTION, DECRYPTION };
//Key generation constants
uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 };
uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 };
uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e };
//Encryption round keys
uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes)
//Decryption round keys
uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes)
//S-boxes
static const uint8 SB1[256] =
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
static const uint8 SB2[256] =
{
0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1,
0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1,
0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB,
0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB,
0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD,
0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53,
0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1,
0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40,
0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC,
0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5,
0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43,
0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8,
0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA,
0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C,
0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D,
0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81
};
static const uint8 SB3[256] =
{
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
};
static const uint8 SB4[256] =
{
0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C,
0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D,
0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D,
0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED,
0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B,
0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE,
0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9,
0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41,
0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A,
0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7,
0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC,
0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5,
0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45,
0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D,
0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3,
0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60
};
//S-boxes
__constant__ uint8 SB1_dev[256] =
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
__constant__ uint8 SB2_dev[256] =
{
0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1,
0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1,
0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB,
0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB,
0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD,
0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53,
0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1,
0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40,
0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC,
0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5,
0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43,
0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8,
0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA,
0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C,
0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D,
0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81
};
__constant__ uint8 SB3_dev[256] =
{
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
};
__constant__ uint8 SB4_dev[256] =
{
0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C,
0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D,
0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D,
0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED,
0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B,
0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE,
0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9,
0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41,
0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A,
0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7,
0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC,
0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5,
0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45,
0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D,
0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3,
0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60
};
uint8 hex2dec(char ch)
{
if (ch >= '0' && ch <= '9')
return ch - '0';
else
return ch - 'a' + 10;
}
uint8 leftRotate(uint8 n, uint8 d)
{
return (n << d) | (n >> (8 - d));
}
uint8 rightRotate(uint8 n, uint8 d)
{
return (n >> d) | (n << (8 - d));
}
uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array
{
uint8* tmp = (uint8*)malloc(amount);
uint8* newArr = (uint8*)malloc(16 * sizeof(uint8));
for (int i = 0; i < amount; i++) {
tmp[i] = arr[arrSize - amount + i];
}
for (int i = arrSize - 1; i >= amount; i--) {
newArr[i] = arr[i - amount];
}
for (int i = 0; i < amount; i++) {
newArr[i] = tmp[i];
}
free(tmp);
return newArr;
}
uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array
{
uint8* tmp = (uint8*)malloc(amount);
uint8* newArr = (uint8*)malloc(16 * sizeof(uint8));
for (int i = 0; i < amount; i++) {
tmp[i] = arr[i];
}
for (int i = 0; i < arrSize - amount; i++) {
newArr[i] = arr[i + amount];
}
for (int i = 0; i < amount; i++) {
newArr[arrSize - amount + i] = tmp[i];
}
free(tmp);
return newArr;
}
uint8* ShiftArrR(uint8* originalArr, int amount)
{
int arrSize = 16;
int byteShiftAmount = amount / 8;
uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount);
amount = amount - byteShiftAmount * 8;
uint8 carryTmp, carry;
carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right
for (int i = 0; i < arrSize; i++)
{
carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right
arr[i] >>= amount;//right shift the current byte.
arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left
carry = carryTmp;
}
return arr;
}
uint8* ShiftArrL(uint8* originalArr, int amount)
{
int arrSize = 16;
int byteShiftAmount = amount / 8;
uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount);
amount = amount - byteShiftAmount * 8;
uint8 carryTmp, carry;
carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left
for (int i = arrSize - 1; i >= 0; i--)
{
carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left
arr[i] <<= amount;//left shift the current byte.
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right
carry = carryTmp;
}
return arr;
}
void XOR_16(uint8* x, uint8* y, uint8* z)
{
for (int i = 0; i < 16; i++) {
z[i] = x[i] ^ y[i];
}
}
void XOR_16wFree(uint8* x, uint8* y, uint8* z)
{
for (int i = 0; i < 16; i++) {
z[i] = x[i] ^ y[i];
}
free(y);
}
//Substitution Layer 1
void SL1(uint8* in, uint8* out)
{
out[0] = SB1[in[0]];
out[1] = SB2[in[1]];
out[2] = SB3[in[2]];
out[3] = SB4[in[3]];
out[4] = SB1[in[4]];
out[5] = SB2[in[5]];
out[6] = SB3[in[6]];
out[7] = SB4[in[7]];
out[8] = SB1[in[8]];
out[9] = SB2[in[9]];
out[10] = SB3[in[10]];
out[11] = SB4[in[11]];
out[12] = SB1[in[12]];
out[13] = SB2[in[13]];
out[14] = SB3[in[14]];
out[15] = SB4[in[15]];
}
//Substitution Layer 2 (inverse of SL1)
void SL2(uint8* in, uint8* out)
{
out[0] = SB3[in[0]];
out[1] = SB4[in[1]];
out[2] = SB1[in[2]];
out[3] = SB2[in[3]];
out[4] = SB3[in[4]];
out[5] = SB4[in[5]];
out[6] = SB1[in[6]];
out[7] = SB2[in[7]];
out[8] = SB3[in[8]];
out[9] = SB4[in[9]];
out[10] = SB1[in[10]];
out[11] = SB2[in[11]];
out[12] = SB3[in[12]];
out[13] = SB4[in[13]];
out[14] = SB1[in[14]];
out[15] = SB2[in[15]];
}
//Diffusion layer
void A(uint8* in, uint8* out)
{
out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14];
out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15];
out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15];
out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14];
out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15];
out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15];
out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13];
out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13];
out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15];
out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14];
out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15];
out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14];
out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12];
out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13];
out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14];
out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15];
}
/*Round functions (F0, FE) take 16 bytes of plaintext
and generate an intermediate value of 16 bytes
*/
//Odd Round Function
void F0(uint8* D, uint8* RK, uint8* out)
{
//res1, res2 are auxiliary arrays for storing the results of XOR_16 and SL1
uint8 res1[16];
uint8 res2[16];
XOR_16(D, RK, res1);
SL1(res1, res2);
A(res2, out);
}
//Even Round Function
void FE(uint8* D, uint8* RK, uint8* out)
{
//res1, res2 are auxiliary arrays for storing the results of XOR_16 and SL2
uint8 res1[16];
uint8 res2[16];
XOR_16(D, RK, res1);
SL2(res1, res2);
A(res2, out);
}
void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3)
{
//Producing encryption round keys
//Producing encryption round keys can be parallelized.
//However, since we do this once for all blocks, it is faster to compute on the CPU.
//ShiftArr functions return arrays allocated on the heap, which must be freed.
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]);
XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]);
XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]);
XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]);
XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]);
XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]);
XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]);
XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]);
XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]);
XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]);
XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]);
XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]);
XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]);
XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]);
XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]);
XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]);
XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]);
}
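/*
GenerateDecRoundKeys derives the decryption round keys from the encryption round
keys: the first dk is the last ek, the middle dks are the remaining eks taken in
reverse order with the diffusion layer A applied to each, and the last dk is ek1.
This works as long as A is its own inverse, so applying it here undoes the
diffusion performed during encryption.
*/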
void GenerateDecRoundKeys(uint8 numOfRounds)
{
int N = numOfRounds - 1;
int k = 1;
for (int i = 0; i < 16; i++)
{
dk[i] = ek[16 * N + i];
}
for (int i = N - 1; i >= 1; i--)
{
A(&ek[i * 16], &dk[k * 16]);
k++;
}
for (int i = 0; i < 16; i++)
{
dk[k * 16 + i] = ek[i];
}
}
//Odd Round Function
__device__ void F0_d(uint8* D, const uint8* RK)
{
uint8 aux[16];//auxiliary array for keeping the results of the diffusion layer
//XOR with the round key
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = D[i] ^ RK[i];
}
//Substitution layer (SL1)
D[0] = SB1_dev[D[0]];
D[1] = SB2_dev[D[1]];
D[2] = SB3_dev[D[2]];
D[3] = SB4_dev[D[3]];
D[4] = SB1_dev[D[4]];
D[5] = SB2_dev[D[5]];
D[6] = SB3_dev[D[6]];
D[7] = SB4_dev[D[7]];
D[8] = SB1_dev[D[8]];
D[9] = SB2_dev[D[9]];
D[10] = SB3_dev[D[10]];
D[11] = SB4_dev[D[11]];
D[12] = SB1_dev[D[12]];
D[13] = SB2_dev[D[13]];
D[14] = SB3_dev[D[14]];
D[15] = SB4_dev[D[15]];
//Diffusion layer
aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14];
aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15];
aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15];
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14];
aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15];
aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15];
aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13];
aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13];
aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15];
aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14];
aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15];
aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14];
aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12];
aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13];
aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14];
aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15];
//put the result into plaintext registers
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = aux[i];
}
}
//Even Round Function
__device__ void FE_d(uint8* D, const uint8* RK)
{
uint8 aux[16];//auxiliary array for keeping the results of the diffusion layer
//XOR with the round key
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = D[i] ^ RK[i];
}
//Substitution layer (SL2)
D[0] = SB3_dev[D[0]];
D[1] = SB4_dev[D[1]];
D[2] = SB1_dev[D[2]];
D[3] = SB2_dev[D[3]];
D[4] = SB3_dev[D[4]];
D[5] = SB4_dev[D[5]];
D[6] = SB1_dev[D[6]];
D[7] = SB2_dev[D[7]];
D[8] = SB3_dev[D[8]];
D[9] = SB4_dev[D[9]];
D[10] = SB1_dev[D[10]];
D[11] = SB2_dev[D[11]];
D[12] = SB3_dev[D[12]];
D[13] = SB4_dev[D[13]];
D[14] = SB1_dev[D[14]];
D[15] = SB2_dev[D[15]];
//Diffusion layer
aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14];
aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15];
aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15];
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14];
aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15];
aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15];
aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13];
aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13];
aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15];
aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14];
aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15];
aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14];
aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12];
aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13];
aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14];
aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15];
//put the result into plaintext registers
#pragma unroll
for (int i = 0; i < 16; i++) {
D[i] = aux[i];
}
}
template <unsigned int keySize>
__global__ void Encrypt(uint8* plainText, unsigned long textSize, uint8* ek)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
uint8 plainTextR[16];//registers keeping the plaintext.
__shared__ uint8 sdata[272];//each round key is 16 bytes, there are 17 round keys 272 bytes
//Put encryption round keys to shared memory.
sdata[tid] = ek[tid];
if (tid < 16) {//rest of the bytes are loaded by first 16 threads.
sdata[256 + tid] = ek[256 + tid];
}
//Load the plaintext to registers
for (int i = 0; i < 16; i++)
{
plainTextR[i] = plainText[16 * idx + i];
}
__syncthreads();
if (keySize == 16)//128-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);//...ek11
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[176 + i];//ek12
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[192 + i];//ek13
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
else if (keySize == 24)//192-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);
FE_d(plainTextR, &sdata[176]);
F0_d(plainTextR, &sdata[192]);//ek13
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[208 + i];//ek14
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[224 + i];//ek15
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
else//256-bit keys
{
F0_d(plainTextR, &sdata[0]);//ek1...
FE_d(plainTextR, &sdata[16]);
F0_d(plainTextR, &sdata[32]);
FE_d(plainTextR, &sdata[48]);
F0_d(plainTextR, &sdata[64]);
FE_d(plainTextR, &sdata[80]);
F0_d(plainTextR, &sdata[96]);
FE_d(plainTextR, &sdata[112]);
F0_d(plainTextR, &sdata[128]);
FE_d(plainTextR, &sdata[144]);
F0_d(plainTextR, &sdata[160]);
FE_d(plainTextR, &sdata[176]);
F0_d(plainTextR, &sdata[192]);
FE_d(plainTextR, &sdata[208]);
F0_d(plainTextR, &sdata[224]);//ek15
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[240 + i];//ek16
}
plainTextR[0] = SB3_dev[plainTextR[0]];
plainTextR[1] = SB4_dev[plainTextR[1]];
plainTextR[2] = SB1_dev[plainTextR[2]];
plainTextR[3] = SB2_dev[plainTextR[3]];
plainTextR[4] = SB3_dev[plainTextR[4]];
plainTextR[5] = SB4_dev[plainTextR[5]];
plainTextR[6] = SB1_dev[plainTextR[6]];
plainTextR[7] = SB2_dev[plainTextR[7]];
plainTextR[8] = SB3_dev[plainTextR[8]];
plainTextR[9] = SB4_dev[plainTextR[9]];
plainTextR[10] = SB1_dev[plainTextR[10]];
plainTextR[11] = SB2_dev[plainTextR[11]];
plainTextR[12] = SB3_dev[plainTextR[12]];
plainTextR[13] = SB4_dev[plainTextR[13]];
plainTextR[14] = SB1_dev[plainTextR[14]];
plainTextR[15] = SB2_dev[plainTextR[15]];
#pragma unroll
for (int i = 0; i < 16; i++) {
plainTextR[i] = plainTextR[i] ^ sdata[256 + i];//ek17
}
//Write back to global memory
for (int i = 0; i < 16; i++)
{
plainText[16 * idx + i] = plainTextR[i];
}
}
}
int main(void)
{
/////////INPUT PART BEGIN//////////////////////
enum workMode workmode = ENCRYPTION;
//Device pointers:
uint8* deviceArr, *ek_d, *dk_d;
FILE *file;
uint8* inputText;//either Plaintext or Ciphertext based on workmode;
unsigned long int fileLen, textSize;
uint8 numOfRounds;
const uint8 keySize = 32;
uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f};
file = fopen("../input.txt", "r");
if (file)
{
char buf[3] = { 0 };//two hex digits plus a terminating null so strtol parses exactly one byte
fseek(file, 0, SEEK_END);
fileLen = ftell(file);
fseek(file, 0, SEEK_SET);
textSize = fileLen / 2;
inputText = (uint8*)malloc(textSize);
for (int i = 0; i < textSize; i++)
{
buf[0] = fgetc(file);
buf[1] = fgetc(file);
uint8 hexVal = (uint8)strtol(buf, NULL, 16);
inputText[i] = hexVal;
}
}
else
{
printf("File not found.\n");
return -1;
}
/////////INPUT PART END//////////////////////
if (keySize == 16)
numOfRounds = 13;
else if (keySize == 24)
numOfRounds = 15;
else
numOfRounds = 17;
uint8 KL[16];//KL = leftmost 16 bytes of key
uint8 KR[16];//KR = rightmost 16 bytes of key
/*
Most significant byte is stored in 0th index.
KL = leftmost 16 bytes of key
KR = rightmost 16 bytes of key
*/
for (int i = 0; i < 16; i++)
{
KL[i] = key[i];
}
for (int i = 0; i < 16; i++)
{
KR[i] = key[i + 16];
}
uint8* CK1, *CK2, *CK3;
if (keySize == 16) {
CK1 = C1;
CK2 = C2;
CK3 = C3;
}
else if (keySize == 24) {
CK1 = C2;
CK2 = C3;
CK3 = C1;
}
else {
CK1 = C3;
CK2 = C1;
CK3 = C2;
}
//Calculate round key generators W0,W1,W2,W3
uint8* W0 = KL;
uint8 W1[16];
uint8 W2[16];
uint8 W3[16];
uint8 Fres[16];//auxiliary array
/*
W0, W1, W2, W3 are calculated only once and used for all blocks.
Since the key data W0 and CK1 are small enough, these key generators are calculated on the CPU.
W1 needed for calc of W2, W2 needed for calc of W3.
F0 and FE are also used in the encryption process.
*/
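//i.e. W1 = F0(W0, CK1) ^ KR, W2 = FE(W1, CK2) ^ W0, W3 = F0(W2, CK3) ^ W1, computed below.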
F0(W0, CK1, Fres);
XOR_16(Fres, KR, W1);
FE(W1, CK2, Fres);
XOR_16(Fres, W0, W2);
F0(W2, CK3, Fres);
XOR_16(Fres, W1, W3);
GenerateRoundKeys(W0, W1, W2, W3);
/*
Because each thread will process 16 bytes we need textSize/16 threads in total.
Then the number of thread blocks is ceil(textSize/(16*blockSize)).
To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM.
Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K.
In this code 16 registers hold the plaintext and 16 are auxiliary, plus 1 more, so each thread uses 33 registers.
Then the block size must be smaller than 64K/33, and larger than 272 since the first 272 threads load the shared memory.
512, 1024 are available blockSizes.
256 can also be tried but number of threads loading the shared memory must be decreased.
Keeping the round keys in registers results in low number of warps per SM therefore poor performance.
*/
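//e.g. 65,536 registers / 33 registers per thread is roughly 1985 threads, so blocks of 256, 512 or 1024 threads stay under that bound.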
int blockSize = 256;
int numOfBlocks = ceil((float)(textSize) / (16 * blockSize));
if (workmode == ENCRYPTION)//ENCRYPT
{
uint8* resCipherText = (uint8*)malloc(textSize);
cudaMalloc((void**)& deviceArr, textSize);
cudaMalloc((void**)& ek_d, 272);
//START TIMER.
using namespace std::chrono;
high_resolution_clock::time_point start = high_resolution_clock::now();
cudaMemcpy(deviceArr, inputText, textSize, cudaMemcpyHostToDevice);
cudaMemcpy(ek_d, ek, 272, cudaMemcpyHostToDevice);
Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, ek_d);
cudaMemcpy(resCipherText, deviceArr, textSize, cudaMemcpyDeviceToHost);
//END TIMER; PRINT ELAPSED TIME.
high_resolution_clock::time_point end = high_resolution_clock::now();
duration<double> timeElapsed = duration_cast<duration<double>>(end - start);
std::cout << "Time elapsed: " << timeElapsed.count() << std::endl;
//Print/write to file
FILE *f = fopen("output.txt", "w");
for (int i = 0; i < textSize; i++) {
fprintf(f, "%02x", resCipherText[i]);
}
fclose(f);
//free
cudaFree(deviceArr);
cudaFree(ek_d);
free(resCipherText);
}
else //DECRYPT
{
//Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys.
GenerateDecRoundKeys(numOfRounds);
uint8* resPlainText = (uint8*)malloc(textSize);
cudaMalloc((void**)& deviceArr, textSize);
cudaMalloc((void**)& dk_d, 272);
cudaMemcpy(deviceArr, inputText, textSize, cudaMemcpyHostToDevice);
cudaMemcpy(dk_d, dk, 272, cudaMemcpyHostToDevice);
Encrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d);
cudaMemcpy(resPlainText, deviceArr, textSize, cudaMemcpyDeviceToHost);
//Print/write to file
FILE *f = fopen("output.txt", "w");
for (int i = 0; i < textSize; i++) {
fprintf(f, "%02x", resPlainText[i]);
}
fclose(f);
//free
cudaFree(deviceArr);
cudaFree(dk_d);
free(resPlainText);
}
return 0;
} |
a7954b714e1253b02c86a0c88c89cb7a0b5e4eee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/RReLU.hip"
#else
#include <THHUNN/common.h>
#include <ATen/CUDAGenerator.h>
void THNN_(RReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace,
void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
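// In the train path rreluUpdateOutputTrain draws a per-element slope in [lower, upper]
// for negative inputs and records it in `noise` so the backward pass can reuse it;
// the eval path below applies the fixed mean slope (lower + upper) / 2 instead.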
if (train)
{
input = THCTensor_(newContiguous)(state, input);
THCTensor_(resizeAs)(state, noise, input);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *noise_data = THCTensor_(data)(state, noise);
ptrdiff_t n = THCTensor_(nElement)(state, input);
// philox offset calculation for grid-stride loop utilizing hiprand4
const uint32_t curand4_engine_calls = 4;
dim3 grid = NUM_BLOCKS(n);
uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (inplace)
{
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper);
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
scalar_t *output_data = THCTensor_(data)(state, output);
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
else
{
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope));
}
}
}
void THNN_(RReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCTensor_(cmul)(state, gradOutput, gradOutput, noise);
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(cmul)(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope));
}
}
THCTensor_(free)(state, gradOutput);
}
#endif
| a7954b714e1253b02c86a0c88c89cb7a0b5e4eee.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/RReLU.cu"
#else
#include <THCUNN/common.h>
#include <ATen/CUDAGenerator.h>
void THNN_(RReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace,
void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
if (train)
{
input = THCTensor_(newContiguous)(state, input);
THCTensor_(resizeAs)(state, noise, input);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *noise_data = THCTensor_(data)(state, noise);
ptrdiff_t n = THCTensor_(nElement)(state, input);
// philox offset calculation for grid-stride loop utilizing curand4
const uint32_t curand4_engine_calls = 4;
dim3 grid = NUM_BLOCKS(n);
uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (inplace)
{
rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper);
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
scalar_t *output_data = THCTensor_(data)(state, output);
rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
else
{
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope));
}
}
}
void THNN_(RReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCTensor_(cmul)(state, gradOutput, gradOutput, noise);
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(cmul)(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope));
}
}
THCTensor_(free)(state, gradOutput);
}
#endif
|
07c8775033d9548919d7d1ee15c541acee965953.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "orig_LR3"
#define BLOCKSIZEX 192
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 192
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 192
#define YDIM 256
#define ZDIM 4
#define TMAX 20000
#define STARTF 0
#define OBSTR1 6.0f
#define OBSTX1 95.5f
#define OBSTY1 95.5f
#define OBSTZ1 32.5f
#define OBSTR2 32.f
#define OBSTX2 319.5f
#define OBSTY2 511.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 47.75f //minimum x coord of LR
#define XLRDIM 192 //number of nodes in x
#define LRY0 47.75f
#define YLRDIM 256
#define LRZ0 -0.25f
#define ZLRDIM 8
#define RE 20.f//2000.f//100.f;
#define UMAX 0.08f
#define SmagLES "NO" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 1 //1,0
#define CS 0.1f
#define VELAV 1
#define START_VELAV 400000
#define START_VELFLUC 700000
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
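/*
 Usage sketch (added for illustration, not part of the original solver): ImageFcn maps
 every lattice node to one of the codes above, and the update kernels branch on that
 code, roughly:
   int im = ImageFcn(x,y,z);
   if(im == 1 || im == 10) { apply bounce-back (code 10 also contributes to the force sum) }
   else boundaries(f0,...,f18, y, z, im);   // symmetry / inlet / outlet handling
 The actual dispatch presumably lives in the stream-and-collide kernels further down
 in this file.
*/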
inline __device__ int ImageFcn(float x, float y, float z){
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
// else
// if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z<3)
// value = 1;
// if(z>ZDIM-4)
// value = 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// if(z == 1)
// value = 1;
// if(z == ZDIM-2)
// value = 1;
else if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
return value;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
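//Note (added for clarity): this evaluates a parabolic Poiseuille profile that is 0 at
//the wall (x == 0.5) and 1 on the centreline (x-0.5 == radius); the commented-out
//"return 1.f" would switch to a uniform (plug) inflow instead.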
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
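//Minimal sanity-check sketch (added for illustration; not called by the solver): at the
//cell centre x=y=z=0.5 every corner weight in trilinear_interp is 1/8, so the
//interpolant reduces to the plain average of the eight corner values.
inline __device__ float trilinear_interp_center (float v000, float v001, float v010, float v011,
 float v100, float v101, float v110, float v111){
 return 0.125f*(v000+v001+v010+v011+v100+v101+v110+v111);
}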
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
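/*
 Note (added for clarity): the *_Regularized routines above and below follow the
 regularized boundary-condition idea: prescribe the macroscopic target (velocity at the
 west inlet, density at the east outlet), rebuild the equilibrium part feq from it, and
 reconstruct the non-equilibrium part of every distribution from the second-order
 moment tensor PI before writing all nineteen f's back.
*/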
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 53)//DirichletWest
// {
// //DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 54)//DirichletWest
// {
// //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//DirichletWest
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
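// timeval_subtract: host-side timing helper. Computes *result = x - y in seconds
// (as a double) and returns 1 if the difference is negative.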
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
  /* Compute the elapsed time; tv_usec is now non-negative. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
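// Index clamping helpers used by the memory-address functions below:
// dmax(a) clamps a to be >= 0; dmin(a,b) clamps a to at most b-1.
// dmin_p/dmax_p appear to be the periodic-wrap counterparts (wrapping to 0 or b-1
// instead of clamping).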
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
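// Linear-index helpers (pitch given in elements):
// f_mem/f_memLR address one of the 19 interior distribution arrays of the
// coarse/refined mesh at (x,y,z); buff_mem/buff_memLR address the single-slice
// top/bottom buffers (g/h). Indices are clamped with dmax/dmin to guard against
// out-of-bounds accesses.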
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
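// bgk_feq: standard second-order D3Q19 equilibrium distributions for the given
// density rho and velocity (u,v,w), as used by the BGK collision operator.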
inline __device__ void bgk_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
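// mrt_feq: D3Q19 equilibrium distributions expressed through the moment
// decomposition used by the MRT collision operators below.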
inline __device__ void mrt_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v);
f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v);
f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v);
f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v);
f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w ;
f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w);
f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w);
f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w ;
f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-w);
f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v-w);
f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+w);
f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
}
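// vel_av / vel_avLR: running time-average of the local x- and y-velocity,
// accumulated from time step START_VELAV onward. The LR variant weights the
// update by LRFACTOR, presumably because the refined mesh advances in smaller
// time steps.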
inline __device__ void vel_av(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
inline __device__ void vel_avLR(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
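// vel_fluc / vel_flucLR: running average of the squared velocity fluctuations
// (u-uAv)^2 and (v-vAv)^2, accumulated from time step START_VELFLUC onward.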
inline __device__ void vel_fluc(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
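// bgk_collide: single-relaxation-time (BGK) collision; relaxes each distribution
// toward its D3Q19 equilibrium with relaxation parameter omega.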
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
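// mrt_collide: multiple-relaxation-time (MRT) collision. The non-conserved
// moments m1..m18 are computed (already shifted by their equilibria), relaxed
// with fixed rates except the stress-related moments which use omega, and the
// post-collision populations are reassembled. When SmagLES == "YES" the local
// strain magnitude Q is estimated from the non-equilibrium stress tensor PI and
// omega is replaced by a Smagorinsky-adjusted value (constant CS).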
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
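// mrt_collide_LES: same MRT collision as above, but takes the Smagorinsky
// constant as an argument (Cs). Note that the active turbulence correction
// still uses the global CS; Cs only appears in the commented-out alternatives.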
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
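// bgk_scale_cf: rescaling of the distributions at the grid-refinement interface.
// Each population is written as a blend of itself and its BGK equilibrium,
// f = SF*f + (1-SF)*feq, so the non-equilibrium part is rescaled by SF while the
// equilibrium part is preserved.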
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
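// mrt_scale_cf: same interface rescaling as bgk_scale_cf but with the MRT-form
// equilibria. The Smagorinsky quantities computed here feed only commented-out
// alternatives for SF; the active code applies the SF passed in.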
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
//SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
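// mrt_scale_fc_LES: fine-to-coarse rescaling. The scale factor is derived from
// the two relaxation rates, SF = omega*(1-omega2)/((1-omega)*omega2), and the
// populations are blended with their MRT-form equilibria as above.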
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
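// f_Extract: transfers data from the refined (fine) mesh back onto the interior
// rim of the coarse mesh. The fine distributions are trilinearly interpolated to
// the coarse node, rescaled with mrt_scale_cf / bgk_scale_cf depending on MODEL,
// and written into the coarse interior array fout.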
__global__ void f_Extract(float* fout, float* fin, float* gin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
float zcoord = z+1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
float zcoord_LR = LRLEVEL*(zcoord-LRZ0)-1.f;//-1.f to account for g layer
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int zm = int(zcoord_LR);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = zcoord_LR-zm;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zm,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zm,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zm,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zm,pitch_f,zInner_f)];
float v100 = fin[f_memLR(i ,xm,ym,zp,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,zp,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,zp,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,zp,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fout[f_mem(0 ,x,y,z,pitch_c,zInner_c)] = f[0 ];
fout[f_mem(1 ,x,y,z,pitch_c,zInner_c)] = f[1 ];
fout[f_mem(2 ,x,y,z,pitch_c,zInner_c)] = f[2 ];
fout[f_mem(3 ,x,y,z,pitch_c,zInner_c)] = f[3 ];
fout[f_mem(4 ,x,y,z,pitch_c,zInner_c)] = f[4 ];
fout[f_mem(5 ,x,y,z,pitch_c,zInner_c)] = f[5 ];
fout[f_mem(6 ,x,y,z,pitch_c,zInner_c)] = f[6 ];
fout[f_mem(7 ,x,y,z,pitch_c,zInner_c)] = f[7 ];
fout[f_mem(8 ,x,y,z,pitch_c,zInner_c)] = f[8 ];
fout[f_mem(9 ,x,y,z,pitch_c,zInner_c)] = f[9 ];
fout[f_mem(10,x,y,z,pitch_c,zInner_c)] = f[10];
fout[f_mem(11,x,y,z,pitch_c,zInner_c)] = f[11];
fout[f_mem(12,x,y,z,pitch_c,zInner_c)] = f[12];
fout[f_mem(13,x,y,z,pitch_c,zInner_c)] = f[13];
fout[f_mem(14,x,y,z,pitch_c,zInner_c)] = f[14];
fout[f_mem(15,x,y,z,pitch_c,zInner_c)] = f[15];
fout[f_mem(16,x,y,z,pitch_c,zInner_c)] = f[16];
fout[f_mem(17,x,y,z,pitch_c,zInner_c)] = f[17];
fout[f_mem(18,x,y,z,pitch_c,zInner_c)] = f[18];
}
}
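// g_Extract: same extraction as f_Extract but for the bottom buffer slice of the
// coarse mesh (g); interpolates between the fine bottom buffer gin and the first
// interior fine slice (zf = 0.5).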
__global__ void g_Extract(float* gout, float* fin, float* gin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = 0;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = gin[buff_memLR(i ,xm,ym,pitch_f)];
float v001 = gin[buff_memLR(i ,xp,ym,pitch_f)];
float v010 = gin[buff_memLR(i ,xm,yp,pitch_f)];
float v011 = gin[buff_memLR(i ,xp,yp,pitch_f)];
float v100 = fin[f_memLR(i ,xm,ym,0,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,0,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,0,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,0,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
gout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
gout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
gout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
gout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
gout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
gout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
gout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
gout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
gout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
gout[buff_mem(10,x,y,pitch_c)] = f[10];
gout[buff_mem(11,x,y,pitch_c)] = f[11];
gout[buff_mem(12,x,y,pitch_c)] = f[12];
gout[buff_mem(13,x,y,pitch_c)] = f[13];
gout[buff_mem(14,x,y,pitch_c)] = f[14];
gout[buff_mem(15,x,y,pitch_c)] = f[15];
gout[buff_mem(16,x,y,pitch_c)] = f[16];
gout[buff_mem(17,x,y,pitch_c)] = f[17];
gout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
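// h_Extract: counterpart of g_Extract for the top buffer slice (h); interpolates
// between the last interior fine slice and the fine top buffer.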
__global__ void h_Extract(float* hout, float* fin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = zInner_c+2-1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zInner_f-1,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zInner_f-1,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zInner_f-1,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zInner_f-1,pitch_f,zInner_f)];
float v100 = hin[buff_memLR(i ,xm,ym,pitch_f)];
float v101 = hin[buff_memLR(i ,xp,ym,pitch_f)];
float v110 = hin[buff_memLR(i ,xm,yp,pitch_f)];
float v111 = hin[buff_memLR(i ,xp,yp,pitch_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
hout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
hout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
hout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
hout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
hout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
hout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
hout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
hout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
hout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
hout[buff_mem(10,x,y,pitch_c)] = f[10];
hout[buff_mem(11,x,y,pitch_c)] = f[11];
hout[buff_mem(12,x,y,pitch_c)] = f[12];
hout[buff_mem(13,x,y,pitch_c)] = f[13];
hout[buff_mem(14,x,y,pitch_c)] = f[14];
hout[buff_mem(15,x,y,pitch_c)] = f[15];
hout[buff_mem(16,x,y,pitch_c)] = f[16];
hout[buff_mem(17,x,y,pitch_c)] = f[17];
hout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
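// update_inner: one collision-streaming step for the interior z-slices of
// this GPU's coarse subdomain. Neighbour distributions are pulled from fA
// (or from the g/h buffer slices when z is the first/last interior plane);
// solid nodes (im==1 or im==10) are bounced back, otherwise the node gets
// the outlet/inlet extrapolations (im==100 north outlet, im==200 south
// inlet), boundaries(), and an MRT or BGK collision before being written to
// fB. When VELAV==1, running velocity averages/fluctuations are updated.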
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner, //pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, int t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
}
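// update_bottom: same update as update_inner, but for the bottom buffer
// slice of the subdomain. In-plane neighbours come from gA, the upward-moving
// populations (9-13) from temp (presumably the buffer exchanged with the
// adjacent subdomain below), and the downward-moving populations (14-18)
// from the first interior slice of f; results are written to gB.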
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
}
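// update_top: counterpart of update_bottom for the top buffer slice.
// In-plane neighbours come from hA, the upward-moving populations (9-13)
// from the last interior slice of f, and the downward-moving populations
// (14-18) from temp (presumably the buffer exchanged with the adjacent
// subdomain above); results are written to hB.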
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
}
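// update_inner_force: identical to update_inner, but on im==10 (body) nodes
// it also accumulates the bounce-back momentum exchange: twice the sum of
// the local distributions weighted by the lattice velocities is stored per
// thread in shared memory, reduced over the x-dimension of the block with a
// tree reduction, and added to FX/FY/FZ[Force_t] via atomicAdd.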
__global__ void update_inner_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
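// update_bottom_force: update_bottom plus the same shared-memory force
// accumulation as update_inner_force for im==10 nodes on the bottom buffer
// slice.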
__global__ void update_bottom_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
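// update_top_force: update_top plus the same shared-memory force
// accumulation as update_inner_force for im==10 nodes on the top buffer
// slice.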
__global__ void update_top_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *FX, float *FY, float *FZ, int Force_t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
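// update_inner_LR: interior update on the locally refined (fine) mesh,
// mirroring update_inner but with f_memLR/buff_memLR indexing and YLRDIM.
// The node type comes from ImageFcn evaluated at physical coordinates
// (LRX0+LRFACTOR*x, LRY0+LRFACTOR*y, LRZ0+LRFACTOR*(...)). No inlet/outlet
// extrapolation is applied here, only bounce-back or collision. t is a float,
// presumably because the fine grid subcycles in time with step LRFACTOR.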
__global__ void update_inner_LR(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}
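// update_bottom_LR: bottom-buffer update on the fine mesh; mirrors
// update_bottom (in-plane from gA, 9-13 from temp, 14-18 from the first
// interior fine slice of f) with node types taken from ImageFcn at physical
// coordinates.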
__global__ void update_bottom_LR(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
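// update_top_LR: top-buffer update on the fine mesh; mirrors update_top
// (in-plane from hA, 9-13 from the last interior fine slice of f, 14-18 from
// temp) with node types taken from ImageFcn at physical coordinates.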
__global__ void update_top_LR(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
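// update_inner_LR_force: update_inner_LR plus the bounce-back momentum
// exchange on im==10 nodes, accumulated in shared memory, tree-reduced over
// the x-dimension of the block and added to FX/FY/FZ[Force_t] with atomicAdd.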
__global__ void update_inner_LR_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
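// update_bottom_LR_force: update_bottom_LR plus the same force accumulation
// as update_inner_LR_force for im==10 nodes on the fine bottom buffer slice.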
__global__ void update_bottom_LR_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
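//Same as update_bottom_LR_force, but for the top halo layer (h) of the LR sub-domain:
//in-plane neighbours come from hA, the layer below from the last interior layer of f,
//and the layer above from temp. Post-collision values are written to hB.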
__global__ void update_top_LR_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
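//Interior LR update with coarse-grid coupling: nodes inside the LRLEVEL-wide rim of
//the refined patch are filled by trilinear interpolation from the coarse grid
//(f_c/g_c/h_c) and rescaled with the coarse-to-fine factor SF; all other nodes stream
//from fA/g/h, apply bounce-back or MRT/BGK collision, optionally accumulate velocity
//averages and fluctuations, and write to fB.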
__global__ void update_inner_LR_interp(float* fA, float* fB, float* g, float* h, float* f_c, float* g_c, float* h_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+(1+z)*LRFACTOR;//local zcoord within GPU
int im = ImageFcn(xcoord,ycoord,GPU*(zInner_c+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<1)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = g_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = g_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = g_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = g_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = f_c[ f_mem(i ,xm,ym,0 ,pitch_c,zInner_c)];
float v101 = f_c[ f_mem(i ,xp,ym,0 ,pitch_c,zInner_c)];
float v110 = f_c[ f_mem(i ,xm,yp,0 ,pitch_c,zInner_c)];
float v111 = f_c[ f_mem(i ,xp,yp,0 ,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
else if(zcoord>(zInner_c+2)-2)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[ f_mem(i ,xm,ym,zInner_c-1,pitch_c,zInner_c)];
float v001 = f_c[ f_mem(i ,xp,ym,zInner_c-1,pitch_c,zInner_c)];
float v010 = f_c[ f_mem(i ,xm,yp,zInner_c-1,pitch_c,zInner_c)];
float v011 = f_c[ f_mem(i ,xp,yp,zInner_c-1,pitch_c,zInner_c)];
float v100 = h_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = h_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = h_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = h_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord>ZDIM
else{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[f_mem(i ,xm,ym,zm-1,pitch_c,zInner_c)];//-1 to correct for index in f
float v001 = f_c[f_mem(i ,xp,ym,zm-1,pitch_c,zInner_c)];
float v010 = f_c[f_mem(i ,xm,yp,zm-1,pitch_c,zInner_c)];
float v011 = f_c[f_mem(i ,xp,yp,zm-1,pitch_c,zInner_c)];
float v100 = f_c[f_mem(i ,xm,ym,zp-1,pitch_c,zInner_c)];
float v101 = f_c[f_mem(i ,xp,ym,zp-1,pitch_c,zInner_c)];
float v110 = f_c[f_mem(i ,xm,yp,zp-1,pitch_c,zInner_c)];
float v111 = f_c[f_mem(i ,xp,yp,zp-1,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f[0 ];
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[1 ];
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[2 ];
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[3 ];
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[4 ];
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[5 ];
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[6 ];
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[7 ];
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[8 ];
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[9 ];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[11];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[18];
}
else
{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}//end else (no interp)
}
__global__ void update_bottom_LR_interp(float* gA, float* gB, float* f, float* temp, float* g_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0;
int im = ImageFcn(xcoord,ycoord,zcoord+GPU*LRFACTOR*ZLRDIM);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<0)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord)-1;//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = g_c[buff_mem(i ,xm,ym,pitch_c)];
float v101 = g_c[buff_mem(i ,xp,ym,pitch_c)];
float v110 = g_c[buff_mem(i ,xm,yp,pitch_c)];
float v111 = g_c[buff_mem(i ,xp,yp,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
gB[buff_memLR(10,x,y,pitch)] = f[10];
gB[buff_memLR(11,x,y,pitch)] = f[11];
gB[buff_memLR(12,x,y,pitch)] = f[12];
gB[buff_memLR(13,x,y,pitch)] = f[13];
gB[buff_memLR(14,x,y,pitch)] = f[14];
gB[buff_memLR(15,x,y,pitch)] = f[15];
gB[buff_memLR(16,x,y,pitch)] = f[16];
gB[buff_memLR(17,x,y,pitch)] = f[17];
gB[buff_memLR(18,x,y,pitch)] = f[18];
}
else
{
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR(14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR(15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR(16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR(17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
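//Top LR halo update with coarse-grid coupling: rim nodes are interpolated from the
//coarse buffers (h_c/temp_c) and rescaled by SF; the remaining nodes stream from
//hA/f/temp and collide (bounce-back or MRT/BGK) before being written to hB.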
__global__ void update_top_LR_interp(float* hA, float* hB, float* f, float* temp, float* h_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int im = ImageFcn(xcoord,ycoord,GPU*LRFACTOR*(zInner+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = h_c[buff_mem(i ,xm,ym,pitch_c)];
float v001 = h_c[buff_mem(i ,xp,ym,pitch_c)];
float v010 = h_c[buff_mem(i ,xm,yp,pitch_c)];
float v011 = h_c[buff_mem(i ,xp,yp,pitch_c)];
float v100 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
// }//end zcoord>ZDIM
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
hB[buff_memLR(10,x,y,pitch)] = f[10];
hB[buff_memLR(11,x,y,pitch)] = f[11];
hB[buff_memLR(12,x,y,pitch)] = f[12];
hB[buff_memLR(13,x,y,pitch)] = f[13];
hB[buff_memLR(14,x,y,pitch)] = f[14];
hB[buff_memLR(15,x,y,pitch)] = f[15];
hB[buff_memLR(16,x,y,pitch)] = f[16];
hB[buff_memLR(17,x,y,pitch)] = f[17];
hB[buff_memLR(18,x,y,pitch)] = f[18];
}
else{//not LR interp region
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
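//Inline-PTX helper: load a float from global memory with the .cg (cache-global)
//modifier, i.e. cached in L2 but bypassing L1.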
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
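//Initialize the interior (non-halo) distributions of one sub-domain to the
//equilibrium for rho=1, u=0.01, v=UMAX, w=0 (zero velocity on solid/obstacle nodes);
//level>0 switches the node coordinates to the refined (LR) grid.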
__global__ void initialize_single(float *f, size_t pitch, int yDim, int zDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
}
int j = x+y*pitch+z*yDim*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]=f0 ;
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]=f1 ;
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]=f2 ;
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]=f3 ;
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]=f4 ;
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]=f5 ;
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]=f6 ;
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]=f7 ;
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]=f8 ;
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]=f9 ;
f[j+10*pitch*yDim*(zDim/GPU_N-2)]=f10;
f[j+11*pitch*yDim*(zDim/GPU_N-2)]=f11;
f[j+12*pitch*yDim*(zDim/GPU_N-2)]=f12;
f[j+13*pitch*yDim*(zDim/GPU_N-2)]=f13;
f[j+14*pitch*yDim*(zDim/GPU_N-2)]=f14;
f[j+15*pitch*yDim*(zDim/GPU_N-2)]=f15;
f[j+16*pitch*yDim*(zDim/GPU_N-2)]=f16;
f[j+17*pitch*yDim*(zDim/GPU_N-2)]=f17;
f[j+18*pitch*yDim*(zDim/GPU_N-2)]=f18;
}
}
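//Initialize one halo/buffer layer (g or h) to the same equilibrium state as
//initialize_single; level>0 uses LR coordinates.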
__global__ void initialize_buffer(float *g, size_t pitch, int yDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = x;
float ycoord = y;
float zcoord = 0+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0;
}
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*yDim]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*yDim]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
g[j+0 *pitch*yDim]=f0 ;
g[j+1 *pitch*yDim]=f1 ;
g[j+2 *pitch*yDim]=f2 ;
g[j+3 *pitch*yDim]=f3 ;
g[j+4 *pitch*yDim]=f4 ;
g[j+5 *pitch*yDim]=f5 ;
g[j+6 *pitch*yDim]=f6 ;
g[j+7 *pitch*yDim]=f7 ;
g[j+8 *pitch*yDim]=f8 ;
g[j+9 *pitch*yDim]=f9 ;
g[j+10*pitch*yDim]=f10;
g[j+11*pitch*yDim]=f11;
g[j+12*pitch*yDim]=f12;
g[j+13*pitch*yDim]=f13;
g[j+14*pitch*yDim]=f14;
g[j+15*pitch*yDim]=f15;
g[j+16*pitch*yDim]=f16;
g[j+17*pitch*yDim]=f17;
g[j+18*pitch*yDim]=f18;
}
}
//zMin = minimum zcoord, zNum = number of nodes in z
//void WriteResults(float *f, ofstream &output, float omega, int xDim, int yDim, int zMin, int zNum, float x0, float y0, float z0, float scale)
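//Write one GPU's slice of the base grid to the .dat file: the bottom buffer (gin),
//the interior layers (fin) and the top buffer (hin). Each line contains i, j, k,
//u, v, w, rho and the time-averaged / fluctuating velocity fields.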
void WriteResults(ofstream &output, float *fin, float *gin, float *hin, float *uAv, float *vAv, float *wAv,
float *uFluc, float *vFluc, float *wFluc, float omega, int GPU_N, int GPU)
{
float f[19];
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XDIM]<<","<<vAv[i+j*XDIM]<<", "<<uFluc[i+j*XDIM]<<","<<vFluc[i+j*XDIM]<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XDIM+k*XDIM*YDIM]<<","<<vAv[i+j*XDIM+k*XDIM*YDIM]<<", "
<<uFluc[i+j*XDIM+k*XDIM*YDIM]<<","<<vFluc[i+j*XDIM+k*XDIM*YDIM]<<endl;
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<vAv[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "
<<uFluc[i+j*XDIM+(ZDIM-1)*XDIM*YDIM]<<","<<vFluc[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl;
}}
}
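//Same as WriteResults but for the refined region: node indices are converted to
//physical coordinates via LRX0/LRY0/LRZ0 and LRFACTOR.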
void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float *uAv, float *vAv, float *wAv,
float *uFluc, float *vFluc, float *wFluc, float omega, int GPU_N, int GPU)
{
float f[19];
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM]<<","<<vAv[i+j*XLRDIM]<<", "<<uFluc[i+j*XLRDIM]<<","<<vFluc[i+j*XLRDIM]<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<vAv[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<uFluc[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<vFluc[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl;
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<vAv[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<uFluc[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<vFluc[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl;
}}
}
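//Write the accumulated force history, normalized by UMAX^2*ZDIM*OBSTR1
//(the reference is multiplied by LRLEVEL^2 when the forces come from the refined grid).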
void WriteForces(float *FX, float *FY, float *FZ, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+ForceTime<<", "<<FX[i]/ref<<", "<<FY[i]/ref<<", "<<FZ[i]/ref<<endl;
}
}
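//Dump the compile-time simulation parameters (domain sizes, block sizes, obstacle
//geometry, Re, relaxation rates, refinement settings) to the .inputs file.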
void WriteInputs(ofstream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinski LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
output.close();
}
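//Host driver: parses the optional "-no" flag, computes omega/omegaLR and the
//coarse-fine scale factors SF_cf/SF_fc, allocates and initializes per-GPU arrays
//(plus the LR arrays when REFINEMENT==1), then runs the time loop with halo work on
//stream_halo overlapped with interior updates on stream_inner.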
int main(int argc, char *argv[])
{
int GPU_N;
hipGetDeviceCount(&GPU_N);
//GPU_N = 1;
cout<<"number of GPUs: "<<GPU_N<<endl;
int outputflag = 1;
if(argc>1){
if(strcmp(argv[1],"-no")==0){
outputflag = 0;
cout<<"no outputs option\n";
}
}
ofstream output;
ofstream outputForce;
ofstream outputInputs;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = XDIM*sizeof(float);//pitch*sizeof(float);
size_t pitch_elements = XDIM;//pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
cout<<"omega : "<<omega<<endl;
cout<<"omegaLR : "<<omegaLR<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"gridLR: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Model: "<<MODEL<<endl;
cout<<"Refinement: "<<LRLEVEL<<endl;
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *FX_h[GPU_N],*FY_h[GPU_N],*FZ_h[GPU_N];
float *FX_d[GPU_N],*FY_d[GPU_N],*FZ_d[GPU_N];
float *FX_total,*FY_total,*FZ_total;
float *uAv_h[GPU_N], *vAv_h[GPU_N], *wAv_h[GPU_N], *uAv_d[GPU_N], *vAv_d[GPU_N], *wAv_d[GPU_N];
float *uFluc_h[GPU_N], *vFluc_h[GPU_N], *wFluc_h[GPU_N], *uFluc_d[GPU_N], *vFluc_d[GPU_N], *wFluc_d[GPU_N];
FX_total = (float *)malloc(ForceTime*sizeof(float));
FY_total = (float *)malloc(ForceTime*sizeof(float));
FZ_total = (float *)malloc(ForceTime*sizeof(float));
for(i=0;i<(ForceTime);i++){
FX_total[i] = 0;
FY_total[i] = 0;
FZ_total[i] = 0;
}
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
FX_h [n] = (float *)malloc(ForceTime*sizeof(float));
FY_h [n] = (float *)malloc(ForceTime*sizeof(float));
FZ_h [n] = (float *)malloc(ForceTime*sizeof(float));
uAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
vAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
wAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
uFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
vFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
wFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
hipDeviceEnablePeerAccess(m,0);
}
hipMalloc((void **) &f_inner_A_d[n], pitch_elements*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &f_inner_B_d[n], pitch_elements*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) & g_A_d[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_B_d[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_A_d[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_B_d[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_temp[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch_elements*YDIM* 19*sizeof(float));
hipMalloc((void **) & FX_d[n], (ForceTime)*sizeof(float));
hipMalloc((void **) & FY_d[n], (ForceTime)*sizeof(float));
hipMalloc((void **) & FZ_d[n], (ForceTime)*sizeof(float));
hipMalloc((void **) & uAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & vAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & wAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & uFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & vFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
hipMalloc((void **) & wFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(i=0;i<(ForceTime);i++){
FX_h[n][i] = 0;
FY_h[n][i] = 0;
FZ_h[n][i] = 0;
}
for (i = 0; i < XDIM*YDIM*ZDIM/GPU_N; i++){
uAv_h[n][i] = 0;
vAv_h[n][i] = 0;
wAv_h[n][i] = 0;
uFluc_h[n][i] = 0;
vFluc_h[n][i] = 0;
wFluc_h[n][i] = 0;
}
hipMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( uAv_d[n],pitch, uAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( vAv_d[n],pitch, vAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( wAv_d[n],pitch, wAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( uFluc_d[n],pitch, uFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( vFluc_d[n],pitch, vFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( wFluc_d[n],pitch, wFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy( FX_d[n], FX_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipMemcpy( FY_d[n], FY_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipMemcpy( FZ_d[n], FZ_h[n],sizeof(float)*(ForceTime),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_A_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_B_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_A_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_B_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_A_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_B_d[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_temp[n],pitch_elements,YDIM,GPU_N,0);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_temp[n],pitch_elements,YDIM,GPU_N,0);
}//end Malloc and Initialize
//data pointers for LR
float *f_inner_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_inner_LR_A_d[GPU_N], *g_LR_A_d[GPU_N], *h_LR_A_d[GPU_N];
float *f_inner_LR_B_d[GPU_N], *g_LR_B_d[GPU_N], *h_LR_B_d[GPU_N];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *uAvLR_h[GPU_N], *vAvLR_h[GPU_N], *wAvLR_h[GPU_N], *uAvLR_d[GPU_N], *vAvLR_d[GPU_N], *wAvLR_d[GPU_N];
float *uFlucLR_h[GPU_N], *vFlucLR_h[GPU_N], *wFlucLR_h[GPU_N], *uFlucLR_d[GPU_N], *vFlucLR_d[GPU_N], *wFlucLR_d[GPU_N];
size_t LRpitch = 2;
while(LRpitch<XLRDIM)
LRpitch=LRpitch*2;
LRpitch = XLRDIM*sizeof(float);//LRpitch*sizeof(float);
size_t LRpitch_elements = XLRDIM;//LRpitch/sizeof(float);
cout<<"LR Pitch (in elements): "<<LRpitch/sizeof(float)<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LRthreads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LRgrid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
//LR setup
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_inner_LR_h[n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
uAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
vAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
wAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
uFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
vFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
wFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipSetDevice(n);
hipMalloc((void **) &f_inner_LR_A_d[n], LRpitch_elements*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) &f_inner_LR_B_d[n], LRpitch_elements*YLRDIM*zLRInner*19*sizeof(float));
hipMalloc((void **) & g_LR_A_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & g_LR_B_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_A_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_B_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & g_LR_temp[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & h_LR_temp[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
hipMalloc((void **) & uAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & vAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & wAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & uFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & vFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
hipMalloc((void **) & wFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
//initialize host f_inner
for (i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_inner_LR_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for (i = 0; i < XLRDIM*YLRDIM*ZLRDIM/GPU_N; i++){
uAvLR_h[n][i] = 0;
vAvLR_h[n][i] = 0;
wAvLR_h[n][i] = 0;
uFlucLR_h[n][i] = 0;
vFlucLR_h[n][i] = 0;
wFlucLR_h[n][i] = 0;
}
hipMemcpy2D(f_inner_LR_A_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(f_inner_LR_B_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyHostToDevice);
hipMemcpy2D( g_LR_A_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( g_LR_B_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_LR_A_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_LR_B_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( uAvLR_d[n],LRpitch, uAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( vAvLR_d[n],LRpitch, vAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( wAvLR_d[n],LRpitch, wAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( uFlucLR_d[n],LRpitch, uFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( vFlucLR_d[n],LRpitch, vFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipMemcpy2D( wFlucLR_d[n],LRpitch, wFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize_single), dim3(LRgrid) , dim3(LRthreads), 0, 0, f_inner_LR_A_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_single), dim3(LRgrid) , dim3(LRthreads), 0, 0, f_inner_LR_B_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, g_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_LR_grid), dim3(LRthreads), 0, 0, h_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int n = 0; n<GPU_N; n++){
size_t mem_avail, mem_total;
hipSetDevice(n);
hipMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<mem_total-mem_avail<<endl;
cout<<"Device memory available for dev"<<n<<" : "<<mem_avail<<endl;
}
//Time loop
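//Each iteration advances two coarse steps (A->B, then B->A) using ping-pong buffers.
//Halo-layer kernels and peer-to-peer copies run on stream_halo while the interior
//update runs on stream_inner; when REFINEMENT==1 the LR grid takes two fine steps per
//coarse step and is coupled back to the coarse grid through the *_interp and
//*_Extract kernels.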
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_top_force) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
hipLaunchKernelGGL(( update_bottom_force), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_inner_force) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_A_d[n],f_inner_B_d[n],g_A_d[n],h_A_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t);
}
else{
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t);
}
}
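//Peer-to-peer halo exchange: each GPU receives the freshly computed bottom buffer of
//the GPU above into its h_temp and the top buffer of the GPU below into its g_temp.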
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][0],n,&g_B_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][0],n,&h_B_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF){
hipLaunchKernelGGL(( update_top_LR_force) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
hipLaunchKernelGGL(( update_bottom_LR_force), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
hipLaunchKernelGGL(( update_top_LR) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
hipLaunchKernelGGL(( update_bottom_LR), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t>=STARTF){
hipLaunchKernelGGL(( update_inner_LR_force) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+0.5f);
}
else{
hipLaunchKernelGGL(( update_inner_LR) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n],g_LR_A_d[n],h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t);
}
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR_interp) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_B_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
hipLaunchKernelGGL(( update_bottom_LR_interp), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_B_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inner_LR_interp), dim3(LRgrid),dim3(LRthreads),0,stream_inner[n], f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_B_d[n],g_B_d[n],h_B_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+0.5f);
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( f_Extract), dim3(grid),dim3(threads),0,stream_inner[n], f_inner_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( g_Extract), dim3(grid),dim3(threads),0,stream_inner[n], g_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( h_Extract), dim3(grid),dim3(threads),0,stream_inner[n], h_B_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
//B->A
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_top_force) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_B_d[n],h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
hipLaunchKernelGGL(( update_bottom_force), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_B_d[n],g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
hipLaunchKernelGGL(( update_inner_force) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_B_d[n],f_inner_A_d[n],g_B_d[n],h_B_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t+1);
}
else{
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t+1);
}
}
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&h_temp[n][0],n,&g_A_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
hipMemcpyPeerAsync(&g_temp[n][0],n,&h_A_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF){
hipLaunchKernelGGL(( update_top_LR_force) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
hipLaunchKernelGGL(( update_bottom_LR_force), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
hipLaunchKernelGGL(( update_top_LR) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
hipLaunchKernelGGL(( update_bottom_LR), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(t+1>=STARTF){
hipLaunchKernelGGL(( update_inner_LR_force) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1);
}
else{
hipLaunchKernelGGL(( update_inner_LR) , dim3(LRgrid), dim3(LRthreads), 0, stream_inner[n], f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1);
}
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_top_LR_interp) , dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_A_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
hipLaunchKernelGGL(( update_bottom_LR_interp), dim3(g_LR_grid), dim3(LRthreads), 0, stream_halo [n], g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_A_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
hipStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( update_inner_LR_interp) , dim3(LRgrid),dim3(LRthreads),0,stream_inner[n], f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_A_d[n],g_A_d[n],h_A_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1.5f);
}
for(int n = 0; n<GPU_N; n++){
hipMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipLaunchKernelGGL(( f_Extract), dim3(grid),dim3(threads),0,stream_inner[n], f_inner_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( g_Extract), dim3(grid),dim3(threads),0,stream_inner[n], g_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
hipLaunchKernelGGL(( h_Extract), dim3(grid),dim3(threads),0,stream_inner[n], h_A_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipDeviceSynchronize();
}
}//end Time loop
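	//stop wall-clock timer and report throughput in MLUPS (million lattice-node updates per second)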
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(outputflag == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
hipMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D(uAv_h[n],XDIM*sizeof(float),uAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(vAv_h[n],XDIM*sizeof(float),vAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(wAv_h[n],XDIM*sizeof(float),wAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(uFluc_h[n],XDIM*sizeof(float),uFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(vFluc_h[n],XDIM*sizeof(float),vFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(wFluc_h[n],XDIM*sizeof(float),wFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy( FX_h[n],FX_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
hipMemcpy( FY_h[n],FY_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
hipMemcpy( FZ_h[n],FZ_d[n],sizeof(float)*ForceTime,hipMemcpyDeviceToHost);
WriteResults(output,f_inner_h[n],g_h[n],h_h[n],uAv_h[n],vAv_h[n],wAv_h[n],uFluc_h[n],vFluc_h[n],wFluc_h[n],omega,GPU_N,n);
output<<endl;
//Write results
// WriteResults( g_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n ,1 ,0,0,0,1);
// WriteResults(f_inner_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n+1 ,zInner,0,0,0,1);
// WriteResults( h_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*(n+1)-1,1 ,0,0,0,1);
}
for(int i=0;i<ForceTime;i++){
FX_total[i] += FX_h[n][i];
FY_total[i] += FY_h[n][i];
FZ_total[i] += FZ_h[n][i];
}
hipFree(f_inner_A_d[n]);
hipFree(f_inner_B_d[n]);
hipFree( g_A_d[n]);
hipFree( g_B_d[n]);
hipFree( h_A_d[n]);
hipFree( h_B_d[n]);
hipFree( g_temp[n]);
hipFree( h_temp[n]);
hipFree( uAv_d[n]);
hipFree( vAv_d[n]);
hipFree( wAv_d[n]);
hipFree( uFluc_d[n]);
hipFree( vFluc_d[n]);
hipFree( wFluc_d[n]);
}//end write results
WriteForces(FX_total,FY_total,FZ_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
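	//if refinement is on, copy LR results to host, write them, and free LR device memory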
if(REFINEMENT == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
if(outputflag == 1){
hipMemcpy2D(f_inner_LR_h[n],XLRDIM*sizeof(float),f_inner_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D( g_LR_h[n],XLRDIM*sizeof(float), g_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( h_LR_h[n],XLRDIM*sizeof(float), h_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( uAvLR_h[n],XLRDIM*sizeof(float), uAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D( vAvLR_h[n],XLRDIM*sizeof(float), vAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D( wAvLR_h[n],XLRDIM*sizeof(float), wAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(uFlucLR_h[n],XLRDIM*sizeof(float),uFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(vFlucLR_h[n],XLRDIM*sizeof(float),vFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
hipMemcpy2D(wFlucLR_h[n],XLRDIM*sizeof(float),wFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,hipMemcpyDeviceToHost);
WriteResultsLR(output,f_inner_LR_h[n],g_LR_h[n],h_LR_h[n],uAvLR_h[n],vAvLR_h[n],wAvLR_h[n],uFlucLR_h[n],vFlucLR_h[n],wFlucLR_h[n],omegaLR,GPU_N,n);
//Write results
// WriteResults( g_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n ,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
// WriteResults(f_inner_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n+1 ,zLRInner,LRX0,LRY0,LRZ0,LRFACTOR);
// WriteResults( h_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*(n+1)-1,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
}
hipFree(f_inner_LR_A_d[n]);
hipFree(f_inner_LR_B_d[n]);
hipFree( g_LR_A_d[n]);
hipFree( g_LR_B_d[n]);
hipFree( h_LR_A_d[n]);
hipFree( h_LR_B_d[n]);
hipFree( g_LR_temp[n]);
hipFree( h_LR_temp[n]);
}//end GPU loop for LR
}//end write results of LR
return(0);
}
| 07c8775033d9548919d7d1ee15c541acee965953.cu | #include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "orig_LR3"
#define BLOCKSIZEX 192
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 192
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 192
#define YDIM 256
#define ZDIM 4
#define TMAX 20000
#define STARTF 0
#define OBSTR1 6.0f
#define OBSTX1 95.5f
#define OBSTY1 95.5f
#define OBSTZ1 32.5f
#define OBSTR2 32.f
#define OBSTX2 319.5f
#define OBSTY2 511.5f
#define OBSTZ2 31.5f
#define LRFACTOR 0.5f
#define LRLEVEL 2
#define LRX0 47.75f //minimum x coord of LR
#define XLRDIM 192 //number of nodes in x
#define LRY0 47.75f
#define YLRDIM 256
#define LRZ0 -0.25f
#define ZLRDIM 8
#define RE 20.f//2000.f//100.f;
#define UMAX 0.08f
#define SmagLES "NO" //YES,NO
#define MODEL "MRT" //BGK,MRT,STREAM
#define REFINEMENT 1 //1,0
#define CS 0.1f
#define VELAV 1
#define START_VELAV 400000
#define START_VELFLUC 700000
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
inline __device__ int ImageFcn(float x, float y, float z){
int value = 0;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
// else
// if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
// if(z<3)
// value = 1;
// if(z>ZDIM-4)
// value = 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
return value;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// if(z == 1)
// value = 1;
// if(z == ZDIM-2)
// value = 1;
else if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
if(x == 0)
value = 26;
else if(x == XDIM-1)
value = 25;
// else if(x < 3)//== 0)
// value = 1;
// else if(x > XDIM-4)//== XDIM-1)
// value = 1;
return value;
}
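//parabolic (Poiseuille) velocity profile: zero at the walls, 1 at the channel centerline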
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
inline __device__ float trilinear_interp (float v000, float v001, float v010, float v011,
float v100, float v101, float v110, float v111, float x, float y, float z){
return v000*(1.f-x)*(1.f-y)*(1.f-z)+
v001*( x)*(1.f-y)*(1.f-z)+
v010*(1.f-x)*( y)*(1.f-z)+
v011*( x)*( y)*(1.f-z)+
v100*(1.f-x)*(1.f-y)*( z)+
v101*( x)*(1.f-y)*( z)+
v110*(1.f-x)*( y)*( z)+
v111*( x)*( y)*( z);
}
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
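//regularized velocity inlet on the west face: unknown distributions are set from equilibrium, then all f's are rebuilt from feq plus the reconstructed non-equilibrium stress tensor PI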
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 53)//DirichletWest
// {
// //DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 54)//DirichletWest
// {
// //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//DirichletWest
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
	else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
	else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
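//moment-based reconstruction for the north boundary: compute the MRT moments m1..m18 from the current distributions and rebuild f0..f18 with rho set to 1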
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
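//Boundary reconstruction used at the "south" face: density is computed from the
//current distributions, u and w are set to zero, v is prescribed by the caller,
//and f0-f18 are rebuilt from the resulting MRT moments.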
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
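//Host-side timing helper: stores the elapsed time x - y in seconds in *result and
//returns 1 if the difference is negative. Typical host usage (sketch):
// struct timeval t1, t2; double sec;
// gettimeofday(&t1, NULL); /* ...launch and sync kernels... */ gettimeofday(&t2, NULL);
// timeval_subtract(&sec, &t2, &t1);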
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
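//Index clamping helpers: dmin/dmax clamp an index into [0, b-1], while the _p
//variants wrap around to the opposite end (periodic-style clamping).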
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
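//Linear index of distribution f_num at node (x,y,z) in the interior ("f") array,
//clamped to the 19*pitch*YDIM*zInner allocation. f_memLR is the refined-mesh
//version; buff_mem/buff_memLR index the single-layer top/bottom buffer arrays.
//Example (sketch): float f5 = fA[f_mem(5,x,y,z,pitch,zInner)];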
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM*(zInner));
return index;
}
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
inline __device__ int buff_memLR(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YLRDIM;
index = dmax(index);
index = dmin(index,19*pitch*YLRDIM);
return index;
}
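//D3Q19 BGK equilibrium: fills f0-f18 with the second-order equilibrium
//distributions for the given rho, u, v, w.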
inline __device__ void bgk_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
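//Equilibrium distributions written in the MRT form used by this code: the linear
//part is set first and the quadratic velocity corrections are added below. Same
//macroscopic state as bgk_feq, different grouping of terms.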
inline __device__ void mrt_feq(float rho, float u, float v, float w, float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18)
{
float usqr = u*u+v*v+w*w;
f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v);
f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v);
f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v);
f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v);
f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w ;
f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w);
f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w);
f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w ;
f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-w);
f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v-w);
f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+w);
f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
}
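//Running time-average of u and v, started at step START_VELAV; vel_avLR is the
//refined-mesh version, which weights each sub-step by LRFACTOR.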
inline __device__ void vel_av(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv, float& vAv, int t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1);
vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1);
}
inline __device__ void vel_avLR(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv, float& vAv, float t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR);
}
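//Running average of the squared velocity fluctuations (u-uAv)^2 and (v-vAv)^2,
//started at step START_VELFLUC; vel_flucLR is the refined-mesh version.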
inline __device__ void vel_fluc(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv,
float& vAv, float& ufluc, float& vfluc, int t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1);
vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1);
}
inline __device__ void vel_flucLR(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float& uAv,
float& vAv, float& ufluc, float& vfluc, float t)
{
float u,v;//,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
u = (u-uAv)*(u-uAv);
v = (v-vAv)*(v-vAv);
ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR);
}
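//Single-relaxation-time (BGK) collision: relaxes f0-f18 toward the local
//equilibrium at rate omega.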
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
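//MRT collision: the non-conserved moments m1-m18 are computed relative to their
//equilibria; if SmagLES is enabled, omega is replaced by an effective relaxation
//rate from a Smagorinsky model based on the non-equilibrium stresses PI_ab, and
//the distributions are then relaxed in moment space.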
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
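//Variant of mrt_collide that also accepts a Smagorinsky constant Cs; in the
//current form Cs is only referenced in the commented-out alternatives and the
//active relaxation matches mrt_collide.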
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
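//Rescaling of the distributions at the grid-refinement interface (BGK model):
//each f is blended with its local equilibrium, f = SF*f + (1-SF)*feq.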
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
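//Interface rescaling for the MRT model: builds the MRT-form equilibria and a
//Smagorinsky stress estimate (unused in the active path), then blends
//f = SF*f + (1-SF)*feq with the caller-supplied scale factor.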
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
//float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
//SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
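//Fine-to-coarse interface rescaling with LES: the scale factor is derived from
//the two relaxation rates, SF = omega*(1-omega2)/((1-omega)*omega2), and each f
//is blended with its MRT-form equilibrium.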
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
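//Extraction kernel for the interior z layers: at coarse nodes on the inner rim of
//the refined region, the fine-mesh distributions are trilinearly interpolated,
//rescaled with mrt_scale_cf/bgk_scale_cf (depending on MODEL), and written into
//the coarse-mesh f array.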
__global__ void f_Extract(float* fout, float* fin, float* gin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
float zcoord = z+1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
float zcoord_LR = LRLEVEL*(zcoord-LRZ0)-1.f;//-1.f to account for g layer
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int zm = int(zcoord_LR);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = zcoord_LR-zm;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zm,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zm,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zm,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zm,pitch_f,zInner_f)];
float v100 = fin[f_memLR(i ,xm,ym,zp,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,zp,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,zp,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,zp,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fout[f_mem(0 ,x,y,z,pitch_c,zInner_c)] = f[0 ];
fout[f_mem(1 ,x,y,z,pitch_c,zInner_c)] = f[1 ];
fout[f_mem(2 ,x,y,z,pitch_c,zInner_c)] = f[2 ];
fout[f_mem(3 ,x,y,z,pitch_c,zInner_c)] = f[3 ];
fout[f_mem(4 ,x,y,z,pitch_c,zInner_c)] = f[4 ];
fout[f_mem(5 ,x,y,z,pitch_c,zInner_c)] = f[5 ];
fout[f_mem(6 ,x,y,z,pitch_c,zInner_c)] = f[6 ];
fout[f_mem(7 ,x,y,z,pitch_c,zInner_c)] = f[7 ];
fout[f_mem(8 ,x,y,z,pitch_c,zInner_c)] = f[8 ];
fout[f_mem(9 ,x,y,z,pitch_c,zInner_c)] = f[9 ];
fout[f_mem(10,x,y,z,pitch_c,zInner_c)] = f[10];
fout[f_mem(11,x,y,z,pitch_c,zInner_c)] = f[11];
fout[f_mem(12,x,y,z,pitch_c,zInner_c)] = f[12];
fout[f_mem(13,x,y,z,pitch_c,zInner_c)] = f[13];
fout[f_mem(14,x,y,z,pitch_c,zInner_c)] = f[14];
fout[f_mem(15,x,y,z,pitch_c,zInner_c)] = f[15];
fout[f_mem(16,x,y,z,pitch_c,zInner_c)] = f[16];
fout[f_mem(17,x,y,z,pitch_c,zInner_c)] = f[17];
fout[f_mem(18,x,y,z,pitch_c,zInner_c)] = f[18];
}
}
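// g_Extract: same extraction as f_Extract, but for the coarse bottom buffer
// layer. The fine-mesh donors straddle the fine g buffer (below) and the
// first interior fine slice (above), so the vertical weight zf is fixed at 0.5.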
__global__ void g_Extract(float* gout, float* fin, float* gin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = 0;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = gin[buff_memLR(i ,xm,ym,pitch_f)];
float v001 = gin[buff_memLR(i ,xp,ym,pitch_f)];
float v010 = gin[buff_memLR(i ,xm,yp,pitch_f)];
float v011 = gin[buff_memLR(i ,xp,yp,pitch_f)];
float v100 = fin[f_memLR(i ,xm,ym,0,pitch_f,zInner_f)];
float v101 = fin[f_memLR(i ,xp,ym,0,pitch_f,zInner_f)];
float v110 = fin[f_memLR(i ,xm,yp,0,pitch_f,zInner_f)];
float v111 = fin[f_memLR(i ,xp,yp,0,pitch_f,zInner_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
gout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
gout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
gout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
gout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
gout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
gout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
gout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
gout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
gout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
gout[buff_mem(10,x,y,pitch_c)] = f[10];
gout[buff_mem(11,x,y,pitch_c)] = f[11];
gout[buff_mem(12,x,y,pitch_c)] = f[12];
gout[buff_mem(13,x,y,pitch_c)] = f[13];
gout[buff_mem(14,x,y,pitch_c)] = f[14];
gout[buff_mem(15,x,y,pitch_c)] = f[15];
gout[buff_mem(16,x,y,pitch_c)] = f[16];
gout[buff_mem(17,x,y,pitch_c)] = f[17];
gout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
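// h_Extract: counterpart of g_Extract for the coarse top buffer layer; donors
// are the last interior fine slice (zInner_f-1) and the fine h buffer above it.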
__global__ void h_Extract(float* hout, float* fin, float* hin,
size_t pitch_c, size_t pitch_f, int zInner_c, int zInner_f, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//local index on f of coarse mesh
int y = threadIdx.y+blockIdx.y*blockDim.y;
float xcoord = x;//xcoord in gpu
float ycoord = y;//ycoord in gpu
//float zcoord = zInner_c+2-1;//zcoord in gpu
float f[19];
if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1) &&
(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
{
float xcoord_LR = LRLEVEL*(xcoord-LRX0);//coord in refined region coordinates
float ycoord_LR = LRLEVEL*(ycoord-LRY0);
//float zcoord_LR = LRLEVEL*(zcoord-LRZ0);
int xm = int(xcoord_LR);
int ym = int(ycoord_LR);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord_LR-xm;
float yf = ycoord_LR-ym;
float zf = 0.5f;
for(int i=0;i<19;i++){
float v000 = fin[f_memLR(i ,xm,ym,zInner_f-1,pitch_f,zInner_f)];
float v001 = fin[f_memLR(i ,xp,ym,zInner_f-1,pitch_f,zInner_f)];
float v010 = fin[f_memLR(i ,xm,yp,zInner_f-1,pitch_f,zInner_f)];
float v011 = fin[f_memLR(i ,xp,yp,zInner_f-1,pitch_f,zInner_f)];
float v100 = hin[buff_memLR(i ,xm,ym,pitch_f)];
float v101 = hin[buff_memLR(i ,xp,ym,pitch_f)];
float v110 = hin[buff_memLR(i ,xm,yp,pitch_f)];
float v111 = hin[buff_memLR(i ,xp,yp,pitch_f)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hout[buff_mem(0 ,x,y,pitch_c)] = f[0 ];
hout[buff_mem(1 ,x,y,pitch_c)] = f[1 ];
hout[buff_mem(2 ,x,y,pitch_c)] = f[2 ];
hout[buff_mem(3 ,x,y,pitch_c)] = f[3 ];
hout[buff_mem(4 ,x,y,pitch_c)] = f[4 ];
hout[buff_mem(5 ,x,y,pitch_c)] = f[5 ];
hout[buff_mem(6 ,x,y,pitch_c)] = f[6 ];
hout[buff_mem(7 ,x,y,pitch_c)] = f[7 ];
hout[buff_mem(8 ,x,y,pitch_c)] = f[8 ];
hout[buff_mem(9 ,x,y,pitch_c)] = f[9 ];
hout[buff_mem(10,x,y,pitch_c)] = f[10];
hout[buff_mem(11,x,y,pitch_c)] = f[11];
hout[buff_mem(12,x,y,pitch_c)] = f[12];
hout[buff_mem(13,x,y,pitch_c)] = f[13];
hout[buff_mem(14,x,y,pitch_c)] = f[14];
hout[buff_mem(15,x,y,pitch_c)] = f[15];
hout[buff_mem(16,x,y,pitch_c)] = f[16];
hout[buff_mem(17,x,y,pitch_c)] = f[17];
hout[buff_mem(18,x,y,pitch_c)] = f[18];
}
}
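//-----------------------------------------------------------------------------
// Coarse-mesh LBM update kernels. update_inner advances one time step on the
// interior z-slices of this GPU's slab:
//   1. pull-stream the 19 populations from fA; at the first/last interior
//      slice the z-neighbours come from the g/h buffer layers instead,
//   2. bounce back at solid nodes (im==1 or 10), extrapolate at the north
//      outlet (im==100) and the south inlet (im==200, velocity UMAX),
//      otherwise apply boundaries(),
//   3. collide with MRT or BGK at relaxation rate omega,
//   4. when VELAV is on, accumulate running velocity averages (from
//      START_VELAV) and fluctuations (from START_VELFLUC),
//   5. write the post-collision populations into fB (A/B ping-pong).
// Host-side launch sketch (an assumption for illustration only; the actual
// launch configuration is set up elsewhere in this code):
//   dim3 threads(BLOCKSIZEX, BLOCKSIZEY, 1);
//   dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, zInner);
//   update_inner<<<grid, threads>>>(fA_d, fB_d, g_d, h_d, omega, pitch,
//                                   gpu_id, zInner, uAv_d, vAv_d, uf_d, vf_d, t);
//-----------------------------------------------------------------------------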
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner, //pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
}
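// update_bottom: same stream/boundary/collide sequence for the g buffer layer
// at the bottom of the slab. Upward-moving populations (9-13) arrive from the
// temp halo exchanged with the neighbouring GPU below; downward-moving ones
// (14-18) come from the first interior f slice above.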
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
}
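// update_top: mirror of update_bottom for the h buffer layer at the top of the
// slab; populations 9-13 stream in from the last interior f slice and 14-18
// from the temp halo exchanged with the GPU above.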
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
}
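//-----------------------------------------------------------------------------
// Force-accumulating variants. update_inner_force repeats update_inner and, at
// solid-surface nodes (im==10), adds twice the net x/y/z momentum of the
// populations bounced back at the node into shared sumX/sumY/sumZ (momentum
// exchange). The block then does a tree reduction over threadIdx.x (this
// assumes BLOCKSIZEX is a power of two) and thread 0 atomically adds the
// partial sums into FX/FY/FZ at index Force_t, producing one force sample per
// call that the host can copy back after the run.
//-----------------------------------------------------------------------------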
__global__ void update_inner_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, int t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2)+1+z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YDIM];
vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YDIM] = v_fluc;
}
}
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
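// update_bottom_force: update_bottom plus the same momentum-exchange sums and
// block reduction as update_inner_force, applied to the bottom buffer layer.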
__global__ void update_bottom_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,GPU*(zInner+2),im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
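// update_top_force: update_top plus the momentum-exchange accumulation for the
// top buffer layer.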
__global__ void update_top_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;//coord in GPU
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,(GPU+1)*(zInner+2)-1,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
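//-----------------------------------------------------------------------------
// Refined-mesh (LR) update kernels. These mirror the coarse kernels but index
// into the XLRDIM/YLRDIM arrays and convert local indices to physical
// coordinates through LRX0/LRY0/LRZ0 and LRFACTOR before calling ImageFcn.
// Because the refined region is interior to the domain, only bounce-back and
// collision are applied; there are no inlet/outlet extrapolation branches.
//-----------------------------------------------------------------------------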
__global__ void update_inner_LR(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}
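// update_bottom_LR and update_top_LR advance the refined-mesh buffer layers,
// pulling the missing z-neighbours from the temp halo exchanged with the
// adjacent GPU and from the nearest interior f slice.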
__global__ void update_bottom_LR(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
__global__ void update_top_LR(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
// f9 = hA [buff_memLR(9 ,x ,y ,pitch)];
// f10= hA [buff_memLR(10,x-1,y ,pitch)];
// f11= hA [buff_memLR(11,x ,y-1,pitch)];
// f12= hA [buff_memLR(12,x+1,y ,pitch)];
// f13= hA [buff_memLR(13,x ,y+1,pitch)];
// f14= hA [buff_memLR(9 ,x ,y ,pitch)];
// f15= hA [buff_memLR(10,x-1,y ,pitch)];
// f16= hA [buff_memLR(11,x ,y-1,pitch)];
// f17= hA [buff_memLR(12,x+1,y ,pitch)];
// f18= hA [buff_memLR(13,x ,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
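// update_inner_LR_force: refined-mesh counterpart of update_inner_force; the
// buffer-layer force kernels that follow use the same momentum-exchange sums
// and block reduction.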
__global__ void update_inner_LR_force(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+LRFACTOR*(GPU*(zInner+2)+1+z));
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
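//update_bottom_LR_force: collide/stream the bottom halo layer of the refined (LR) grid.
//Upward-moving populations f9-f13 are pulled from the inter-GPU halo buffer temp, the
//downward-moving f14-f18 from the first interior slab of f. Bounce-back nodes flagged
//im==10 accumulate the momentum exchange into FX/FY/FZ via the shared-memory reduction.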
__global__ void update_bottom_LR_force(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(LRX0+LRFACTOR*x,LRY0+LRFACTOR*y,LRZ0+GPU*z*LRFACTOR);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+GPU*LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR (14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR (15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR (16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR (17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
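//update_top_LR_force: same as update_bottom_LR_force but for the top halo layer of the
//refined grid; f9-f13 come from the last interior slab of f and f14-f18 from the
//inter-GPU halo buffer temp, with the same bounce-back force reduction at the end.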
__global__ void update_top_LR_force(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner,
float *FX, float *FY, float *FZ, int Force_t)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord in LR region
int j = x+y*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+LRFACTOR*z;
int im = ImageFcn(xcoord,ycoord,zcoord);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
__shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
syncthreads();
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
if(im == 10){
check[0] = 1;
sumX[threadIdx.x] =2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17;
sumY[threadIdx.x] =2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18;
sumZ[threadIdx.x] =2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
}
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
sumX[threadIdx.x]=0.f;
sumY[threadIdx.x]=0.f;
sumZ[threadIdx.x]=0.f;
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint];
sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint];
sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint];
}
syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&FX[Force_t],sumX[0]);
atomicAdd(&FY[Force_t],sumY[0]);
atomicAdd(&FZ[Force_t],sumZ[0]);
}
}
}
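//update_inner_LR_interp: interior update of the refined grid for the sub-step that follows
//a coarse-grid advance. Nodes in the LRLEVEL-wide x/y boundary band are not collided;
//their distributions are trilinearly interpolated from the coarse arrays (f_c, g_c, h_c)
//and rescaled by SF. All other nodes stream, bounce back or collide as in update_inner_LR.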
__global__ void update_inner_LR_interp(float* fA, float* fB, float* g, float* h, float* f_c, float* g_c, float* h_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU,//pitch in elements
float *uAv, float *vAv, float *ufluc, float *vfluc, float t)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements)
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+(1+z)*LRFACTOR;//local zcoord within GPU
int im = ImageFcn(xcoord,ycoord,GPU*(zInner_c+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<1)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = g_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = g_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = g_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = g_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = f_c[ f_mem(i ,xm,ym,0 ,pitch_c,zInner_c)];
float v101 = f_c[ f_mem(i ,xp,ym,0 ,pitch_c,zInner_c)];
float v110 = f_c[ f_mem(i ,xm,yp,0 ,pitch_c,zInner_c)];
float v111 = f_c[ f_mem(i ,xp,yp,0 ,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
else if(zcoord>(zInner_c+2)-2)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[ f_mem(i ,xm,ym,zInner_c-1,pitch_c,zInner_c)];
float v001 = f_c[ f_mem(i ,xp,ym,zInner_c-1,pitch_c,zInner_c)];
float v010 = f_c[ f_mem(i ,xm,yp,zInner_c-1,pitch_c,zInner_c)];
float v011 = f_c[ f_mem(i ,xp,yp,zInner_c-1,pitch_c,zInner_c)];
float v100 = h_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = h_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = h_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = h_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord>ZDIM
else{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = f_c[f_mem(i ,xm,ym,zm-1,pitch_c,zInner_c)];//-1 to correct for index in f
float v001 = f_c[f_mem(i ,xp,ym,zm-1,pitch_c,zInner_c)];
float v010 = f_c[f_mem(i ,xm,yp,zm-1,pitch_c,zInner_c)];
float v011 = f_c[f_mem(i ,xp,yp,zm-1,pitch_c,zInner_c)];
float v100 = f_c[f_mem(i ,xm,ym,zp-1,pitch_c,zInner_c)];
float v101 = f_c[f_mem(i ,xp,ym,zp-1,pitch_c,zInner_c)];
float v110 = f_c[f_mem(i ,xm,yp,zp-1,pitch_c,zInner_c)];
float v111 = f_c[f_mem(i ,xp,yp,zp-1,pitch_c,zInner_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f[0 ];
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f[1 ];
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f[2 ];
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f[3 ];
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f[4 ];
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f[5 ];
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f[6 ];
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f[7 ];
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f[8 ];
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f[9 ];
fB[f_memLR(10,x,y,z,pitch,zInner)] = f[10];
fB[f_memLR(11,x,y,z,pitch,zInner)] = f[11];
fB[f_memLR(12,x,y,z,pitch,zInner)] = f[12];
fB[f_memLR(13,x,y,z,pitch,zInner)] = f[13];
fB[f_memLR(14,x,y,z,pitch,zInner)] = f[14];
fB[f_memLR(15,x,y,z,pitch,zInner)] = f[15];
fB[f_memLR(16,x,y,z,pitch,zInner)] = f[16];
fB[f_memLR(17,x,y,z,pitch,zInner)] = f[17];
fB[f_memLR(18,x,y,z,pitch,zInner)] = f[18];
}
else
{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = fA[j];
f1 = fA[f_memLR (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_memLR (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_memLR (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_memLR (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_memLR (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_memLR (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_memLR (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_memLR (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner-1){//top nodes need info from h
f9 = fA[f_memLR (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_memLR (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_memLR (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_memLR (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_memLR (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_memLR(14,x ,y ,pitch)];
f15= h [buff_memLR(15,x-1,y ,pitch)];
f16= h [buff_memLR(16,x ,y-1,pitch)];
f17= h [buff_memLR(17,x+1,y ,pitch)];
f18= h [buff_memLR(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_memLR(9 ,x ,y ,pitch)];
f10= g [buff_memLR(10,x-1,y ,pitch)];
f11= g [buff_memLR(11,x ,y-1,pitch)];
f12= g [buff_memLR(12,x+1,y ,pitch)];
f13= g [buff_memLR(13,x ,y+1,pitch)];
f14= fA[f_memLR (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_memLR (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_memLR (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_memLR (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_memLR (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_memLR(9 ,x ,y ,z-1,pitch,zInner)];//
f10= fA[f_memLR(10,x-1,y ,z-1,pitch,zInner)];//
f11= fA[f_memLR(11,x ,y-1,z-1,pitch,zInner)];//
f12= fA[f_memLR(12,x+1,y ,z-1,pitch,zInner)];//
f13= fA[f_memLR(13,x ,y+1,z-1,pitch,zInner)];//
f14= fA[f_memLR(14,x ,y ,z+1,pitch,zInner)];//
f15= fA[f_memLR(15,x-1,y ,z+1,pitch,zInner)];//
f16= fA[f_memLR(16,x ,y-1,z+1,pitch,zInner)];//
f17= fA[f_memLR(17,x+1,y ,z+1,pitch,zInner)];//
f18= fA[f_memLR(18,x ,y+1,z+1,pitch,zInner)];//
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f18;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
if(VELAV == 1){
if(t>=START_VELAV && t<START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t);
uAv[x+y*pitch+(z+1)*pitch*YLRDIM] = u_Av;
vAv[x+y*pitch+(z+1)*pitch*YLRDIM] = v_Av;
}
else if(t>=START_VELFLUC){
float u_Av = uAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_Av = vAv[x+y*pitch+(z+1)*pitch*YLRDIM];
float u_fluc = ufluc[x+y*pitch+(z+1)*pitch*YLRDIM];
float v_fluc = vfluc[x+y*pitch+(z+1)*pitch*YLRDIM];
vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t);
ufluc[x+y*pitch+(z+1)*pitch*YLRDIM] = u_fluc;
vfluc[x+y*pitch+(z+1)*pitch*YLRDIM] = v_fluc;
}
}
fB[f_memLR(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_memLR(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_memLR(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_memLR(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_memLR(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_memLR(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_memLR(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_memLR(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_memLR(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_memLR(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_memLR(10,x,y,z,pitch,zInner)] = f10;
fB[f_memLR(11,x,y,z,pitch,zInner)] = f11;
fB[f_memLR(12,x,y,z,pitch,zInner)] = f12;
fB[f_memLR(13,x,y,z,pitch,zInner)] = f13;
fB[f_memLR(14,x,y,z,pitch,zInner)] = f14;
fB[f_memLR(15,x,y,z,pitch,zInner)] = f15;
fB[f_memLR(16,x,y,z,pitch,zInner)] = f16;
fB[f_memLR(17,x,y,z,pitch,zInner)] = f17;
fB[f_memLR(18,x,y,z,pitch,zInner)] = f18;
}
}//end else (no interp)
}
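//update_bottom_LR_interp: bottom-halo counterpart of update_inner_LR_interp; boundary-band
//nodes are filled by trilinear interpolation from the coarse buffers (temp_c, g_c) and
//rescaled by SF, while the remaining halo nodes stream and collide as usual.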
__global__ void update_bottom_LR_interp(float* gA, float* gB, float* f, float* temp, float* g_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
//int z = (zInner+2)-1;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0;
int im = ImageFcn(xcoord,ycoord,zcoord+GPU*LRFACTOR*ZLRDIM);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
if(zcoord<0)
{
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord)-1;//for zcoord<0
int xp = xm+1;
int yp = ym+1;
//int zp = zm+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v001 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v010 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v011 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
float v100 = g_c[buff_mem(i ,xm,ym,pitch_c)];
float v101 = g_c[buff_mem(i ,xp,ym,pitch_c)];
float v110 = g_c[buff_mem(i ,xm,yp,pitch_c)];
float v111 = g_c[buff_mem(i ,xp,yp,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
}//end zcoord<0
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
gB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
gB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
gB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
gB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
gB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
gB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
gB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
gB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
gB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
gB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
gB[buff_memLR(10,x,y,pitch)] = f[10];
gB[buff_memLR(11,x,y,pitch)] = f[11];
gB[buff_memLR(12,x,y,pitch)] = f[12];
gB[buff_memLR(13,x,y,pitch)] = f[13];
gB[buff_memLR(14,x,y,pitch)] = f[14];
gB[buff_memLR(15,x,y,pitch)] = f[15];
gB[buff_memLR(16,x,y,pitch)] = f[16];
gB[buff_memLR(17,x,y,pitch)] = f[17];
gB[buff_memLR(18,x,y,pitch)] = f[18];
}
else
{
f0 = gA [j];
f1 = gA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = gA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = gA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = gA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = gA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = gA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = gA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = gA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = temp[buff_memLR(9 ,x ,y ,pitch)];
f10= temp[buff_memLR(10,x-1,y ,pitch)];
f11= temp[buff_memLR(11,x ,y-1,pitch)];
f12= temp[buff_memLR(12,x+1,y ,pitch)];
f13= temp[buff_memLR(13,x ,y+1,pitch)];
f14= f [f_memLR(14,x ,y ,0,pitch, zInner)];
f15= f [f_memLR(15,x-1,y ,0,pitch, zInner)];
f16= f [f_memLR(16,x ,y-1,0,pitch, zInner)];
f17= f [f_memLR(17,x+1,y ,0,pitch, zInner)];
f18= f [f_memLR(18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f3 ;
gB[buff_memLR(2 ,x,y,pitch)] = f4 ;
gB[buff_memLR(3 ,x,y,pitch)] = f1 ;
gB[buff_memLR(4 ,x,y,pitch)] = f2 ;
gB[buff_memLR(5 ,x,y,pitch)] = f7 ;
gB[buff_memLR(6 ,x,y,pitch)] = f8 ;
gB[buff_memLR(7 ,x,y,pitch)] = f5 ;
gB[buff_memLR(8 ,x,y,pitch)] = f6 ;
gB[buff_memLR(9 ,x,y,pitch)] = f14;
gB[buff_memLR(10,x,y,pitch)] = f17;
gB[buff_memLR(11,x,y,pitch)] = f18;
gB[buff_memLR(12,x,y,pitch)] = f15;
gB[buff_memLR(13,x,y,pitch)] = f16;
gB[buff_memLR(14,x,y,pitch)] = f9 ;
gB[buff_memLR(15,x,y,pitch)] = f12;
gB[buff_memLR(16,x,y,pitch)] = f13;
gB[buff_memLR(17,x,y,pitch)] = f10;
gB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_memLR(0 ,x,y,pitch)] = f0 ;
gB[buff_memLR(1 ,x,y,pitch)] = f1 ;
gB[buff_memLR(2 ,x,y,pitch)] = f2 ;
gB[buff_memLR(3 ,x,y,pitch)] = f3 ;
gB[buff_memLR(4 ,x,y,pitch)] = f4 ;
gB[buff_memLR(5 ,x,y,pitch)] = f5 ;
gB[buff_memLR(6 ,x,y,pitch)] = f6 ;
gB[buff_memLR(7 ,x,y,pitch)] = f7 ;
gB[buff_memLR(8 ,x,y,pitch)] = f8 ;
gB[buff_memLR(9 ,x,y,pitch)] = f9 ;
gB[buff_memLR(10,x,y,pitch)] = f10;
gB[buff_memLR(11,x,y,pitch)] = f11;
gB[buff_memLR(12,x,y,pitch)] = f12;
gB[buff_memLR(13,x,y,pitch)] = f13;
gB[buff_memLR(14,x,y,pitch)] = f14;
gB[buff_memLR(15,x,y,pitch)] = f15;
gB[buff_memLR(16,x,y,pitch)] = f16;
gB[buff_memLR(17,x,y,pitch)] = f17;
gB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
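//update_top_LR_interp: top-halo counterpart; boundary-band nodes are interpolated from the
//coarse h_c/temp_c buffers and rescaled by SF, remaining nodes stream and collide.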
__global__ void update_top_LR_interp(float* hA, float* hB, float* f, float* temp, float* h_c, float* temp_c,
float omega, size_t pitch, int zInner, size_t pitch_c, int zInner_c, float SF, int GPU)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
//int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float xcoord = LRX0+x*LRFACTOR;
float ycoord = LRY0+y*LRFACTOR;
float zcoord = LRZ0+z*LRFACTOR;
int im = ImageFcn(xcoord,ycoord,GPU*LRFACTOR*(zInner+2)+zcoord);
if(x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL)
{
float f[19];
int xm = int(xcoord);
int ym = int(ycoord);
int zm = int(zcoord);
int xp = xm+1;
int yp = ym+1;
float xf = xcoord-xm;
float yf = ycoord-ym;
float zf = zcoord-zm;
for(int i=0;i<19;i++){
float v000 = h_c[buff_mem(i ,xm,ym,pitch_c)];
float v001 = h_c[buff_mem(i ,xp,ym,pitch_c)];
float v010 = h_c[buff_mem(i ,xm,yp,pitch_c)];
float v011 = h_c[buff_mem(i ,xp,yp,pitch_c)];
float v100 = temp_c[buff_mem(i ,xm,ym ,pitch_c)];
float v101 = temp_c[buff_mem(i ,xp,ym ,pitch_c)];
float v110 = temp_c[buff_mem(i ,xm,yp ,pitch_c)];
float v111 = temp_c[buff_mem(i ,xp,yp ,pitch_c)];
f[i] = trilinear_interp(v000, v001, v010, v011, v100, v101, v110, v111, xf, yf, zf);
}
// }//end zcoord>ZDIM
if(MODEL == "MRT")
mrt_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
else if(MODEL == "BGK")
bgk_scale_cf(f[0],f[1],f[2],f[3],f[4],f[5],f[6],f[7],f[8],f[9],f[10],f[11],f[12],f[13],f[14],f[15],f[16],f[17],f[18],SF);
hB[buff_memLR(0 ,x,y,pitch)] = f[0 ];
hB[buff_memLR(1 ,x,y,pitch)] = f[1 ];
hB[buff_memLR(2 ,x,y,pitch)] = f[2 ];
hB[buff_memLR(3 ,x,y,pitch)] = f[3 ];
hB[buff_memLR(4 ,x,y,pitch)] = f[4 ];
hB[buff_memLR(5 ,x,y,pitch)] = f[5 ];
hB[buff_memLR(6 ,x,y,pitch)] = f[6 ];
hB[buff_memLR(7 ,x,y,pitch)] = f[7 ];
hB[buff_memLR(8 ,x,y,pitch)] = f[8 ];
hB[buff_memLR(9 ,x,y,pitch)] = f[9 ];
hB[buff_memLR(10,x,y,pitch)] = f[10];
hB[buff_memLR(11,x,y,pitch)] = f[11];
hB[buff_memLR(12,x,y,pitch)] = f[12];
hB[buff_memLR(13,x,y,pitch)] = f[13];
hB[buff_memLR(14,x,y,pitch)] = f[14];
hB[buff_memLR(15,x,y,pitch)] = f[15];
hB[buff_memLR(16,x,y,pitch)] = f[16];
hB[buff_memLR(17,x,y,pitch)] = f[17];
hB[buff_memLR(18,x,y,pitch)] = f[18];
}
else{//not LR interp region
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_memLR(1 ,x-1,y ,pitch)];
f3 = hA [buff_memLR(3 ,x+1,y ,pitch)];
f2 = hA [buff_memLR(2 ,x ,y-1,pitch)];
f5 = hA [buff_memLR(5 ,x-1,y-1,pitch)];
f6 = hA [buff_memLR(6 ,x+1,y-1,pitch)];
f4 = hA [buff_memLR(4 ,x ,y+1,pitch)];
f7 = hA [buff_memLR(7 ,x+1,y+1,pitch)];
f8 = hA [buff_memLR(8 ,x-1,y+1,pitch)];
f9 = f [f_memLR(9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_memLR(10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_memLR(11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_memLR(12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_memLR(13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_memLR(14,x ,y ,pitch)];
f15= temp[buff_memLR(15,x-1,y ,pitch)];
f16= temp[buff_memLR(16,x ,y-1,pitch)];
f17= temp[buff_memLR(17,x+1,y ,pitch)];
f18= temp[buff_memLR(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f3 ;
hB[buff_memLR(2 ,x,y,pitch)] = f4 ;
hB[buff_memLR(3 ,x,y,pitch)] = f1 ;
hB[buff_memLR(4 ,x,y,pitch)] = f2 ;
hB[buff_memLR(5 ,x,y,pitch)] = f7 ;
hB[buff_memLR(6 ,x,y,pitch)] = f8 ;
hB[buff_memLR(7 ,x,y,pitch)] = f5 ;
hB[buff_memLR(8 ,x,y,pitch)] = f6 ;
hB[buff_memLR(9 ,x,y,pitch)] = f14;
hB[buff_memLR(10,x,y,pitch)] = f17;
hB[buff_memLR(11,x,y,pitch)] = f18;
hB[buff_memLR(12,x,y,pitch)] = f15;
hB[buff_memLR(13,x,y,pitch)] = f16;
hB[buff_memLR(14,x,y,pitch)] = f9 ;
hB[buff_memLR(15,x,y,pitch)] = f12;
hB[buff_memLR(16,x,y,pitch)] = f13;
hB[buff_memLR(17,x,y,pitch)] = f10;
hB[buff_memLR(18,x,y,pitch)] = f11;
}
else{
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_memLR(0 ,x,y,pitch)] = f0 ;
hB[buff_memLR(1 ,x,y,pitch)] = f1 ;
hB[buff_memLR(2 ,x,y,pitch)] = f2 ;
hB[buff_memLR(3 ,x,y,pitch)] = f3 ;
hB[buff_memLR(4 ,x,y,pitch)] = f4 ;
hB[buff_memLR(5 ,x,y,pitch)] = f5 ;
hB[buff_memLR(6 ,x,y,pitch)] = f6 ;
hB[buff_memLR(7 ,x,y,pitch)] = f7 ;
hB[buff_memLR(8 ,x,y,pitch)] = f8 ;
hB[buff_memLR(9 ,x,y,pitch)] = f9 ;
hB[buff_memLR(10,x,y,pitch)] = f10;
hB[buff_memLR(11,x,y,pitch)] = f11;
hB[buff_memLR(12,x,y,pitch)] = f12;
hB[buff_memLR(13,x,y,pitch)] = f13;
hB[buff_memLR(14,x,y,pitch)] = f14;
hB[buff_memLR(15,x,y,pitch)] = f15;
hB[buff_memLR(16,x,y,pitch)] = f16;
hB[buff_memLR(17,x,y,pitch)] = f17;
hB[buff_memLR(18,x,y,pitch)] = f18;
}
}
}
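//ld_gb1_cg: inline-PTX float load with the .cg qualifier (cache in L2 only, bypass L1),
//a helper for streaming global reads that should not displace reusable data from L1.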
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
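//initialize_single: fill the interior distribution array with the equilibrium for
//rho=1, u=0.01, v=UMAX, w=0 (solid/force nodes im==1 or im==10 start at rest);
//level > 0 switches the coordinates to the refined-grid mapping.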
__global__ void initialize_single(float *f, size_t pitch, int yDim, int zDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
float xcoord = x;
float ycoord = y;
float zcoord = z+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0+z*LRFACTOR;
}
int j = x+y*pitch+z*yDim*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
	w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*yDim*(zDim/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
f[j+0 *pitch*yDim*(zDim/GPU_N-2)]=f0 ;
f[j+1 *pitch*yDim*(zDim/GPU_N-2)]=f1 ;
f[j+2 *pitch*yDim*(zDim/GPU_N-2)]=f2 ;
f[j+3 *pitch*yDim*(zDim/GPU_N-2)]=f3 ;
f[j+4 *pitch*yDim*(zDim/GPU_N-2)]=f4 ;
f[j+5 *pitch*yDim*(zDim/GPU_N-2)]=f5 ;
f[j+6 *pitch*yDim*(zDim/GPU_N-2)]=f6 ;
f[j+7 *pitch*yDim*(zDim/GPU_N-2)]=f7 ;
f[j+8 *pitch*yDim*(zDim/GPU_N-2)]=f8 ;
f[j+9 *pitch*yDim*(zDim/GPU_N-2)]=f9 ;
f[j+10*pitch*yDim*(zDim/GPU_N-2)]=f10;
f[j+11*pitch*yDim*(zDim/GPU_N-2)]=f11;
f[j+12*pitch*yDim*(zDim/GPU_N-2)]=f12;
f[j+13*pitch*yDim*(zDim/GPU_N-2)]=f13;
f[j+14*pitch*yDim*(zDim/GPU_N-2)]=f14;
f[j+15*pitch*yDim*(zDim/GPU_N-2)]=f15;
f[j+16*pitch*yDim*(zDim/GPU_N-2)]=f16;
f[j+17*pitch*yDim*(zDim/GPU_N-2)]=f17;
f[j+18*pitch*yDim*(zDim/GPU_N-2)]=f18;
}
}
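//initialize_buffer: same equilibrium initialization for a single z-slice buffer
//(the top/bottom halo layers g and h).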
__global__ void initialize_buffer(float *g, size_t pitch, int yDim, int GPU_N, int level)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
float xcoord = x;
float ycoord = y;
float zcoord = 0+1+GPU_N*ZDIM;
if(level > 0){
xcoord = LRX0+x*LRFACTOR;
ycoord = LRY0+y*LRFACTOR;
zcoord = LRZ0;
}
int im = ImageFcn(xcoord,ycoord,zcoord);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.01f;
v = UMAX;
w = 0.0f;
if(im == 10 || im == 1){
u = 0.0f;
v = 0.0f;
w = 0.0f;
}
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*yDim]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*yDim]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*yDim]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*yDim]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*yDim]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
mrt_feq(rho,u,v,w,f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18);
g[j+0 *pitch*yDim]=f0 ;
g[j+1 *pitch*yDim]=f1 ;
g[j+2 *pitch*yDim]=f2 ;
g[j+3 *pitch*yDim]=f3 ;
g[j+4 *pitch*yDim]=f4 ;
g[j+5 *pitch*yDim]=f5 ;
g[j+6 *pitch*yDim]=f6 ;
g[j+7 *pitch*yDim]=f7 ;
g[j+8 *pitch*yDim]=f8 ;
g[j+9 *pitch*yDim]=f9 ;
g[j+10*pitch*yDim]=f10;
g[j+11*pitch*yDim]=f11;
g[j+12*pitch*yDim]=f12;
g[j+13*pitch*yDim]=f13;
g[j+14*pitch*yDim]=f14;
g[j+15*pitch*yDim]=f15;
g[j+16*pitch*yDim]=f16;
g[j+17*pitch*yDim]=f17;
g[j+18*pitch*yDim]=f18;
}
}
//zMin = minimum zcoord, zNum = number of nodes in z
//void WriteResults(float *f, ofstream &output, float omega, int xDim, int yDim, int zMin, int zNum, float x0, float y0, float z0, float scale)
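//WriteResults: write i, j, z, u, v, w, rho and the accumulated averages/fluctuations for
//one GPU's portion of the base grid: bottom buffer gin, interior slabs fin, then top buffer hin.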
void WriteResults(ofstream &output, float *fin, float *gin, float *hin, float *uAv, float *vAv, float *wAv,
float *uFluc, float *vFluc, float *wFluc, float omega, int GPU_N, int GPU)
{
float f[19];
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*GPU)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XDIM]<<","<<vAv[i+j*XDIM]<<", "<<uFluc[i+j*XDIM]<<","<<vFluc[i+j*XDIM]<<endl;
}}
for(int k = 1; k<ZDIM/GPU_N-1; k++){
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XDIM)+(k-1)*XDIM*YDIM+l*XDIM*YDIM*(ZDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
int z = (ZDIM/GPU_N*GPU+k);
output<<i<<", "<<j<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XDIM+k*XDIM*YDIM]<<","<<vAv[i+j*XDIM+k*XDIM*YDIM]<<", "
<<uFluc[i+j*XDIM+k*XDIM*YDIM]<<","<<vFluc[i+j*XDIM+k*XDIM*YDIM]<<endl;
}}}
for(int j = 0; j<YDIM; j++){
for(int i = 0; i<XDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XDIM)+l *XDIM*YDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
output<<i<<", "<<j<<", "<<(ZDIM/GPU_N*(GPU+1)-1)<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
	<<uAv[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<vAv[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<", "//per-GPU arrays hold ZDIM/GPU_N slices
	<<uFluc[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<","<<vFluc[i+j*XDIM+(ZDIM/GPU_N-1)*XDIM*YDIM]<<endl;
}}
}
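//WriteResultsLR: same output for the refined (LR) grid, with node indices mapped to
//physical coordinates through LRX0/LRY0/LRZ0 and LRFACTOR.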
void WriteResultsLR(ofstream &output, float *fin, float *gin, float *hin, float *uAv, float *vAv, float *wAv,
float *uFluc, float *vFluc, float *wFluc, float omega, int GPU_N, int GPU)
{
float f[19];
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = gin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM]<<","<<vAv[i+j*XLRDIM]<<", "<<uFluc[i+j*XLRDIM]<<","<<vFluc[i+j*XLRDIM]<<endl;
}}
for(int k = 1; k<ZLRDIM/GPU_N-1; k++){
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = fin[(i+j*XLRDIM)+(k-1)*XLRDIM*YLRDIM+l*XLRDIM*YLRDIM*(ZLRDIM/GPU_N-2)];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*GPU+k);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<vAv[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<", "
<<uFluc[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<","<<vFluc[i+j*XLRDIM+k*XLRDIM*YLRDIM]<<endl;
}}}
for(int j = 0; j<YLRDIM; j++){
for(int i = 0; i<XLRDIM; i++){
float rho = 0;
for(int l = 0; l<19; l++){
f[l] = hin[(i+j*XLRDIM)+l *XLRDIM*YLRDIM];
rho += f[l];
}
float u = f[1]-f[3 ]+f[5 ]-f[6 ]-f[7 ]+f[8 ]+f[10]-f[12]+f[15]-f[17];
float v = f[2]-f[4 ]+f[5 ]+f[6 ]-f[7 ]-f[8 ]+f[11]-f[13]+f[16]-f[18];
float w = f[9]+f[10]+f[11]+f[12]+f[13]-f[14]-f[15]-f[16]-f[17]-f[18];
float x = LRX0+LRFACTOR*i;
float y = LRY0+LRFACTOR*j;
float z = LRZ0+LRFACTOR*(ZLRDIM/GPU_N*(GPU+1)-1);
output<<x<<", "<<y<<", "<<z<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
<<uAv[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<vAv[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<", "
<<uFluc[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<","<<vFluc[i+j*XLRDIM+(ZLRDIM/GPU_N-1)*XLRDIM*YLRDIM]<<endl;
}}
}
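//WriteForces: dump the force history normalized by UMAX^2*ZDIM*OBSTR1 (scaled by
//LRLEVEL^2 when the force was sampled on the refined grid).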
void WriteForces(float *FX, float *FY, float *FZ, ofstream &output, int ForceTime, int level)
{
float ref = UMAX*UMAX*ZDIM*OBSTR1;
if(level > 0)
ref *= LRLEVEL*LRLEVEL;
for(int i = 0; i<ForceTime; i++){
output<<i+ForceTime<<", "<<FX[i]/ref<<", "<<FY[i]/ref<<", "<<FZ[i]/ref<<endl;
}
}
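//WriteInputs: echo the compile-time configuration to the .inputs file for bookkeeping.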
void WriteInputs(ofstream &output, float omega, float omegaLR, int GPU_per_node)
{
output<<"Base domain size \t"<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
output<<"Base blocksize: \t"<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
output<<"Obst1 location: \t("<<OBSTX1<<","<<OBSTY1<<","<<OBSTZ1<<")"<<endl;
output<<"Obst1 radius: \t"<<OBSTR1<<endl;
output<<"Obst2 location: \t("<<OBSTX2<<","<<OBSTY2<<","<<OBSTZ2<<")"<<endl;
output<<"Obst2 radius: \t"<<OBSTR2<<endl;
output<<"RE: \t"<<RE<<endl;
output<<"UMAX: \t"<<UMAX<<endl;
output<<"omega \t: "<<omega<<endl;
output<<"TMAX: \t"<<TMAX<<endl;
output<<"STARTF: \t"<<STARTF<<endl;
output<<"START_VELAV: \t"<<START_VELAV<<endl;
output<<"START_VELFLUC: \t"<<START_VELFLUC<<endl;
output<<"REFINEMENT: \t"<<REFINEMENT<<endl;
output<<"MODEL: \t"<<MODEL<<endl;
output<<"Smagorinski LES: \t"<<SmagLES<<endl;
output<<"CS: \t"<<CS<<endl;
output<<"LR domain size \t"<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
output<<"LR factor \t"<<LRFACTOR<<endl;
output<<"LR location \t"<<LRX0<<"x"<<LRY0<<"x"<<LRZ0<<endl;
output<<"LR blocksize: \t"<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl;
output<<"omega in LR \t: "<<omegaLR<<endl;
output<<"GPUs per node \t: "<<GPU_per_node<<endl;
output.close();
}
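//main: host driver. Allocates per-GPU distribution arrays (interior slabs plus top/bottom
//halo buffers g and h), enables peer access, initializes the equilibrium fields, and runs
//the time loop using one halo stream and one interior stream per device.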
int main(int argc, char *argv[])
{
int GPU_N;
cudaGetDeviceCount(&GPU_N);
//GPU_N = 1;
cout<<"number of GPUs: "<<GPU_N<<endl;
int outputflag = 1;
if(argc>1){
if(strcmp(argv[1],"-no")==0){
outputflag = 0;
cout<<"no outputs option\n";
}
}
ofstream output;
ofstream outputForce;
ofstream outputInputs;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
outputForce.open ((FileName+".force").c_str());
outputInputs.open ((FileName+".inputs").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = XDIM*sizeof(float);//pitch*sizeof(float);
size_t pitch_elements = XDIM;//pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
float omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
if(LRFACTOR == 0.25f){
omegaLR = 2.0f/(1.0f+2.0f*(2.0f/omegaLR-1.0f));
}
float SF_cf = omega*(1.0f-omegaLR)/((1.0f-omega)*omegaLR/LRFACTOR);
float SF_fc = 1.f/SF_cf;
WriteInputs(outputInputs,omega,omegaLR,GPU_N);
cout<<"omega : "<<omega<<endl;
cout<<"omegaLR : "<<omegaLR<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"gridLR: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Model: "<<MODEL<<endl;
cout<<"Refinement: "<<LRLEVEL<<endl;
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! Exiting..."<<endl;
return 0;
}
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
int ForceTime = max(0,TMAX-STARTF);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
float *FX_h[GPU_N],*FY_h[GPU_N],*FZ_h[GPU_N];
float *FX_d[GPU_N],*FY_d[GPU_N],*FZ_d[GPU_N];
float *FX_total,*FY_total,*FZ_total;
float *uAv_h[GPU_N], *vAv_h[GPU_N], *wAv_h[GPU_N], *uAv_d[GPU_N], *vAv_d[GPU_N], *wAv_d[GPU_N];
float *uFluc_h[GPU_N], *vFluc_h[GPU_N], *wFluc_h[GPU_N], *uFluc_d[GPU_N], *vFluc_d[GPU_N], *wFluc_d[GPU_N];
FX_total = (float *)malloc(ForceTime*sizeof(float));
FY_total = (float *)malloc(ForceTime*sizeof(float));
FZ_total = (float *)malloc(ForceTime*sizeof(float));
for(i=0;i<(ForceTime);i++){
FX_total[i] = 0;
FY_total[i] = 0;
FZ_total[i] = 0;
}
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
FX_h [n] = (float *)malloc(ForceTime*sizeof(float));
FY_h [n] = (float *)malloc(ForceTime*sizeof(float));
FZ_h [n] = (float *)malloc(ForceTime*sizeof(float));
uAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
vAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
wAv_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
uFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
vFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
wFluc_h [n]= (float *)malloc(XDIM*YDIM*ZDIM/GPU_N*sizeof(float));
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
cudaDeviceEnablePeerAccess(m,0);
}
cudaMalloc((void **) &f_inner_A_d[n], pitch_elements*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &f_inner_B_d[n], pitch_elements*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) & g_A_d[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_B_d[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_A_d[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_B_d[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_temp[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch_elements*YDIM* 19*sizeof(float));
cudaMalloc((void **) & FX_d[n], (ForceTime)*sizeof(float));
cudaMalloc((void **) & FY_d[n], (ForceTime)*sizeof(float));
cudaMalloc((void **) & FZ_d[n], (ForceTime)*sizeof(float));
cudaMalloc((void **) & uAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & vAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & wAv_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & uFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & vFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & wFluc_d[n], pitch_elements*YDIM*ZDIM/GPU_N*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
for(i=0;i<(ForceTime);i++){
FX_h[n][i] = 0;
FY_h[n][i] = 0;
FZ_h[n][i] = 0;
}
for (i = 0; i < XDIM*YDIM*ZDIM/GPU_N; i++){
uAv_h[n][i] = 0;
vAv_h[n][i] = 0;
wAv_h[n][i] = 0;
uFluc_h[n][i] = 0;
vFluc_h[n][i] = 0;
wFluc_h[n][i] = 0;
}
cudaMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( uAv_d[n],pitch, uAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( vAv_d[n],pitch, vAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( wAv_d[n],pitch, wAv_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( uFluc_d[n],pitch, uFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( vFluc_d[n],pitch, vFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( wFluc_d[n],pitch, wFluc_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy( FX_d[n], FX_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
cudaMemcpy( FY_d[n], FY_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
cudaMemcpy( FZ_d[n], FZ_h[n],sizeof(float)*(ForceTime),cudaMemcpyHostToDevice);
initialize_single<<<grid , threads>>>(f_inner_A_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
initialize_single<<<grid , threads>>>(f_inner_B_d[n],pitch_elements,YDIM,ZDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_A_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_B_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_A_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_B_d[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( g_temp[n],pitch_elements,YDIM,GPU_N,0);
initialize_buffer<<<g_grid, threads>>>( h_temp[n],pitch_elements,YDIM,GPU_N,0);
}//end Malloc and Initialize
//data pointers for LR
float *f_inner_LR_h[GPU_N], *g_LR_h[GPU_N], *h_LR_h[GPU_N];
float *f_inner_LR_A_d[GPU_N], *g_LR_A_d[GPU_N], *h_LR_A_d[GPU_N];
float *f_inner_LR_B_d[GPU_N], *g_LR_B_d[GPU_N], *h_LR_B_d[GPU_N];
float *g_LR_temp[GPU_N], *h_LR_temp[GPU_N];
float *uAvLR_h[GPU_N], *vAvLR_h[GPU_N], *wAvLR_h[GPU_N], *uAvLR_d[GPU_N], *vAvLR_d[GPU_N], *wAvLR_d[GPU_N];
float *uFlucLR_h[GPU_N], *vFlucLR_h[GPU_N], *wFlucLR_h[GPU_N], *uFlucLR_d[GPU_N], *vFlucLR_d[GPU_N], *wFlucLR_d[GPU_N];
size_t LRpitch = 2;
while(LRpitch<XLRDIM)
LRpitch=LRpitch*2;
LRpitch = XLRDIM*sizeof(float);//LRpitch*sizeof(float);
size_t LRpitch_elements = XLRDIM;//LRpitch/sizeof(float);
cout<<"LR Pitch (in elements): "<<LRpitch/sizeof(float)<<endl;
int zLRInner = ZLRDIM/GPU_N-2;
dim3 LRthreads(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ);
dim3 LRgrid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),(zLRInner)/BLOCKSIZELRZ);
dim3 g_LR_grid(((XLRDIM+BLOCKSIZELRX-1)/BLOCKSIZELRX),((YLRDIM+BLOCKSIZELRY-1)/BLOCKSIZELRY),1);
//LR setup
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
f_inner_LR_h[n] = (float *)malloc(XLRDIM*YLRDIM*zLRInner*19*sizeof(float));
g_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
h_LR_h [n] = (float *)malloc(XLRDIM*YLRDIM* 19*sizeof(float));
uAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
vAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
wAvLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
uFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
vFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
wFlucLR_h [n]= (float *)malloc(XLRDIM*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaSetDevice(n);
cudaMalloc((void **) &f_inner_LR_A_d[n], LRpitch_elements*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) &f_inner_LR_B_d[n], LRpitch_elements*YLRDIM*zLRInner*19*sizeof(float));
cudaMalloc((void **) & g_LR_A_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & g_LR_B_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_A_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_B_d[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & g_LR_temp[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & h_LR_temp[n], LRpitch_elements*YLRDIM* 19*sizeof(float));
cudaMalloc((void **) & uAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & vAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & wAvLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & uFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & vFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
cudaMalloc((void **) & wFlucLR_d[n], LRpitch_elements*YLRDIM*ZLRDIM/GPU_N*sizeof(float));
//initialize host f_inner
for (i = 0; i < XLRDIM*YLRDIM*zLRInner*19; i++)
f_inner_LR_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XLRDIM*YLRDIM*19; i++){
g_LR_h[n][i] = 0;
h_LR_h[n][i] = 0;
}
for (i = 0; i < XLRDIM*YLRDIM*ZLRDIM/GPU_N; i++){
uAvLR_h[n][i] = 0;
vAvLR_h[n][i] = 0;
wAvLR_h[n][i] = 0;
uFlucLR_h[n][i] = 0;
vFlucLR_h[n][i] = 0;
wFlucLR_h[n][i] = 0;
}
cudaMemcpy2D(f_inner_LR_A_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(f_inner_LR_B_d[n],LRpitch,f_inner_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_LR_A_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_LR_B_d[n],LRpitch, g_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_LR_A_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_LR_B_d[n],LRpitch, h_LR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( uAvLR_d[n],LRpitch, uAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( vAvLR_d[n],LRpitch, vAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( wAvLR_d[n],LRpitch, wAvLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( uFlucLR_d[n],LRpitch, uFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( vFlucLR_d[n],LRpitch, vFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
cudaMemcpy2D( wFlucLR_d[n],LRpitch, wFlucLR_h[n],XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyHostToDevice);
initialize_single<<<LRgrid , LRthreads>>>(f_inner_LR_A_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
initialize_single<<<LRgrid , LRthreads>>>(f_inner_LR_B_d[n],LRpitch_elements,YLRDIM,ZLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_A_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_B_d[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( g_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
initialize_buffer<<<g_LR_grid, LRthreads>>>( h_LR_temp[n],LRpitch_elements,YLRDIM,GPU_N,LRLEVEL);
}//end of GPU loop for malloc and initialize for LR
}//end of LR malloc and initialize
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int n = 0; n<GPU_N; n++){
size_t mem_avail, mem_total;
cudaSetDevice(n);
cudaMemGetInfo(&mem_avail,&mem_total);
cout<<"Device memory used for dev"<<n<<" : "<<mem_total-mem_avail<<endl;
cout<<"Device memory available for dev"<<n<<" : "<<mem_avail<<endl;
}
//Time loop
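//Each iteration advances two LBM steps, ping-ponging between the A and B copies of the
//distributions so reads and writes never alias (A->B in the first half, B->A in the second).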
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
update_top_force <<<h_grid, threads, 0, stream_halo [n]>>>(h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
update_bottom_force<<<h_grid, threads, 0, stream_halo [n]>>>(g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_top <<<h_grid, threads, 0, stream_halo [n]>>>(h_A_d[n],h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>(g_A_d[n],g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF && REFINEMENT == 0){
update_inner_force <<< grid, threads, 0, stream_inner[n]>>>(f_inner_A_d[n],f_inner_B_d[n],g_A_d[n],h_A_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t);
}
else{
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t);
}
}
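//Exchange halos between neighboring GPUs: h_temp[n] receives the bottom buffer of the
//device above, g_temp[n] the top buffer of the device below.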
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_B_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_B_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
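//With refinement on, the fine (LR) grid is advanced twice per coarse step at omegaLR:
//a plain LR update followed by the *_LR_interp update that refreshes the boundary band
//from the coarse solution, with fine-grid halo exchanges in between.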
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF){
update_top_LR_force <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
update_bottom_LR_force<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF);
}
else{
update_top_LR <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
update_bottom_LR<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t>=STARTF){
update_inner_LR_force <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+0.5f);
}
else{
update_inner_LR <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n],g_LR_A_d[n],h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t);
}
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR_interp <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_B_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
update_bottom_LR_interp<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_B_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inner_LR_interp<<<LRgrid,LRthreads,0,stream_inner[n]>>>(f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_B_d[n],g_B_d[n],h_B_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+0.5f);
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
f_Extract<<<grid,threads,0,stream_inner[n]>>>(f_inner_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
g_Extract<<<grid,threads,0,stream_inner[n]>>>(g_B_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
h_Extract<<<grid,threads,0,stream_inner[n]>>>(h_B_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}//end refinement
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
//B->A
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
update_top_force <<<h_grid, threads, 0, stream_halo [n]>>>(h_B_d[n],h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
update_bottom_force<<<h_grid, threads, 0, stream_halo [n]>>>(g_B_d[n],g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1);
}
else{
update_top <<<h_grid, threads, 0, stream_halo [n]>>>( h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>( g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF && REFINEMENT == 0){
update_inner_force <<< grid, threads, 0, stream_inner[n]>>>(f_inner_B_d[n],f_inner_A_d[n],g_B_d[n],h_B_d[n],omega,pitch_elements,n,zInner,FX_d[n],FY_d[n],FZ_d[n],t-STARTF+1,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t+1);
}
else{
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner,uAv_d[n],vAv_d[n],uFluc_d[n],vFluc_d[n],t+1);
}
}
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&h_temp[n][0],n,&g_A_d[ (n+1)%GPU_N][0], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
for(int n = 0; n<GPU_N; n++)
cudaMemcpyPeerAsync(&g_temp[n][0],n,&h_A_d[abs(n-1)%GPU_N][0],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*19,stream_halo[n]);
if(REFINEMENT == 1){
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF){
update_top_LR_force <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
update_bottom_LR_force<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF);
}
else{
update_top_LR <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( h_LR_A_d[n], h_LR_B_d[n],f_inner_LR_A_d[n],h_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
update_bottom_LR<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>( g_LR_A_d[n], g_LR_B_d[n],f_inner_LR_A_d[n],g_LR_temp[n],omegaLR,LRpitch_elements,n,zLRInner);
}
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(t+1>=STARTF){
update_inner_LR_force <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,FX_d[n],FY_d[n],FZ_d[n],t+1-STARTF,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1);
}
else{
update_inner_LR <<< LRgrid, LRthreads, 0, stream_inner[n]>>>(f_inner_LR_A_d[n],f_inner_LR_B_d[n], g_LR_A_d[n], h_LR_A_d[n],omegaLR,LRpitch_elements,n,zLRInner,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1);
}
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_B_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_B_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_top_LR_interp <<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(h_LR_B_d[n],h_LR_A_d[n],f_inner_LR_B_d[n],h_LR_temp[n],h_A_d[n],h_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
update_bottom_LR_interp<<<g_LR_grid, LRthreads, 0, stream_halo [n]>>>(g_LR_B_d[n],g_LR_A_d[n],f_inner_LR_B_d[n],g_LR_temp[n],g_A_d[n],g_temp[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n);
}
for(int n = 0; n<GPU_N; n++)
cudaStreamSynchronize(stream_halo[n]);
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
update_inner_LR_interp <<<LRgrid,LRthreads,0,stream_inner[n]>>>(f_inner_LR_B_d[n],f_inner_LR_A_d[n],g_LR_B_d[n],h_LR_B_d[n],f_inner_A_d[n],g_A_d[n],h_A_d[n],omegaLR,LRpitch_elements,zLRInner,pitch_elements,zInner,SF_cf,n,uAvLR_d[n],vAvLR_d[n],uFlucLR_d[n],vFlucLR_d[n],t+1.5f);
}
for(int n = 0; n<GPU_N; n++){
cudaMemcpyPeerAsync(&h_LR_temp[n][LRpitch_elements*YLRDIM*14],n,&g_LR_A_d[ (n+1)%GPU_N][LRpitch_elements*YLRDIM*14], (n+1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_LR_temp[n][LRpitch_elements*YLRDIM*9 ],n,&h_LR_A_d[abs(n-1)%GPU_N][LRpitch_elements*YLRDIM*9 ],abs(n-1)%GPU_N,LRpitch_elements*YLRDIM*sizeof(float)*5,stream_halo[n]);
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
f_Extract<<<grid,threads,0,stream_inner[n]>>>(f_inner_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
g_Extract<<<grid,threads,0,stream_inner[n]>>>(g_A_d[n],f_inner_LR_A_d[n],g_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
h_Extract<<<grid,threads,0,stream_inner[n]>>>(h_A_d[n],f_inner_LR_A_d[n],h_LR_A_d[n],pitch_elements,LRpitch_elements,zInner,zLRInner,SF_fc,n);
}
}
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaDeviceSynchronize();
}
}//end Time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
if (REFINEMENT == 1)
Nodes += XLRDIM*YLRDIM*ZLRDIM*LRLEVEL;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(outputflag == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM/GPU_N<<"\n";
cudaMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D(uAv_h[n],XDIM*sizeof(float),uAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(vAv_h[n],XDIM*sizeof(float),vAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(wAv_h[n],XDIM*sizeof(float),wAv_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(uFluc_h[n],XDIM*sizeof(float),uFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(vFluc_h[n],XDIM*sizeof(float),vFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(wFluc_h[n],XDIM*sizeof(float),wFluc_d[n],pitch,XDIM*sizeof(float),YDIM*ZDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy( FX_h[n],FX_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
cudaMemcpy( FY_h[n],FY_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
cudaMemcpy( FZ_h[n],FZ_d[n],sizeof(float)*ForceTime,cudaMemcpyDeviceToHost);
WriteResults(output,f_inner_h[n],g_h[n],h_h[n],uAv_h[n],vAv_h[n],wAv_h[n],uFluc_h[n],vFluc_h[n],wFluc_h[n],omega,GPU_N,n);
output<<endl;
//Write results
// WriteResults( g_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n ,1 ,0,0,0,1);
// WriteResults(f_inner_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*n+1 ,zInner,0,0,0,1);
// WriteResults( h_h[n],output,omega,XDIM,YDIM,ZDIM/GPU_N*(n+1)-1,1 ,0,0,0,1);
}
for(int i=0;i<ForceTime;i++){
FX_total[i] += FX_h[n][i];
FY_total[i] += FY_h[n][i];
FZ_total[i] += FZ_h[n][i];
}
cudaFree(f_inner_A_d[n]);
cudaFree(f_inner_B_d[n]);
cudaFree( g_A_d[n]);
cudaFree( g_B_d[n]);
cudaFree( h_A_d[n]);
cudaFree( h_B_d[n]);
cudaFree( g_temp[n]);
cudaFree( h_temp[n]);
cudaFree( uAv_d[n]);
cudaFree( vAv_d[n]);
cudaFree( wAv_d[n]);
cudaFree( uFluc_d[n]);
cudaFree( vFluc_d[n]);
cudaFree( wFluc_d[n]);
}//end write results
WriteForces(FX_total,FY_total,FZ_total,outputForce,ForceTime,REFINEMENT*LRLEVEL);
if(REFINEMENT == 1){
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XLRDIM<<", J="<<YLRDIM<<", K="<<ZLRDIM<<"\n";
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
if(outputflag == 1){
cudaMemcpy2D(f_inner_LR_h[n],XLRDIM*sizeof(float),f_inner_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*zLRInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( g_LR_h[n],XLRDIM*sizeof(float), g_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( h_LR_h[n],XLRDIM*sizeof(float), h_LR_A_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( uAvLR_h[n],XLRDIM*sizeof(float), uAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D( vAvLR_h[n],XLRDIM*sizeof(float), vAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D( wAvLR_h[n],XLRDIM*sizeof(float), wAvLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(uFlucLR_h[n],XLRDIM*sizeof(float),uFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(vFlucLR_h[n],XLRDIM*sizeof(float),vFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
cudaMemcpy2D(wFlucLR_h[n],XLRDIM*sizeof(float),wFlucLR_d[n],LRpitch,XLRDIM*sizeof(float),YLRDIM*ZLRDIM/GPU_N,cudaMemcpyDeviceToHost);
WriteResultsLR(output,f_inner_LR_h[n],g_LR_h[n],h_LR_h[n],uAvLR_h[n],vAvLR_h[n],wAvLR_h[n],uFlucLR_h[n],vFlucLR_h[n],wFlucLR_h[n],omegaLR,GPU_N,n);
//Write results
// WriteResults( g_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n ,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
// WriteResults(f_inner_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*n+1 ,zLRInner,LRX0,LRY0,LRZ0,LRFACTOR);
// WriteResults( h_LR_h[n],output,omega,XLRDIM,YLRDIM,ZLRDIM/GPU_N*(n+1)-1,1 ,LRX0,LRY0,LRZ0,LRFACTOR);
}
cudaFree(f_inner_LR_A_d[n]);
cudaFree(f_inner_LR_B_d[n]);
cudaFree( g_LR_A_d[n]);
cudaFree( g_LR_B_d[n]);
cudaFree( h_LR_A_d[n]);
cudaFree( h_LR_B_d[n]);
cudaFree( g_LR_temp[n]);
cudaFree( h_LR_temp[n]);
}//end GPU loop for LR
}//end write results of LR
return(0);
}
|
d3860d20154449d5affa634f3b89c5a092fe32f3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cuda_bf16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union BF16
{
unsigned short int i;
__nv_bfloat16 f;
};
__global__ void test(float* dst, __nv_bfloat16* a, __nv_bfloat16* b, float* c){
asm volatile(
// "ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
"ld.param.u64 %rd1, [_Z4testPfP13__nv_bfloat16S1_S__param_0];\n\t"
".reg .b32 a<4>, b<4>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.bf16 {a0, a1, a2, a3}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.bf16 {b0, b1, b2, b3}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.bf16.bf16.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3}, {b0, b1, b2, b3}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__nv_bfloat16* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__nv_bfloat16* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__nv_bfloat16* host_a=(__nv_bfloat16*)malloc(sizeof(__nv_bfloat16) * size);
__nv_bfloat16* host_b=(__nv_bfloat16*)malloc(sizeof(__nv_bfloat16) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__nv_bfloat16* device_a=NULL;
__nv_bfloat16* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(__nv_bfloat16) * size);
hipMalloc((void**)(&device_b), sizeof(__nv_bfloat16) * size);
hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
BF16 bf16;
bf16.i = 0x3f80; host_a[0]=bf16.f;
bf16.i = 0x3f80; host_a[1]=bf16.f;
bf16.i = 0x3f80; host_a[2]=bf16.f;
bf16.i = 0x3f80; host_a[3]=bf16.f;
bf16.i = 0x4c00; host_a[4]=bf16.f;
bf16.i = 0x0000; host_a[5]=bf16.f;
bf16.i = 0x0000; host_a[6]=bf16.f;
bf16.i = 0x0000; host_a[7]=bf16.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(__nv_bfloat16)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(__nv_bfloat16)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b, device_c);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
| d3860d20154449d5affa634f3b89c5a092fe32f3.cu | #include <iostream>
#include <cuda.h>
#include <cuda_bf16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union BF16
{
unsigned short int i;
__nv_bfloat16 f;
};
__global__ void test(float* dst, __nv_bfloat16* a, __nv_bfloat16* b, float* c){
asm volatile(
// "ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
"ld.param.u64 %rd1, [_Z4testPfP13__nv_bfloat16S1_S__param_0];\n\t"
".reg .b32 a<4>, b<4>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.bf16 {a0, a1, a2, a3}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.bf16 {b0, b1, b2, b3}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.bf16.bf16.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3}, {b0, b1, b2, b3}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__nv_bfloat16* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__nv_bfloat16* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__nv_bfloat16* host_a=(__nv_bfloat16*)malloc(sizeof(__nv_bfloat16) * size);
__nv_bfloat16* host_b=(__nv_bfloat16*)malloc(sizeof(__nv_bfloat16) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__nv_bfloat16* device_a=NULL;
__nv_bfloat16* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(__nv_bfloat16) * size);
cudaMalloc((void**)(&device_b), sizeof(__nv_bfloat16) * size);
cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
BF16 bf16;
bf16.i = 0x3f80; host_a[0]=bf16.f;
bf16.i = 0x3f80; host_a[1]=bf16.f;
bf16.i = 0x3f80; host_a[2]=bf16.f;
bf16.i = 0x3f80; host_a[3]=bf16.f;
bf16.i = 0x4c00; host_a[4]=bf16.f;
bf16.i = 0x0000; host_a[5]=bf16.f;
bf16.i = 0x0000; host_a[6]=bf16.f;
bf16.i = 0x0000; host_a[7]=bf16.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(__nv_bfloat16)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(__nv_bfloat16)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
test<<<1,32>>>(device_d, device_a, device_b, device_c);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
0818b645ea16281a127c1da61bd5944e310d7bfc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/datetime.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/chrono.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
#include <gmock/gmock.h>
template <typename T>
struct NonTimestampTest : public cudf::test::BaseFixture {
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
using NonTimestampTypes =
cudf::test::Concat<cudf::test::NumericTypes, cudf::test::StringTypes>;
TYPED_TEST_CASE(NonTimestampTest, NonTimestampTypes);
TYPED_TEST(NonTimestampTest, TestThrowsOnNonTimestamp) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
cudf::data_type dtype{cudf::experimental::type_to_id<T>()};
cudf::column col{dtype, 0, rmm::device_buffer{0}};
EXPECT_THROW(extract_year(col), cudf::logic_error);
EXPECT_THROW(extract_month(col), cudf::logic_error);
EXPECT_THROW(extract_day(col), cudf::logic_error);
EXPECT_THROW(extract_weekday(col), cudf::logic_error);
EXPECT_THROW(extract_hour(col), cudf::logic_error);
EXPECT_THROW(extract_minute(col), cudf::logic_error);
EXPECT_THROW(extract_second(col), cudf::logic_error);
}
struct BasicDatetimeOpsTest : public cudf::test::BaseFixture {};
TEST_F(BasicDatetimeOpsTest, TestExtractingDatetimeComponents) {
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto timestamps_D = fixed_width_column_wrapper<cudf::timestamp_D>{
-1528, // 1965-10-26
17716, // 2018-07-04
19382, // 2023-01-25
};
auto timestamps_s = fixed_width_column_wrapper<cudf::timestamp_s>{
-131968728, // 1965-10-26 14:01:12
1530705600, // 2018-07-04 12:00:00
1674631932, // 2023-01-25 07:32:12
};
auto timestamps_ms = fixed_width_column_wrapper<cudf::timestamp_ms>{
-131968727238, // 1965-10-26 14:01:12.762
1530705600000, // 2018-07-04 12:00:00.000
1674631932929, // 2023-01-25 07:32:12.929
};
expect_columns_equal(*extract_year(timestamps_D),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_year(timestamps_s),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_year(timestamps_ms),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_month(timestamps_D),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_month(timestamps_s),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_month(timestamps_ms),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_day(timestamps_D),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_day(timestamps_s),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_day(timestamps_ms),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_weekday(timestamps_D),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_weekday(timestamps_s),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_weekday(timestamps_ms),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_hour(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_hour(timestamps_s),
fixed_width_column_wrapper<int16_t>{14, 12, 7});
expect_columns_equal(*extract_hour(timestamps_ms),
fixed_width_column_wrapper<int16_t>{14, 12, 7});
expect_columns_equal(*extract_minute(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_minute(timestamps_s),
fixed_width_column_wrapper<int16_t>{1, 0, 32});
expect_columns_equal(*extract_minute(timestamps_ms),
fixed_width_column_wrapper<int16_t>{1, 0, 32});
expect_columns_equal(*extract_second(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_second(timestamps_s),
fixed_width_column_wrapper<int16_t>{12, 0, 12});
expect_columns_equal(*extract_second(timestamps_ms),
fixed_width_column_wrapper<int16_t>{12, 0, 12});
}
template <typename T>
struct TypedDatetimeOpsTest : public cudf::test::BaseFixture {
hipStream_t stream() { return hipStream_t(0); }
cudf::size_type size() { return cudf::size_type(10); }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
TYPED_TEST_CASE(TypedDatetimeOpsTest, cudf::test::TimestampTypes);
TYPED_TEST(TypedDatetimeOpsTest, TestEmptyColumns) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto int16s_dtype =
cudf::data_type{cudf::experimental::type_to_id<int16_t>()};
auto timestamps_dtype = cudf::data_type{cudf::experimental::type_to_id<T>()};
cudf::column int16s{int16s_dtype, 0, rmm::device_buffer{0}};
cudf::column timestamps{timestamps_dtype, 0, rmm::device_buffer{0}};
expect_columns_equal(*extract_year(timestamps), int16s);
expect_columns_equal(*extract_month(timestamps), int16s);
expect_columns_equal(*extract_day(timestamps), int16s);
expect_columns_equal(*extract_weekday(timestamps), int16s);
expect_columns_equal(*extract_hour(timestamps), int16s);
expect_columns_equal(*extract_minute(timestamps), int16s);
expect_columns_equal(*extract_second(timestamps), int16s);
}
TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedDatetimeComponents) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamps = generate_timestamps<T>(this->size(), time_point_ms(start),
time_point_ms(stop_));
auto expected_years = fixed_width_column_wrapper<int16_t>{
1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033};
auto expected_months =
fixed_width_column_wrapper<int16_t>{10, 8, 6, 4, 2, 1, 11, 9, 7, 5};
auto expected_days =
fixed_width_column_wrapper<int16_t>{11, 16, 20, 24, 26, 1, 5, 9, 14, 18};
auto expected_weekdays =
fixed_width_column_wrapper<int16_t>{6, 4, 2, 7, 5, 4, 2, 7, 5, 3};
auto expected_hours =
fixed_width_column_wrapper<int16_t>{19, 20, 21, 22, 23, 0, 0, 1, 2, 3};
auto expected_minutes =
fixed_width_column_wrapper<int16_t>{33, 26, 20, 13, 6, 0, 53, 46, 40, 33};
auto expected_seconds =
fixed_width_column_wrapper<int16_t>{20, 40, 0, 20, 40, 0, 20, 40, 0, 20};
// Special cases for timestamp_D: zero out the hh/mm/ss cols and +1 the
// expected weekdays
if (std::is_same<TypeParam, cudf::timestamp_D>::value) {
expected_days = fixed_width_column_wrapper<int16_t>{12, 17, 21, 25, 27,
1, 5, 9, 14, 18};
expected_weekdays =
fixed_width_column_wrapper<int16_t>{7, 5, 3, 1, 6, 4, 2, 7, 5, 3};
expected_hours =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
expected_minutes =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
expected_seconds =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
}
expect_columns_equal(*extract_year(timestamps), expected_years);
expect_columns_equal(*extract_month(timestamps), expected_months);
expect_columns_equal(*extract_day(timestamps), expected_days);
expect_columns_equal(*extract_weekday(timestamps), expected_weekdays);
expect_columns_equal(*extract_hour(timestamps), expected_hours);
expect_columns_equal(*extract_minute(timestamps), expected_minutes);
expect_columns_equal(*extract_second(timestamps), expected_seconds);
}
TYPED_TEST(TypedDatetimeOpsTest,
TestExtractingGeneratedNullableDatetimeComponents) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamps = generate_timestamps<T, true>(
this->size(), time_point_ms(start), time_point_ms(stop_));
auto expected_years = fixed_width_column_wrapper<int16_t>{
{1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_months = fixed_width_column_wrapper<int16_t>{
{10, 8, 6, 4, 2, 1, 11, 9, 7, 5},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_days = fixed_width_column_wrapper<int16_t>{
{11, 16, 20, 24, 26, 1, 5, 9, 14, 18},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_weekdays = fixed_width_column_wrapper<int16_t>{
{6, 4, 2, 7, 5, 4, 2, 7, 5, 3},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_hours = fixed_width_column_wrapper<int16_t>{
{19, 20, 21, 22, 23, 0, 0, 1, 2, 3},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_minutes = fixed_width_column_wrapper<int16_t>{
{33, 26, 20, 13, 6, 0, 53, 46, 40, 33},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_seconds = fixed_width_column_wrapper<int16_t>{
{20, 40, 0, 20, 40, 0, 20, 40, 0, 20},
{true, false, true, false, true, false, true, false, true, false}};
// Special cases for timestamp_D: zero out the hh/mm/ss cols and +1 the
// expected weekdays
if (std::is_same<TypeParam, cudf::timestamp_D>::value) {
expected_days = fixed_width_column_wrapper<int16_t>{
{12, 17, 21, 25, 27, 1, 5, 9, 14, 18},
{true, false, true, false, true, false, true, false, true, false}};
expected_weekdays = fixed_width_column_wrapper<int16_t>{
{7, 5, 3, 1, 6, 4, 2, 7, 5, 3},
{true, false, true, false, true, false, true, false, true, false}};
expected_hours = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
expected_minutes = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
expected_seconds = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
}
expect_columns_equal(*extract_year(timestamps), expected_years);
expect_columns_equal(*extract_month(timestamps), expected_months);
expect_columns_equal(*extract_day(timestamps), expected_days);
expect_columns_equal(*extract_weekday(timestamps), expected_weekdays);
expect_columns_equal(*extract_hour(timestamps), expected_hours);
expect_columns_equal(*extract_minute(timestamps), expected_minutes);
expect_columns_equal(*extract_second(timestamps), expected_seconds);
}
| 0818b645ea16281a127c1da61bd5944e310d7bfc.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/datetime.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/chrono.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
#include <gmock/gmock.h>
template <typename T>
struct NonTimestampTest : public cudf::test::BaseFixture {
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
using NonTimestampTypes =
cudf::test::Concat<cudf::test::NumericTypes, cudf::test::StringTypes>;
TYPED_TEST_CASE(NonTimestampTest, NonTimestampTypes);
TYPED_TEST(NonTimestampTest, TestThrowsOnNonTimestamp) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
cudf::data_type dtype{cudf::experimental::type_to_id<T>()};
cudf::column col{dtype, 0, rmm::device_buffer{0}};
EXPECT_THROW(extract_year(col), cudf::logic_error);
EXPECT_THROW(extract_month(col), cudf::logic_error);
EXPECT_THROW(extract_day(col), cudf::logic_error);
EXPECT_THROW(extract_weekday(col), cudf::logic_error);
EXPECT_THROW(extract_hour(col), cudf::logic_error);
EXPECT_THROW(extract_minute(col), cudf::logic_error);
EXPECT_THROW(extract_second(col), cudf::logic_error);
}
struct BasicDatetimeOpsTest : public cudf::test::BaseFixture {};
TEST_F(BasicDatetimeOpsTest, TestExtractingDatetimeComponents) {
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto timestamps_D = fixed_width_column_wrapper<cudf::timestamp_D>{
-1528, // 1965-10-26
17716, // 2018-07-04
19382, // 2023-01-25
};
auto timestamps_s = fixed_width_column_wrapper<cudf::timestamp_s>{
-131968728, // 1965-10-26 14:01:12
1530705600, // 2018-07-04 12:00:00
1674631932, // 2023-01-25 07:32:12
};
auto timestamps_ms = fixed_width_column_wrapper<cudf::timestamp_ms>{
-131968727238, // 1965-10-26 14:01:12.762
1530705600000, // 2018-07-04 12:00:00.000
1674631932929, // 2023-01-25 07:32:12.929
};
expect_columns_equal(*extract_year(timestamps_D),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_year(timestamps_s),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_year(timestamps_ms),
fixed_width_column_wrapper<int16_t>{1965, 2018, 2023});
expect_columns_equal(*extract_month(timestamps_D),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_month(timestamps_s),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_month(timestamps_ms),
fixed_width_column_wrapper<int16_t>{10, 7, 1});
expect_columns_equal(*extract_day(timestamps_D),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_day(timestamps_s),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_day(timestamps_ms),
fixed_width_column_wrapper<int16_t>{26, 4, 25});
expect_columns_equal(*extract_weekday(timestamps_D),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_weekday(timestamps_s),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_weekday(timestamps_ms),
fixed_width_column_wrapper<int16_t>{2, 3, 3});
expect_columns_equal(*extract_hour(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_hour(timestamps_s),
fixed_width_column_wrapper<int16_t>{14, 12, 7});
expect_columns_equal(*extract_hour(timestamps_ms),
fixed_width_column_wrapper<int16_t>{14, 12, 7});
expect_columns_equal(*extract_minute(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_minute(timestamps_s),
fixed_width_column_wrapper<int16_t>{1, 0, 32});
expect_columns_equal(*extract_minute(timestamps_ms),
fixed_width_column_wrapper<int16_t>{1, 0, 32});
expect_columns_equal(*extract_second(timestamps_D),
fixed_width_column_wrapper<int16_t>{0, 0, 0});
expect_columns_equal(*extract_second(timestamps_s),
fixed_width_column_wrapper<int16_t>{12, 0, 12});
expect_columns_equal(*extract_second(timestamps_ms),
fixed_width_column_wrapper<int16_t>{12, 0, 12});
}
template <typename T>
struct TypedDatetimeOpsTest : public cudf::test::BaseFixture {
cudaStream_t stream() { return cudaStream_t(0); }
cudf::size_type size() { return cudf::size_type(10); }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
TYPED_TEST_CASE(TypedDatetimeOpsTest, cudf::test::TimestampTypes);
TYPED_TEST(TypedDatetimeOpsTest, TestEmptyColumns) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto int16s_dtype =
cudf::data_type{cudf::experimental::type_to_id<int16_t>()};
auto timestamps_dtype = cudf::data_type{cudf::experimental::type_to_id<T>()};
cudf::column int16s{int16s_dtype, 0, rmm::device_buffer{0}};
cudf::column timestamps{timestamps_dtype, 0, rmm::device_buffer{0}};
expect_columns_equal(*extract_year(timestamps), int16s);
expect_columns_equal(*extract_month(timestamps), int16s);
expect_columns_equal(*extract_day(timestamps), int16s);
expect_columns_equal(*extract_weekday(timestamps), int16s);
expect_columns_equal(*extract_hour(timestamps), int16s);
expect_columns_equal(*extract_minute(timestamps), int16s);
expect_columns_equal(*extract_second(timestamps), int16s);
}
TYPED_TEST(TypedDatetimeOpsTest, TestExtractingGeneratedDatetimeComponents) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamps = generate_timestamps<T>(this->size(), time_point_ms(start),
time_point_ms(stop_));
auto expected_years = fixed_width_column_wrapper<int16_t>{
1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033};
auto expected_months =
fixed_width_column_wrapper<int16_t>{10, 8, 6, 4, 2, 1, 11, 9, 7, 5};
auto expected_days =
fixed_width_column_wrapper<int16_t>{11, 16, 20, 24, 26, 1, 5, 9, 14, 18};
auto expected_weekdays =
fixed_width_column_wrapper<int16_t>{6, 4, 2, 7, 5, 4, 2, 7, 5, 3};
auto expected_hours =
fixed_width_column_wrapper<int16_t>{19, 20, 21, 22, 23, 0, 0, 1, 2, 3};
auto expected_minutes =
fixed_width_column_wrapper<int16_t>{33, 26, 20, 13, 6, 0, 53, 46, 40, 33};
auto expected_seconds =
fixed_width_column_wrapper<int16_t>{20, 40, 0, 20, 40, 0, 20, 40, 0, 20};
// Special cases for timestamp_D: zero out the hh/mm/ss cols and +1 the
// expected weekdays
if (std::is_same<TypeParam, cudf::timestamp_D>::value) {
expected_days = fixed_width_column_wrapper<int16_t>{12, 17, 21, 25, 27,
1, 5, 9, 14, 18};
expected_weekdays =
fixed_width_column_wrapper<int16_t>{7, 5, 3, 1, 6, 4, 2, 7, 5, 3};
expected_hours =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
expected_minutes =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
expected_seconds =
fixed_width_column_wrapper<int16_t>{0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
}
expect_columns_equal(*extract_year(timestamps), expected_years);
expect_columns_equal(*extract_month(timestamps), expected_months);
expect_columns_equal(*extract_day(timestamps), expected_days);
expect_columns_equal(*extract_weekday(timestamps), expected_weekdays);
expect_columns_equal(*extract_hour(timestamps), expected_hours);
expect_columns_equal(*extract_minute(timestamps), expected_minutes);
expect_columns_equal(*extract_second(timestamps), expected_seconds);
}
TYPED_TEST(TypedDatetimeOpsTest,
TestExtractingGeneratedNullableDatetimeComponents) {
using T = TypeParam;
using namespace cudf::test;
using namespace cudf::datetime;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamps = generate_timestamps<T, true>(
this->size(), time_point_ms(start), time_point_ms(stop_));
auto expected_years = fixed_width_column_wrapper<int16_t>{
{1890, 1906, 1922, 1938, 1954, 1970, 1985, 2001, 2017, 2033},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_months = fixed_width_column_wrapper<int16_t>{
{10, 8, 6, 4, 2, 1, 11, 9, 7, 5},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_days = fixed_width_column_wrapper<int16_t>{
{11, 16, 20, 24, 26, 1, 5, 9, 14, 18},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_weekdays = fixed_width_column_wrapper<int16_t>{
{6, 4, 2, 7, 5, 4, 2, 7, 5, 3},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_hours = fixed_width_column_wrapper<int16_t>{
{19, 20, 21, 22, 23, 0, 0, 1, 2, 3},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_minutes = fixed_width_column_wrapper<int16_t>{
{33, 26, 20, 13, 6, 0, 53, 46, 40, 33},
{true, false, true, false, true, false, true, false, true, false}};
auto expected_seconds = fixed_width_column_wrapper<int16_t>{
{20, 40, 0, 20, 40, 0, 20, 40, 0, 20},
{true, false, true, false, true, false, true, false, true, false}};
// Special cases for timestamp_D: zero out the hh/mm/ss cols and +1 the
// expected weekdays
if (std::is_same<TypeParam, cudf::timestamp_D>::value) {
expected_days = fixed_width_column_wrapper<int16_t>{
{12, 17, 21, 25, 27, 1, 5, 9, 14, 18},
{true, false, true, false, true, false, true, false, true, false}};
expected_weekdays = fixed_width_column_wrapper<int16_t>{
{7, 5, 3, 1, 6, 4, 2, 7, 5, 3},
{true, false, true, false, true, false, true, false, true, false}};
expected_hours = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
expected_minutes = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
expected_seconds = fixed_width_column_wrapper<int16_t>{
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{true, false, true, false, true, false, true, false, true, false}};
}
expect_columns_equal(*extract_year(timestamps), expected_years);
expect_columns_equal(*extract_month(timestamps), expected_months);
expect_columns_equal(*extract_day(timestamps), expected_days);
expect_columns_equal(*extract_weekday(timestamps), expected_weekdays);
expect_columns_equal(*extract_hour(timestamps), expected_hours);
expect_columns_equal(*extract_minute(timestamps), expected_minutes);
expect_columns_equal(*extract_second(timestamps), expected_seconds);
}
|
191f1f442431d9f213e67ce2a3add77aa5333e84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright © 2016 Mikko Ronkainen <[email protected]>
// License: MIT, see the LICENSE file.
#include "Precompiled.h"
#ifdef USE_ROCM
#include <device_launch_parameters.h>
#endif
#include "Core/Common.h"
#include "Core/Film.h"
#include "Core/Ray.h"
#include "Core/Scene.h"
#include "Core/Intersection.h"
#include "Renderers/CudaRenderer.h"
#include "Renderers/Renderer.h"
#include "Utils/CudaUtils.h"
#include "Utils/Settings.h"
#include "App.h"
using namespace Valo;
CudaRenderer::CudaRenderer() : sceneAlloc(true), filmAlloc(true), randomStatesAlloc(false)
{
}
void CudaRenderer::initialize()
{
sceneAlloc.resize(1);
filmAlloc.resize(1);
}
void CudaRenderer::resize(uint32_t width, uint32_t height)
{
std::vector<RandomGeneratorState> randomStates(width * height);
std::random_device rd;
std::mt19937_64 generator(rd());
for (RandomGeneratorState& randomState : randomStates)
{
randomState.state = generator();
randomState.inc = generator();
}
randomStatesAlloc.resize(width * height);
randomStatesAlloc.write(randomStates.data(), width * height);
}
#ifdef USE_ROCM
__global__ void renderKernel(const Scene& scene, Film& film, RandomGeneratorState* randomStates, bool filtering, uint32_t pixelSamples)
{
uint32_t x = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t y = threadIdx.y + blockIdx.y * blockDim.y;
uint32_t index = y * film.getWidth() + x;
if (x >= film.getWidth() || y >= film.getHeight())
return;
Random random(randomStates[index]);
for (uint32_t i = 0; i < pixelSamples; ++i)
{
Vector2 pixel = Vector2(x, y);
float filterWeight = 1.0f;
if (filtering && scene.renderer.filtering)
{
Vector2 offset = (random.getVector2() - Vector2(0.5f, 0.5f)) * 2.0f * scene.renderer.filter.getRadius();
filterWeight = scene.renderer.filter.getWeight(offset);
pixel += offset;
}
CameraRay cameraRay = scene.camera.getRay(pixel, random);
cameraRay.ray.isPrimaryRay = true;
if (cameraRay.offLens)
{
film.addSample(x, y, scene.general.offLensColor, filterWeight);
randomStates[index] = random.getState();
return;
}
Intersection intersection;
if (!scene.intersect(cameraRay.ray, intersection))
{
film.addSample(x, y, scene.general.backgroundColor, filterWeight);
randomStates[index] = random.getState();
return;
}
if (intersection.hasColor)
{
film.addSample(x, y, intersection.color, filterWeight);
randomStates[index] = random.getState();
return;
}
scene.calculateNormalMapping(intersection);
if (scene.general.normalVisualization)
{
film.addSample(x, y, Color::fromNormal(intersection.normal), filterWeight);
randomStates[index] = random.getState();
return;
}
Color color = scene.integrator.calculateLight(scene, intersection, cameraRay.ray, random);
if (scene.volume.enabled)
{
VolumeEffect volumeEffect = Integrator::calculateVolumeEffect(scene, cameraRay.ray.origin, intersection.position, random);
color = color * volumeEffect.transmittance + volumeEffect.emittance;
}
if (!color.isNegative() && !color.isNan())
film.addSample(x, y, color * cameraRay.brightness, filterWeight);
}
randomStates[index] = random.getState();
}
void CudaRenderer::render(RenderJob& job, bool filtering)
{
Scene& scene = *job.scene;
Film& film = *job.film;
Settings& settings = App::getSettings();
sceneAlloc.write(&scene, 1);
filmAlloc.write(&film, 1);
dim3 dimBlock(16, 16);
dim3 dimGrid;
dimGrid.x = (film.getWidth() + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (film.getHeight() + dimBlock.y - 1) / dimBlock.y;
hipLaunchKernelGGL(( renderKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, *sceneAlloc.getDevicePtr(), *filmAlloc.getDevicePtr(), randomStatesAlloc.getDevicePtr(), filtering, settings.renderer.pixelSamples);
CudaUtils::checkError(hipPeekAtLastError(), "Could not launch render kernel");
CudaUtils::checkError(hipDeviceSynchronize(), "Could not execute render kernel");
job.totalSampleCount += film.getWidth() * film.getHeight() * settings.renderer.pixelSamples;
}
#else
void CudaRenderer::render(RenderJob& job, bool filtering)
{
(void)filtering;
job.film->clear(RendererType::CPU);
}
#endif
| 191f1f442431d9f213e67ce2a3add77aa5333e84.cu | // Copyright © 2016 Mikko Ronkainen <[email protected]>
// License: MIT, see the LICENSE file.
#include "Precompiled.h"
#ifdef USE_CUDA
#include <device_launch_parameters.h>
#endif
#include "Core/Common.h"
#include "Core/Film.h"
#include "Core/Ray.h"
#include "Core/Scene.h"
#include "Core/Intersection.h"
#include "Renderers/CudaRenderer.h"
#include "Renderers/Renderer.h"
#include "Utils/CudaUtils.h"
#include "Utils/Settings.h"
#include "App.h"
using namespace Valo;
CudaRenderer::CudaRenderer() : sceneAlloc(true), filmAlloc(true), randomStatesAlloc(false)
{
}
void CudaRenderer::initialize()
{
sceneAlloc.resize(1);
filmAlloc.resize(1);
}
void CudaRenderer::resize(uint32_t width, uint32_t height)
{
std::vector<RandomGeneratorState> randomStates(width * height);
std::random_device rd;
std::mt19937_64 generator(rd());
for (RandomGeneratorState& randomState : randomStates)
{
randomState.state = generator();
randomState.inc = generator();
}
randomStatesAlloc.resize(width * height);
randomStatesAlloc.write(randomStates.data(), width * height);
}
#ifdef USE_CUDA
__global__ void renderKernel(const Scene& scene, Film& film, RandomGeneratorState* randomStates, bool filtering, uint32_t pixelSamples)
{
uint32_t x = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t y = threadIdx.y + blockIdx.y * blockDim.y;
uint32_t index = y * film.getWidth() + x;
if (x >= film.getWidth() || y >= film.getHeight())
return;
Random random(randomStates[index]);
for (uint32_t i = 0; i < pixelSamples; ++i)
{
Vector2 pixel = Vector2(x, y);
float filterWeight = 1.0f;
if (filtering && scene.renderer.filtering)
{
Vector2 offset = (random.getVector2() - Vector2(0.5f, 0.5f)) * 2.0f * scene.renderer.filter.getRadius();
filterWeight = scene.renderer.filter.getWeight(offset);
pixel += offset;
}
CameraRay cameraRay = scene.camera.getRay(pixel, random);
cameraRay.ray.isPrimaryRay = true;
if (cameraRay.offLens)
{
film.addSample(x, y, scene.general.offLensColor, filterWeight);
randomStates[index] = random.getState();
return;
}
Intersection intersection;
if (!scene.intersect(cameraRay.ray, intersection))
{
film.addSample(x, y, scene.general.backgroundColor, filterWeight);
randomStates[index] = random.getState();
return;
}
if (intersection.hasColor)
{
film.addSample(x, y, intersection.color, filterWeight);
randomStates[index] = random.getState();
return;
}
scene.calculateNormalMapping(intersection);
if (scene.general.normalVisualization)
{
film.addSample(x, y, Color::fromNormal(intersection.normal), filterWeight);
randomStates[index] = random.getState();
return;
}
Color color = scene.integrator.calculateLight(scene, intersection, cameraRay.ray, random);
if (scene.volume.enabled)
{
VolumeEffect volumeEffect = Integrator::calculateVolumeEffect(scene, cameraRay.ray.origin, intersection.position, random);
color = color * volumeEffect.transmittance + volumeEffect.emittance;
}
if (!color.isNegative() && !color.isNan())
film.addSample(x, y, color * cameraRay.brightness, filterWeight);
}
randomStates[index] = random.getState();
}
void CudaRenderer::render(RenderJob& job, bool filtering)
{
Scene& scene = *job.scene;
Film& film = *job.film;
Settings& settings = App::getSettings();
sceneAlloc.write(&scene, 1);
filmAlloc.write(&film, 1);
dim3 dimBlock(16, 16);
dim3 dimGrid;
dimGrid.x = (film.getWidth() + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (film.getHeight() + dimBlock.y - 1) / dimBlock.y;
renderKernel<<<dimGrid, dimBlock>>>(*sceneAlloc.getDevicePtr(), *filmAlloc.getDevicePtr(), randomStatesAlloc.getDevicePtr(), filtering, settings.renderer.pixelSamples);
CudaUtils::checkError(cudaPeekAtLastError(), "Could not launch render kernel");
CudaUtils::checkError(cudaDeviceSynchronize(), "Could not execute render kernel");
job.totalSampleCount += film.getWidth() * film.getHeight() * settings.renderer.pixelSamples;
}
#else
void CudaRenderer::render(RenderJob& job, bool filtering)
{
(void)filtering;
job.film->clear(RendererType::CPU);
}
#endif
|
c99d27a69522951a3faf10dcd6ee9ce4795bd952.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
> File Name : threadfence.cpp
> Author : Liu Junhong
> Mail : [email protected]
> Created Time: Tuesday, February 11, 2020 PM11:44:49 HKT
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace std;
__device__ unsigned int count = 0;
__shared__ bool isLastBlockDone;
__device__ float calculatePartialSum(const float* array, int N) {
__shared__ float tmp[32];
const float* local_array = array + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
int laneId = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int warpNum = blockDim.x / 32;
float sum = 0;
if (threadIdx.x + blockIdx.x * blockDim.x < N) {
sum = local_array[tid];
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
if(laneId == 0) {
tmp[warpId] = sum;
}
__syncthreads();
if(warpId == 0) {
if (laneId < warpNum) {
sum = tmp[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
__syncthreads();
return sum;
}
__device__ float calculateTotalSum(volatile float* result) {
int tid = threadIdx.x;
int laneId = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int warpNum = blockDim.x / 32;
float sum = 0;
if (gridDim.x < 32) {
if (warpId == 0) {
if (laneId < gridDim.x) {
sum = result[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
}
else {
for (int i = tid; i < gridDim.x; i+= blockDim.x) {
sum += result[i];
}
__shared__ float tmp[32];
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
if(laneId == 0) {
tmp[warpId] = sum;
}
__syncthreads();
if(warpId == 0) {
if (laneId < warpNum) {
sum = tmp[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
__syncthreads();
}
return sum;
}
__global__ void sum(const float* array, unsigned int N, volatile float* result) {
float partialSum = calculatePartialSum(array, N);
if (threadIdx.x == 0) {
result[blockIdx.x] = partialSum;
__threadfence(); // attention
unsigned int value = atomicInc(&count, gridDim.x);
isLastBlockDone = (value == (gridDim.x - 1));
}
__syncthreads(); //must have
if (isLastBlockDone) {
float totalSum = calculateTotalSum(result);
if (threadIdx.x == 0) {
result[0] = totalSum;
count = 0;
}
}
}
int main(int argc, char **argv){
int N = 1048;
float* cpu_array = (float*)malloc(sizeof(float) * N);
float* gpu_array;
float* gpu_result;
hipMalloc((void**)&gpu_array, sizeof(float)*N);
for(int i = 0; i < N; i++) {
cpu_array[i] = i;
}
hipMemcpy(gpu_array, cpu_array, sizeof(float)*N, hipMemcpyHostToDevice);
int threadsNum = 256;
int blocksNum = (N + threadsNum - 1) / threadsNum;
hipMalloc((void**)&gpu_result, sizeof(float)*blocksNum);
float* cpu_result = (float*)malloc(sizeof(float)*blocksNum);
hipLaunchKernelGGL(( sum), dim3(blocksNum), dim3(threadsNum), 0, 0, gpu_array, N, gpu_result);
hipMemcpy(cpu_result, gpu_result, sizeof(float)*blocksNum, hipMemcpyDeviceToHost);
// for (int i = 0; i < blocksNum; i++) {
// printf("%f\t", cpu_result[i]);
// }
// printf("\n");
printf("%f \n", cpu_result[0]);
return 0;
}
| c99d27a69522951a3faf10dcd6ee9ce4795bd952.cu | /*************************************************************************
> File Name : threadfence.cpp
> Author : Liu Junhong
> Mail : [email protected]
> Created Time: Tuesday, February 11, 2020 PM11:44:49 HKT
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
using namespace std;
__device__ unsigned int count = 0;
__shared__ bool isLastBlockDone;
__device__ float calculatePartialSum(const float* array, int N) {
__shared__ float tmp[32];
const float* local_array = array + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
int laneId = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int warpNum = blockDim.x / 32;
float sum = 0;
if (threadIdx.x + blockIdx.x * blockDim.x < N) {
sum = local_array[tid];
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
if(laneId == 0) {
tmp[warpId] = sum;
}
__syncthreads();
if(warpId == 0) {
if (laneId < warpNum) {
sum = tmp[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
__syncthreads();
return sum;
}
__device__ float calculateTotalSum(volatile float* result) {
int tid = threadIdx.x;
int laneId = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int warpNum = blockDim.x / 32;
float sum = 0;
if (gridDim.x < 32) {
if (warpId == 0) {
if (laneId < gridDim.x) {
sum = result[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
}
else {
for (int i = tid; i < gridDim.x; i+= blockDim.x) {
sum += result[i];
}
__shared__ float tmp[32];
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
if(laneId == 0) {
tmp[warpId] = sum;
}
__syncthreads();
if(warpId == 0) {
if (laneId < warpNum) {
sum = tmp[laneId];
}
else {
sum = 0;
}
for (int i = 16; i >= 1; i /= 2) {
sum += __shfl_xor_sync(0xffffffff, sum, i, 32);
}
}
__syncthreads();
}
return sum;
}
__global__ void sum(const float* array, unsigned int N, volatile float* result) {
float partialSum = calculatePartialSum(array, N);
if (threadIdx.x == 0) {
result[blockIdx.x] = partialSum;
__threadfence(); // attention
unsigned int value = atomicInc(&count, gridDim.x);
isLastBlockDone = (value == (gridDim.x - 1));
}
__syncthreads(); //must have
if (isLastBlockDone) {
float totalSum = calculateTotalSum(result);
if (threadIdx.x == 0) {
result[0] = totalSum;
count = 0;
}
}
}
int main(int argc, char **argv){
int N = 1048;
float* cpu_array = (float*)malloc(sizeof(float) * N);
float* gpu_array;
float* gpu_result;
cudaMalloc((void**)&gpu_array, sizeof(float)*N);
for(int i = 0; i < N; i++) {
cpu_array[i] = i;
}
cudaMemcpy(gpu_array, cpu_array, sizeof(float)*N, cudaMemcpyHostToDevice);
int threadsNum = 256;
int blocksNum = (N + threadsNum - 1) / threadsNum;
cudaMalloc((void**)&gpu_result, sizeof(float)*blocksNum);
float* cpu_result = (float*)malloc(sizeof(float)*blocksNum);
sum<<<blocksNum, threadsNum>>>(gpu_array, N, gpu_result);
cudaMemcpy(cpu_result, gpu_result, sizeof(float)*blocksNum, cudaMemcpyDeviceToHost);
// for (int i = 0; i < blocksNum; i++) {
// printf("%f\t", cpu_result[i]);
// }
// printf("\n");
printf("%f \n", cpu_result[0]);
return 0;
}
|