hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
44ac40cdc92d19ede61c39eaee417162c0560ff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/*
Function: trilinear devoxelization (forward)
Args:
b : batch size
c : #channels
n : number of points
r : voxel resolution
r2 : r ** 2
r3 : r ** 3
coords : the coordinates of points, FloatTensor[b, 3, n]
feat : features, FloatTensor[b, c, r3]
inds : the voxel indices of point cube, IntTensor[b, 8, n]
wgts : weight for trilinear interpolation, FloatTensor[b, 8, n]
outs : outputs, FloatTensor[b, c, n]
*/
__global__ void trilinear_devoxelize_kernel(int b, int c, int n, int r, int r2,
int r3, bool is_training,
const float *__restrict__ coords,
const float *__restrict__ feat,
int *__restrict__ inds,
float *__restrict__ wgts,
float *__restrict__ outs) {
int batch_index = blockIdx.x;
int stride = blockDim.x;
int index = threadIdx.x;
coords += batch_index * n * 3;
inds += batch_index * n * 8;
wgts += batch_index * n * 8;
feat += batch_index * c * r3;
outs += batch_index * c * n;
for (int i = index; i < n; i += stride) {
float x = coords[i];
float y = coords[i + n];
float z = coords[i + n + n];
float x_lo_f = floorf(x);
float y_lo_f = floorf(y);
float z_lo_f = floorf(z);
float x_d_1 = x - x_lo_f; // / (x_hi_f - x_lo_f + 1e-8f)
float y_d_1 = y - y_lo_f;
float z_d_1 = z - z_lo_f;
float x_d_0 = 1.0f - x_d_1;
float y_d_0 = 1.0f - y_d_1;
float z_d_0 = 1.0f - z_d_1;
float wgt000 = x_d_0 * y_d_0 * z_d_0;
float wgt001 = x_d_0 * y_d_0 * z_d_1;
float wgt010 = x_d_0 * y_d_1 * z_d_0;
float wgt011 = x_d_0 * y_d_1 * z_d_1;
float wgt100 = x_d_1 * y_d_0 * z_d_0;
float wgt101 = x_d_1 * y_d_0 * z_d_1;
float wgt110 = x_d_1 * y_d_1 * z_d_0;
float wgt111 = x_d_1 * y_d_1 * z_d_1;
int x_lo = static_cast<int>(x_lo_f);
int y_lo = static_cast<int>(y_lo_f);
int z_lo = static_cast<int>(z_lo_f);
int x_hi = (x_d_1 > 0) ? -1 : 0;
int y_hi = (y_d_1 > 0) ? -1 : 0;
int z_hi = (z_d_1 > 0) ? 1 : 0;
int idx000 = x_lo * r2 + y_lo * r + z_lo;
int idx001 = idx000 + z_hi; // x_lo * r2 + y_lo * r + z_hi;
int idx010 = idx000 + (y_hi & r); // x_lo * r2 + y_hi * r + z_lo;
int idx011 = idx010 + z_hi; // x_lo * r2 + y_hi * r + z_hi;
int idx100 = idx000 + (x_hi & r2); // x_hi * r2 + y_lo * r + z_lo;
int idx101 = idx100 + z_hi; // x_hi * r2 + y_lo * r + z_hi;
int idx110 = idx100 + (y_hi & r); // x_hi * r2 + y_hi * r + z_lo;
int idx111 = idx110 + z_hi; // x_hi * r2 + y_hi * r + z_hi;
if (is_training) {
wgts[i] = wgt000;
wgts[i + n] = wgt001;
wgts[i + n * 2] = wgt010;
wgts[i + n * 3] = wgt011;
wgts[i + n * 4] = wgt100;
wgts[i + n * 5] = wgt101;
wgts[i + n * 6] = wgt110;
wgts[i + n * 7] = wgt111;
inds[i] = idx000;
inds[i + n] = idx001;
inds[i + n * 2] = idx010;
inds[i + n * 3] = idx011;
inds[i + n * 4] = idx100;
inds[i + n * 5] = idx101;
inds[i + n * 6] = idx110;
inds[i + n * 7] = idx111;
}
for (int j = 0; j < c; j++) {
int jr3 = j * r3;
outs[j * n + i] =
wgt000 * feat[jr3 + idx000] + wgt001 * feat[jr3 + idx001] +
wgt010 * feat[jr3 + idx010] + wgt011 * feat[jr3 + idx011] +
wgt100 * feat[jr3 + idx100] + wgt101 * feat[jr3 + idx101] +
wgt110 * feat[jr3 + idx110] + wgt111 * feat[jr3 + idx111];
}
}
}
/*
Function: trilinear devoxelization (backward)
Args:
b : batch size
c : #channels
n : number of points
r3 : voxel cube size = voxel resolution ** 3
inds : the voxel indices of point cube, IntTensor[b, 8, n]
wgts : weight for trilinear interpolation, FloatTensor[b, 8, n]
grad_y : grad outputs, FloatTensor[b, c, n]
grad_x : grad inputs, FloatTensor[b, c, r3]
*/
__global__ void trilinear_devoxelize_grad_kernel(
int b, int c, int n, int r3, const int *__restrict__ inds,
const float *__restrict__ wgts, const float *__restrict__ grad_y,
float *__restrict__ grad_x) {
int batch_index = blockIdx.x;
int stride = blockDim.x;
int index = threadIdx.x;
inds += batch_index * n * 8;
wgts += batch_index * n * 8;
grad_x += batch_index * c * r3;
grad_y += batch_index * c * n;
for (int i = index; i < n; i += stride) {
int idx000 = inds[i];
int idx001 = inds[i + n];
int idx010 = inds[i + n * 2];
int idx011 = inds[i + n * 3];
int idx100 = inds[i + n * 4];
int idx101 = inds[i + n * 5];
int idx110 = inds[i + n * 6];
int idx111 = inds[i + n * 7];
float wgt000 = wgts[i];
float wgt001 = wgts[i + n];
float wgt010 = wgts[i + n * 2];
float wgt011 = wgts[i + n * 3];
float wgt100 = wgts[i + n * 4];
float wgt101 = wgts[i + n * 5];
float wgt110 = wgts[i + n * 6];
float wgt111 = wgts[i + n * 7];
for (int j = 0; j < c; j++) {
int jr3 = j * r3;
float g = grad_y[j * n + i];
atomicAdd(grad_x + jr3 + idx000, wgt000 * g);
atomicAdd(grad_x + jr3 + idx001, wgt001 * g);
atomicAdd(grad_x + jr3 + idx010, wgt010 * g);
atomicAdd(grad_x + jr3 + idx011, wgt011 * g);
atomicAdd(grad_x + jr3 + idx100, wgt100 * g);
atomicAdd(grad_x + jr3 + idx101, wgt101 * g);
atomicAdd(grad_x + jr3 + idx110, wgt110 * g);
atomicAdd(grad_x + jr3 + idx111, wgt111 * g);
}
}
}
void trilinearDevoxelizeKernelLauncher(int b, int c, int n, int r, int r2, int r3,
bool training, const float *coords, const float *feat,
int *inds, float *wgts, float *outs) {
hipLaunchKernelGGL(( trilinear_devoxelize_kernel), dim3(b), dim3(512), 0, 0,
b, c, n, r, r2, r3, training, coords, feat, inds, wgts, outs);
//CUDA_CHECK_ERRORS();
}
void trilinearDevoxelizeKernelGradLauncher(int b, int c, int n, int r3, const int *inds,
const float *wgts, const float *grad_y,
float *grad_x) {
hipLaunchKernelGGL(( trilinear_devoxelize_grad_kernel), dim3(b), dim3(512), 0, 0,
b, c, n, r3, inds, wgts, grad_y, grad_x);
//CUDA_CHECK_ERRORS();
}
| 44ac40cdc92d19ede61c39eaee417162c0560ff8.cu | #include <stdio.h>
#include <stdlib.h>
/*
Function: trilinear devoxelization (forward)
Args:
b : batch size
c : #channels
n : number of points
r : voxel resolution
r2 : r ** 2
r3 : r ** 3
coords : the coordinates of points, FloatTensor[b, 3, n]
feat : features, FloatTensor[b, c, r3]
inds : the voxel indices of point cube, IntTensor[b, 8, n]
wgts : weight for trilinear interpolation, FloatTensor[b, 8, n]
outs : outputs, FloatTensor[b, c, n]
*/
__global__ void trilinear_devoxelize_kernel(int b, int c, int n, int r, int r2,
int r3, bool is_training,
const float *__restrict__ coords,
const float *__restrict__ feat,
int *__restrict__ inds,
float *__restrict__ wgts,
float *__restrict__ outs) {
int batch_index = blockIdx.x;
int stride = blockDim.x;
int index = threadIdx.x;
coords += batch_index * n * 3;
inds += batch_index * n * 8;
wgts += batch_index * n * 8;
feat += batch_index * c * r3;
outs += batch_index * c * n;
for (int i = index; i < n; i += stride) {
float x = coords[i];
float y = coords[i + n];
float z = coords[i + n + n];
float x_lo_f = floorf(x);
float y_lo_f = floorf(y);
float z_lo_f = floorf(z);
float x_d_1 = x - x_lo_f; // / (x_hi_f - x_lo_f + 1e-8f)
float y_d_1 = y - y_lo_f;
float z_d_1 = z - z_lo_f;
float x_d_0 = 1.0f - x_d_1;
float y_d_0 = 1.0f - y_d_1;
float z_d_0 = 1.0f - z_d_1;
float wgt000 = x_d_0 * y_d_0 * z_d_0;
float wgt001 = x_d_0 * y_d_0 * z_d_1;
float wgt010 = x_d_0 * y_d_1 * z_d_0;
float wgt011 = x_d_0 * y_d_1 * z_d_1;
float wgt100 = x_d_1 * y_d_0 * z_d_0;
float wgt101 = x_d_1 * y_d_0 * z_d_1;
float wgt110 = x_d_1 * y_d_1 * z_d_0;
float wgt111 = x_d_1 * y_d_1 * z_d_1;
int x_lo = static_cast<int>(x_lo_f);
int y_lo = static_cast<int>(y_lo_f);
int z_lo = static_cast<int>(z_lo_f);
int x_hi = (x_d_1 > 0) ? -1 : 0;
int y_hi = (y_d_1 > 0) ? -1 : 0;
int z_hi = (z_d_1 > 0) ? 1 : 0;
int idx000 = x_lo * r2 + y_lo * r + z_lo;
int idx001 = idx000 + z_hi; // x_lo * r2 + y_lo * r + z_hi;
int idx010 = idx000 + (y_hi & r); // x_lo * r2 + y_hi * r + z_lo;
int idx011 = idx010 + z_hi; // x_lo * r2 + y_hi * r + z_hi;
int idx100 = idx000 + (x_hi & r2); // x_hi * r2 + y_lo * r + z_lo;
int idx101 = idx100 + z_hi; // x_hi * r2 + y_lo * r + z_hi;
int idx110 = idx100 + (y_hi & r); // x_hi * r2 + y_hi * r + z_lo;
int idx111 = idx110 + z_hi; // x_hi * r2 + y_hi * r + z_hi;
if (is_training) {
wgts[i] = wgt000;
wgts[i + n] = wgt001;
wgts[i + n * 2] = wgt010;
wgts[i + n * 3] = wgt011;
wgts[i + n * 4] = wgt100;
wgts[i + n * 5] = wgt101;
wgts[i + n * 6] = wgt110;
wgts[i + n * 7] = wgt111;
inds[i] = idx000;
inds[i + n] = idx001;
inds[i + n * 2] = idx010;
inds[i + n * 3] = idx011;
inds[i + n * 4] = idx100;
inds[i + n * 5] = idx101;
inds[i + n * 6] = idx110;
inds[i + n * 7] = idx111;
}
for (int j = 0; j < c; j++) {
int jr3 = j * r3;
outs[j * n + i] =
wgt000 * feat[jr3 + idx000] + wgt001 * feat[jr3 + idx001] +
wgt010 * feat[jr3 + idx010] + wgt011 * feat[jr3 + idx011] +
wgt100 * feat[jr3 + idx100] + wgt101 * feat[jr3 + idx101] +
wgt110 * feat[jr3 + idx110] + wgt111 * feat[jr3 + idx111];
}
}
}
/*
Function: trilinear devoxelization (backward)
Args:
b : batch size
c : #channels
n : number of points
r3 : voxel cube size = voxel resolution ** 3
inds : the voxel indices of point cube, IntTensor[b, 8, n]
wgts : weight for trilinear interpolation, FloatTensor[b, 8, n]
grad_y : grad outputs, FloatTensor[b, c, n]
grad_x : grad inputs, FloatTensor[b, c, r3]
*/
__global__ void trilinear_devoxelize_grad_kernel(
int b, int c, int n, int r3, const int *__restrict__ inds,
const float *__restrict__ wgts, const float *__restrict__ grad_y,
float *__restrict__ grad_x) {
int batch_index = blockIdx.x;
int stride = blockDim.x;
int index = threadIdx.x;
inds += batch_index * n * 8;
wgts += batch_index * n * 8;
grad_x += batch_index * c * r3;
grad_y += batch_index * c * n;
for (int i = index; i < n; i += stride) {
int idx000 = inds[i];
int idx001 = inds[i + n];
int idx010 = inds[i + n * 2];
int idx011 = inds[i + n * 3];
int idx100 = inds[i + n * 4];
int idx101 = inds[i + n * 5];
int idx110 = inds[i + n * 6];
int idx111 = inds[i + n * 7];
float wgt000 = wgts[i];
float wgt001 = wgts[i + n];
float wgt010 = wgts[i + n * 2];
float wgt011 = wgts[i + n * 3];
float wgt100 = wgts[i + n * 4];
float wgt101 = wgts[i + n * 5];
float wgt110 = wgts[i + n * 6];
float wgt111 = wgts[i + n * 7];
for (int j = 0; j < c; j++) {
int jr3 = j * r3;
float g = grad_y[j * n + i];
atomicAdd(grad_x + jr3 + idx000, wgt000 * g);
atomicAdd(grad_x + jr3 + idx001, wgt001 * g);
atomicAdd(grad_x + jr3 + idx010, wgt010 * g);
atomicAdd(grad_x + jr3 + idx011, wgt011 * g);
atomicAdd(grad_x + jr3 + idx100, wgt100 * g);
atomicAdd(grad_x + jr3 + idx101, wgt101 * g);
atomicAdd(grad_x + jr3 + idx110, wgt110 * g);
atomicAdd(grad_x + jr3 + idx111, wgt111 * g);
}
}
}
void trilinearDevoxelizeKernelLauncher(int b, int c, int n, int r, int r2, int r3,
bool training, const float *coords, const float *feat,
int *inds, float *wgts, float *outs) {
trilinear_devoxelize_kernel<<<b, 512>>>(
b, c, n, r, r2, r3, training, coords, feat, inds, wgts, outs);
//CUDA_CHECK_ERRORS();
}
void trilinearDevoxelizeKernelGradLauncher(int b, int c, int n, int r3, const int *inds,
const float *wgts, const float *grad_y,
float *grad_x) {
trilinear_devoxelize_grad_kernel<<<b, 512>>>(
b, c, n, r3, inds, wgts, grad_y, grad_x);
//CUDA_CHECK_ERRORS();
}
|
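The pair above defines only the kernels and their launchers; a minimal host-side driver for the CUDA version might look like the sketch below. It is a hypothetical example: the sizes are illustrative, the buffers follow the docstring shapes (coords[b,3,n], feat[b,c,r3], inds/wgts[b,8,n], outs[b,c,n]), and it assumes the .cu file above is linked in so the launcher symbol resolves.

```cpp
// Hypothetical driver for trilinearDevoxelizeKernelLauncher (sketch).
#include <cuda_runtime.h>

void trilinearDevoxelizeKernelLauncher(int b, int c, int n, int r, int r2, int r3,
                                       bool training, const float *coords, const float *feat,
                                       int *inds, float *wgts, float *outs);

int main() {
    const int b = 2, c = 4, n = 1024, r = 8, r2 = r * r, r3 = r2 * r;
    float *coords, *feat, *wgts, *outs;
    int *inds;
    cudaMalloc(&coords, b * 3 * n * sizeof(float));    // [b, 3, n]
    cudaMalloc(&feat, b * c * r3 * sizeof(float));     // [b, c, r3]
    cudaMalloc(&inds, b * 8 * n * sizeof(int));        // [b, 8, n]
    cudaMalloc(&wgts, b * 8 * n * sizeof(float));      // [b, 8, n]
    cudaMalloc(&outs, b * c * n * sizeof(float));      // [b, c, n]
    // Zeroed coords put every point at voxel (0,0,0); real inputs must stay
    // inside the r*r*r grid so the eight corner indices remain in range.
    cudaMemset(coords, 0, b * 3 * n * sizeof(float));
    cudaMemset(feat, 0, b * c * r3 * sizeof(float));
    trilinearDevoxelizeKernelLauncher(b, c, n, r, r2, r3, /*training=*/true,
                                      coords, feat, inds, wgts, outs);
    cudaDeviceSynchronize();
    cudaFree(coords); cudaFree(feat); cudaFree(inds); cudaFree(wgts); cudaFree(outs);
    return 0;
}
```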
23b45a8de769ada6797d6795233b8582fd486636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "struct_types.cuh"
#include "scalar_multiplication.cuh"
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <ctime>
template <size_t N>
void generateScalarArray(BigNum<N> *arr, int count) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<unsigned int> dis(0, UINT32_MAX);
for (int i = 0; i < count; i++) {
for (int j = 0; j < N; j++) {
arr[i].value[j] = dis(gen);
}
}
}
template <size_t N>
void generatePointArray(AffinePoint<N> *arr, int count) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<unsigned int> dis(0, UINT32_MAX);
for (int i = 0; i < count; i++) {
for (int j = 0; j < N; j++) {
arr[i].x.value[j] = dis(gen);
arr[i].y.value[j] = dis(gen);
}
}
}
template <size_t N>
void printScalarFirstElement(BigNum<N> *arr) {
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].value[j]);
}
printf("\n");
}
template <size_t N>
void printPointFirstElement(AffinePoint<N> *arr) {
printf("x ");
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].x.value[j]);
}
printf("y ");
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].y.value[j]);
}
printf("\n");
}
int main()
{
//Set launch device.
hipError_t cudaStat = hipSetDevice(0);
if (cudaStat != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
hipDeviceSynchronize();
/*test block*/
{
const int num_elems = 65536;
AffinePoint<6> *result;
BigNum<6> *scalar;
AffinePoint<6> *point;
point = (AffinePoint<6>*) malloc(num_elems * sizeof(AffinePoint<6>));
scalar = (BigNum<6>*) malloc(num_elems * sizeof(BigNum<6>));
result = (AffinePoint<6>*) malloc(num_elems * sizeof(AffinePoint<6>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { {0x82ff1012, 0xf4ff0afd, 0x43a18800, 0x7cbf20eb, 0xb03090f6, 0x188da80e},
{ 0x1e794811, 0x73f977a1, 0x6b24cdd5, 0x631011ed, 0xffc8da78, 0x7192b95} };
}
printf("scalar mult secp192r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp192r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<7> *result;
BigNum<7> *scalar;
AffinePoint<7> *point;
point = (AffinePoint<7>*) malloc(num_elems * sizeof(AffinePoint<7>));
scalar = (BigNum<7>*) malloc(num_elems * sizeof(BigNum<7>));
result = (AffinePoint<7>*) malloc(num_elems * sizeof(AffinePoint<7>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0x115C1D21, 0x343280D6, 0x56C21122, 0x4A03C1D3, 0x321390B9, 0x6BB4BF7F, 0xB70E0CBD },
{ 0x85007E34, 0x44D58199, 0x5A074764, 0xCD4375A0, 0x4C22DFE6, 0xB5F723FB, 0xBD376388 } };
}
printf("scalar mult secp224r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp224r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<8> *result;
BigNum<8> *scalar;
AffinePoint<8> *point;
point = (AffinePoint<8>*) malloc(num_elems * sizeof(AffinePoint<8>));
scalar = (BigNum<8>*) malloc(num_elems * sizeof(BigNum<8>));
result = (AffinePoint<8>*) malloc(num_elems * sizeof(AffinePoint<8>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0xD898C296, 0xF4A13945, 0x2DEB33A0, 0x77037D81, 0x63A440F2, 0xF8BCE6E5, 0xE12C4247, 0x6B17D1F2 },
{ 0x37BF51F5, 0xCBB64068, 0x6B315ECE, 0x2BCE3357, 0x7C0F9E16, 0x8EE7EB4A, 0xFE1A7F9B, 0x4FE342E2 } };
}
printf("scalar mult secp256r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp256r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<12> *result;
BigNum<12> *scalar;
AffinePoint<12> *point;
point = (AffinePoint<12>*) malloc(num_elems * sizeof(AffinePoint<12>));
scalar = (BigNum<12>*) malloc(num_elems * sizeof(BigNum<12>));
result = (AffinePoint<12>*) malloc(num_elems * sizeof(AffinePoint<12>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0x72760AB7, 0x3A545E38, 0xBF55296C, 0x5502F25D, 0x82542A38, 0x59F741E0, 0x8BA79B98, 0x6E1D3B62, 0xF320AD74, 0x8EB1C71E, 0xBE8B0537, 0xAA87CA22 },
{ 0x90EA0E5F, 0x7A431D7C, 0x1D7E819D, 0x0A60B1CE, 0xB5F0B8C0, 0xE9DA3113, 0x289A147C, 0xF8F41DBD, 0x9292DC29, 0x5D9E98BF, 0x96262C6F, 0x3617DE4A } };
}
printf("scalar mult secp384r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp384r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<17> *result;
BigNum<17> *scalar;
AffinePoint<17> *point;
point = (AffinePoint<17>*) malloc(num_elems * sizeof(AffinePoint<17>));
scalar = (BigNum<17>*) malloc(num_elems * sizeof(BigNum<17>));
result = (AffinePoint<17>*) malloc(num_elems * sizeof(AffinePoint<17>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0xC2E5BD66, 0xF97E7E31, 0x856A429B, 0x3348B3C1, 0xA2FFA8DE, 0xFE1DC127, 0xEFE75928, 0xA14B5E77, 0x6B4D3DBA, 0xF828AF60, 0x053FB521, 0x9C648139,
0x2395B442, 0x9E3ECB66, 0x0404E9CD, 0x858E06B7, 0xC6 },
{ 0x9FD16650, 0x88BE9476, 0xA272C240, 0x353C7086, 0x3FAD0761, 0xC550B901, 0x5EF42640, 0x97EE7299, 0x273E662C, 0x17AFBD17, 0x579B4468, 0x98F54449,
0x2C7D1BD9, 0x5C8A5FB4, 0x9A3BC004, 0x39296A78, 0x118 } };
}
printf("scalar mult secp521r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp521r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
/*end check arithm block*/
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
} | 23b45a8de769ada6797d6795233b8582fd486636.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "struct_types.cuh"
#include "scalar_multiplication.cuh"
#include <random>
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <ctime>
template <size_t N>
void generateScalarArray(BigNum<N> *arr, int count) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<unsigned int> dis(0, UINT32_MAX);
for (int i = 0; i < count; i++) {
for (int j = 0; j < N; j++) {
arr[i].value[j] = dis(gen);
}
}
}
template <size_t N>
void generatePointArray(AffinePoint<N> *arr, int count) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<unsigned int> dis(0, UINT32_MAX);
for (int i = 0; i < count; i++) {
for (int j = 0; j < N; j++) {
arr[i].x.value[j] = dis(gen);
arr[i].y.value[j] = dis(gen);
}
}
}
template <size_t N>
void printScalarFirstElement(BigNum<N> *arr) {
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].value[j]);
}
printf("\n");
}
template <size_t N>
void printPointFirstElement(AffinePoint<N> *arr) {
printf("x ");
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].x.value[j]);
}
printf("y ");
for (int j = 0; j < N; j++) {
printf("%x ", arr[0].y.value[j]);
}
printf("\n");
}
int main()
{
//Set launch device.
cudaError_t cudaStat = cudaSetDevice(0);
if (cudaStat != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
cudaDeviceSynchronize();
/*test block*/
{
const int num_elems = 65536;
AffinePoint<6> *result;
BigNum<6> *scalar;
AffinePoint<6> *point;
point = (AffinePoint<6>*) malloc(num_elems * sizeof(AffinePoint<6>));
scalar = (BigNum<6>*) malloc(num_elems * sizeof(BigNum<6>));
result = (AffinePoint<6>*) malloc(num_elems * sizeof(AffinePoint<6>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { {0x82ff1012, 0xf4ff0afd, 0x43a18800, 0x7cbf20eb, 0xb03090f6, 0x188da80e},
{ 0x1e794811, 0x73f977a1, 0x6b24cdd5, 0x631011ed, 0xffc8da78, 0x7192b95} };
}
printf("scalar mult secp192r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp192r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<7> *result;
BigNum<7> *scalar;
AffinePoint<7> *point;
point = (AffinePoint<7>*) malloc(num_elems * sizeof(AffinePoint<7>));
scalar = (BigNum<7>*) malloc(num_elems * sizeof(BigNum<7>));
result = (AffinePoint<7>*) malloc(num_elems * sizeof(AffinePoint<7>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0x115C1D21, 0x343280D6, 0x56C21122, 0x4A03C1D3, 0x321390B9, 0x6BB4BF7F, 0xB70E0CBD },
{ 0x85007E34, 0x44D58199, 0x5A074764, 0xCD4375A0, 0x4C22DFE6, 0xB5F723FB, 0xBD376388 } };
}
printf("scalar mult secp224r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp224r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<8> *result;
BigNum<8> *scalar;
AffinePoint<8> *point;
point = (AffinePoint<8>*) malloc(num_elems * sizeof(AffinePoint<8>));
scalar = (BigNum<8>*) malloc(num_elems * sizeof(BigNum<8>));
result = (AffinePoint<8>*) malloc(num_elems * sizeof(AffinePoint<8>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0xD898C296, 0xF4A13945, 0x2DEB33A0, 0x77037D81, 0x63A440F2, 0xF8BCE6E5, 0xE12C4247, 0x6B17D1F2 },
{ 0x37BF51F5, 0xCBB64068, 0x6B315ECE, 0x2BCE3357, 0x7C0F9E16, 0x8EE7EB4A, 0xFE1A7F9B, 0x4FE342E2 } };
}
printf("scalar mult secp256r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp256r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<12> *result;
BigNum<12> *scalar;
AffinePoint<12> *point;
point = (AffinePoint<12>*) malloc(num_elems * sizeof(AffinePoint<12>));
scalar = (BigNum<12>*) malloc(num_elems * sizeof(BigNum<12>));
result = (AffinePoint<12>*) malloc(num_elems * sizeof(AffinePoint<12>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0x72760AB7, 0x3A545E38, 0xBF55296C, 0x5502F25D, 0x82542A38, 0x59F741E0, 0x8BA79B98, 0x6E1D3B62, 0xF320AD74, 0x8EB1C71E, 0xBE8B0537, 0xAA87CA22 },
{ 0x90EA0E5F, 0x7A431D7C, 0x1D7E819D, 0x0A60B1CE, 0xB5F0B8C0, 0xE9DA3113, 0x289A147C, 0xF8F41DBD, 0x9292DC29, 0x5D9E98BF, 0x96262C6F, 0x3617DE4A } };
}
printf("scalar mult secp384r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp384r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
{
const int num_elems = 65536;
AffinePoint<17> *result;
BigNum<17> *scalar;
AffinePoint<17> *point;
point = (AffinePoint<17>*) malloc(num_elems * sizeof(AffinePoint<17>));
scalar = (BigNum<17>*) malloc(num_elems * sizeof(BigNum<17>));
result = (AffinePoint<17>*) malloc(num_elems * sizeof(AffinePoint<17>));
generateScalarArray(scalar, num_elems);
for (int i = 0; i < num_elems; i++) {
point[i] = { { 0xC2E5BD66, 0xF97E7E31, 0x856A429B, 0x3348B3C1, 0xA2FFA8DE, 0xFE1DC127, 0xEFE75928, 0xA14B5E77, 0x6B4D3DBA, 0xF828AF60, 0x053FB521, 0x9C648139,
0x2395B442, 0x9E3ECB66, 0x0404E9CD, 0x858E06B7, 0xC6 },
{ 0x9FD16650, 0x88BE9476, 0xA272C240, 0x353C7086, 0x3FAD0761, 0xC550B901, 0x5EF42640, 0x97EE7299, 0x273E662C, 0x17AFBD17, 0x579B4468, 0x98F54449,
0x2C7D1BD9, 0x5C8A5FB4, 0x9A3BC004, 0x39296A78, 0x118 } };
}
printf("scalar mult secp521r1:\n");
printScalarFirstElement(scalar);
printPointFirstElement(point);
auto exectime = std::chrono::high_resolution_clock::now();
ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, CurveType::secp521r1);
auto time = std::chrono::duration<double, std::milli>(std::chrono::high_resolution_clock::now() - exectime).count();
printf("exec time %lf ms\n", time);
printf("result:\n");
printPointFirstElement(result);
free(point); free(scalar); free(result);
}
/*end check arithm block*/
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
} |
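Every curve block above repeats the same allocate/generate/print/time/free sequence; a templated helper along these lines (a sketch that reuses only functions already defined in this file; runCurveBenchmark and basePoint are hypothetical names) would collapse the five blocks into one call per curve:

```cpp
// Sketch: one benchmark body shared by all curves (hypothetical helper).
template <size_t N>
void runCurveBenchmark(CurveType curve, const AffinePoint<N> &basePoint,
                       const char *name, int num_elems = 65536) {
    auto *point  = (AffinePoint<N>*) malloc(num_elems * sizeof(AffinePoint<N>));
    auto *scalar = (BigNum<N>*) malloc(num_elems * sizeof(BigNum<N>));
    auto *result = (AffinePoint<N>*) malloc(num_elems * sizeof(AffinePoint<N>));
    generateScalarArray(scalar, num_elems);
    for (int i = 0; i < num_elems; i++)
        point[i] = basePoint;                    // same generator for every element
    printf("scalar mult %s:\n", name);
    printScalarFirstElement(scalar);
    printPointFirstElement(point);
    auto exectime = std::chrono::high_resolution_clock::now();
    ScalarMultiply::scalarMultiplyOnGpu(result, scalar, point, num_elems, curve);
    auto time = std::chrono::duration<double, std::milli>(
            std::chrono::high_resolution_clock::now() - exectime).count();
    printf("exec time %lf ms\nresult:\n", time);
    printPointFirstElement(result);
    free(point); free(scalar); free(result);
}
// Usage, with g192 holding the secp192r1 generator literal from the first block:
// runCurveBenchmark<6>(CurveType::secp192r1, g192, "secp192r1");
```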
a5faf27b7a0ae5b1599a9de645f1f1a9e66ced22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
using namespace std;
//standard deviation = sqrt(summation((x - mean)^2) / n)
//Did not figure out why this code doesn't work with threads:
//every block launched below re-initializes b[0] and accumulates into it
//concurrently without atomics, so the writes to b[0] race
__global__ void standard_deviation(int *a, float *b, float mean, int n) {
int tid = blockIdx.x;
//int tid = threadIdx.x;
b[0] = 0.0;
for(int i = tid; i < n; i++) {
b[0] += (a[i] - mean) * (a[i] - mean);
//printf("b[%d] = %d, a[%d] = %d", i, b[0], i, a[i]);
}
b[0] = b[0]/n;
}
int main() {
int n;
cin>>n;
//int a[n]; //does not work on some cuda versions
int *a = (int *)malloc(n * sizeof(int));
cout<<"The input numbers are: "<<endl;
for(int i = 0; i < n; i++) {
a[i] = i+1;
cout<<a[i]<<"\t";
}
cout<<endl;
float mean = (n + 1) / 2.0f; //mean of 1..n; 2.0f avoids integer truncation for even n
cout<<"Mean: "<<mean<<endl;
int *dev_a;
float *dev_b;
hipMalloc(&dev_a, n * sizeof(int));
hipMalloc(&dev_b, sizeof(float));
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( standard_deviation), dim3(n), dim3(1), 0, 0, dev_a, dev_b, mean, n);
float *ans = (float *)malloc(sizeof(float));
hipMemcpy(ans, dev_b, sizeof(float), hipMemcpyDeviceToHost);
cout<<"The answer is: "<< sqrt(ans[0])<<endl;
}
| a5faf27b7a0ae5b1599a9de645f1f1a9e66ced22.cu | #include<iostream>
using namespace std;
//standard deviation = sqrt(summation((x - mean)^2) / n)
//Did not figure out why this code doesn't work with threads:
//every block launched below re-initializes b[0] and accumulates into it
//concurrently without atomics, so the writes to b[0] race
__global__ void standard_deviation(int *a, float *b, float mean, int n) {
int tid = blockIdx.x;
//int tid = threadIdx.x;
b[0] = 0.0;
for(int i = tid; i < n; i++) {
b[0] += (a[i] - mean) * (a[i] - mean);
//printf("b[%d] = %d, a[%d] = %d", i, b[0], i, a[i]);
}
b[0] = b[0]/n;
}
int main() {
int n;
cin>>n;
//int a[n]; //does not work on some cuda versions
int *a = (int *)malloc(n * sizeof(int));
cout<<"The input numbers are: "<<endl;
for(int i = 0; i < n; i++) {
a[i] = i+1;
cout<<a[i]<<"\t";
}
cout<<endl;
float mean = (n + 1) / 2.0f; //mean of 1..n; 2.0f avoids integer truncation for even n
cout<<"Mean: "<<mean<<endl;
int *dev_a;
float *dev_b;
cudaMalloc(&dev_a, n * sizeof(int));
cudaMalloc(&dev_b, sizeof(float));
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
standard_deviation<<<n, 1>>>(dev_a, dev_b, mean, n);
float *ans = (float *)malloc(sizeof(float));
cudaMemcpy(ans, dev_b, sizeof(float), cudaMemcpyDeviceToHost);
cout<<"The answer is: "<< sqrt(ans[0])<<endl;
}
|
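The note at the top of this pair follows from the launch configuration: with `standard_deviation<<<n, 1>>>` (or any multi-thread launch) every block writes `b[0]` concurrently, so the result is a data race. A race-free variant (a sketch; the host zeroes `b[0]` before launch and takes `sqrt(b[0] / n)` afterwards) keeps a private partial sum per thread and merges once with `atomicAdd`:

```cpp
// Sketch of a race-free reduction: each thread accumulates a private partial
// sum over a grid-stride range, then merges it exactly once via atomicAdd.
__global__ void standard_deviation_atomic(const int *a, float *b, float mean, int n) {
    float local = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x)
        local += (a[i] - mean) * (a[i] - mean);
    atomicAdd(&b[0], local);  // b[0] must be zeroed on the host before launch
}
```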
7c5bfa5a57d683a6a9bedcf234f1a93fae5c8246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/lrn.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
T* shared = reinterpret_cast<T*>(sharedChar);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const T tbias = static_cast<T>(bias);
const T tbeta = static_cast<T>(beta);
const T talpha = static_cast<T>(alpha);
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i];
// load everything into shared memory
shared[threadIdx.x] = x[threadIdx.x * xEws];
__syncthreads();
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
T prev = 0.;
for (int s = begin; s < end; s++)
prev = prev + shared[s] * shared[s];
z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + talpha * prev, tbeta);
}
}
template <typename X, typename Z>
static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
X* sharedX = reinterpret_cast<X*>(sharedChar);
Z* sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const Z tbias = static_cast<Z>(bias);
const Z tbeta = static_cast<Z>(beta);
const Z talpha = static_cast<Z>(alpha);
const Z coeff = talpha * tbeta;
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i];
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
// load everything into shared memory
sharedX[threadIdx.x] = x[threadIdx.x * xEws];
sharedY[threadIdx.x] = 0.f;
__syncthreads();
for (int s = begin; s < end; s++)
sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s];
__syncthreads();
Z factor[1024];
Z init = tbias + talpha * sharedY[threadIdx.x];
Z prev = 0.f;
for (uint s = begin; s < end; ++s) {
factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1);
prev = prev + sharedX[s] * factor[s];
}
z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev;
}
}
template <typename X, typename Z>
static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
auto rank = input.rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
hipLaunchKernelGGL(( lrnBPKernel<X, Z>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
gradI.tickWriteDevice();
gradI *= gradO;
}
void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
input.syncToDevice();
gradO.syncToDevice();
BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES);
gradI.tickWriteDevice();
}
template <typename T>
static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
auto rank = input->rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
hipLaunchKernelGGL(( lrnKernel<T>), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(T), *block.launchContext()->getCudaStream(), input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
}
int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES);
output->tickWriteDevice();
return Status::OK();
}
}
}
}
| 7c5bfa5a57d683a6a9bedcf234f1a93fae5c8246.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/lrn.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void lrnKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
T* shared = reinterpret_cast<T*>(sharedChar);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const T tbias = static_cast<T>(bias);
const T tbeta = static_cast<T>(beta);
const T talpha = static_cast<T>(alpha);
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<T*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<T*>(vz) + zTadOffsets[i];
// load everything into shared memory
shared[threadIdx.x] = x[threadIdx.x * xEws];
__syncthreads();
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
T prev = 0.;
for (int s = begin; s < end; s++)
prev = prev + shared[s] * shared[s];
z[threadIdx.x * zEws] = shared[threadIdx.x] / nd4j::math::nd4j_pow<T, T, T>(tbias + talpha * prev, tbeta);
}
}
template <typename X, typename Z>
static _CUDA_G void lrnBPKernel(void *vx, Nd4jLong *xTadShapeInfo, Nd4jLong *xTadOffsets, void *vz, Nd4jLong *zTadShapeInfo, Nd4jLong *zTadOffsets, Nd4jLong numTads, Nd4jLong tadLength, int depth, double bias, double alpha, double beta) {
extern __shared__ char sharedChar[];
X* sharedX = reinterpret_cast<X*>(sharedChar);
Z* sharedY = reinterpret_cast<Z*>(sharedX + blockDim.x);
auto xEws = shape::elementWiseStride(xTadShapeInfo);
auto zEws = shape::elementWiseStride(zTadShapeInfo);
auto xOrder = shape::order(xTadShapeInfo);
auto zOrder = shape::order(zTadShapeInfo);
const Z tbias = static_cast<Z>(bias);
const Z tbeta = static_cast<Z>(beta);
const Z talpha = static_cast<Z>(alpha);
const Z coeff = talpha * tbeta;
for (uint i = blockIdx.x; i < numTads; i += gridDim.x) {
auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i];
auto z = reinterpret_cast<Z*>(vz) + zTadOffsets[i];
const uint begin = nd4j::math::nd4j_max<int>(0, threadIdx.x - depth);
const uint last = depth + threadIdx.x + 1;
const uint end = nd4j::math::nd4j_min<int>(last, tadLength);
// load everything into shared memory
sharedX[threadIdx.x] = x[threadIdx.x * xEws];
sharedY[threadIdx.x] = 0.f;
__syncthreads();
for (int s = begin; s < end; s++)
sharedY[threadIdx.x] = sharedY[threadIdx.x] + sharedX[s] * sharedX[s];
__syncthreads();
Z factor[1024];
Z init = tbias + talpha * sharedY[threadIdx.x];
Z prev = 0.f;
for (uint s = begin; s < end; ++s) {
factor[s] = nd4j::math::nd4j_pow<Z, Z, Z>(tbias + talpha * sharedY[s], -tbeta - 1);
prev = prev + sharedX[s] * factor[s];
}
z[threadIdx.x * zEws] = factor[threadIdx.x] * init - 2 * sharedX[threadIdx.x] * coeff * prev;
}
}
template <typename X, typename Z>
static void lrnBP_(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
auto rank = input.rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
lrnBPKernel<X, Z><<<numBlocks, numThreads, numThreads * sizeof(X) + numThreads * sizeof(Z) + 1024, *block.launchContext()->getCudaStream()>>>(input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradI.specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
gradI.tickWriteDevice();
gradI *= gradO;
}
void lrnBP(nd4j::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int depth, const float bias, const float alpha, const float beta) {
input.syncToDevice();
gradO.syncToDevice();
BUILD_DOUBLE_SELECTOR(input.dataType(), gradO.dataType(), lrnBP_, (block, input, gradO, gradI, depth, bias, alpha, beta), FLOAT_TYPES, FLOAT_TYPES);
gradI.tickWriteDevice();
}
template <typename T>
static void lrnFunctor_(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
auto rank = input->rankOf();
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {rank - 1});
auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {rank - 1});
const auto tadLength = shape::length(packX.primaryShapeInfo());
const int numBlocks = nd4j::math::nd4j_min<Nd4jLong>(1024, packX.numberOfTads());
const int numThreads = tadLength;
if (tadLength > 1024 || tadLength < 1)
throw std::runtime_error("LRN: tadLength > 1024 isn't implemented yet");
lrnKernel<T><<<numBlocks, numThreads, numThreads * sizeof(T), *block.launchContext()->getCudaStream()>>>(input->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), packX.numberOfTads(), tadLength, depth, bias, alpha, beta);
}
int lrnFunctor(nd4j::graph::Context& block, NDArray* input, NDArray* output, int depth, double bias, double alpha, double beta) {
input->syncToDevice();
BUILD_SINGLE_SELECTOR(input->dataType(), lrnFunctor_, (block, input, output, depth, bias, alpha, beta), FLOAT_TYPES);
output->tickWriteDevice();
return Status::OK();
}
}
}
}
|
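Reading the kernels above, the math they implement can be written compactly. With window $W(i) = [\max(0, i-\text{depth}),\ \min(i+\text{depth}+1, \text{tadLength}))$ along the last axis, the forward pass (lrnKernel) computes

$$ s_i = \sum_{j \in W(i)} x_j^2, \qquad y_i = \frac{x_i}{(b + \alpha s_i)^{\beta}}, $$

and, writing $f_j = (b + \alpha s_j)^{-\beta-1}$, lrnBPKernel stores the factor that lrnBP_ then multiplies elementwise by gradO:

$$ z_i = f_i\,(b + \alpha s_i) \;-\; 2\,\alpha \beta\, x_i \sum_{j \in W(i)} x_j f_j . $$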
a07da8f71f3adc83b4d12f05d54d4e245f65fe47.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include "kernels.h"
#include "kernels_cpu.h"
#include <hip/hip_runtime.h>
using namespace std;
hipError_t cudaErr;
#define T double
int main(int argc, char** argv)
{
int N=10;
int* A=new int[N*N];
int* b=new int[N];
load_matrix<int>(&N, A, b, "../diplomka/mat1.txt");
cout << N << endl;
//vypsat_mat<int>(N, A, b);
/*
int* cuda_A;
hipMalloc((void**)&cuda_A, N*N*sizeof(int));
hipMemcpy(cuda_A, A, N*N*sizeof(int), hipMemcpyHostToDevice);
int* cuda_b=NULL;
hipMalloc((void**)&cuda_b, N*sizeof(int));
cudaErr=hipMemcpy(cuda_b, b, N*sizeof(int), hipMemcpyHostToDevice);
*/
//hipFree(cuda_b);
delete[] A;
delete[] b;
#ifdef _DEBUG
cin.get();
#endif
}
| a07da8f71f3adc83b4d12f05d54d4e245f65fe47.cu | #include "stdafx.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include "kernels.h"
#include "kernels_cpu.h"
#include <cuda_runtime.h>
using namespace std;
cudaError_t cudaErr;
#define T double
int main(int argc, char** argv)
{
int N=10;
int* A=new int[N*N];
int* b=new int[N];
load_matrix<int>(&N, A, b, "../diplomka/mat1.txt");
cout << N << endl;
//vypsat_mat<int>(N, A, b);
/*
int* cuda_A;
cudaMalloc((void**)&cuda_A, N*N*sizeof(int));
cudaMemcpy(cuda_A, A, N*N*sizeof(int), cudaMemcpyHostToDevice);
int* cuda_b=NULL;
cudaMalloc((void**)&cuda_b, N*sizeof(int));
cudaErr=cudaMemcpy(cuda_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
*/
//cudaFree(cuda_b);
delete[] A;
delete[] b;
#ifdef _DEBUG
cin.get();
#endif
}
|
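The transfer block in this pair is commented out and, as written, would discard `cudaErr` for the malloc/copy of `cuda_A`; if it is re-enabled, an error-checked version along these lines makes failures visible (a sketch; `upload_vector` is a hypothetical name, and only standard runtime calls are used):

```cpp
// Sketch: error-checked device upload for the commented-out block above.
#include <cuda_runtime.h>
#include <iostream>

bool upload_vector(const int *b, int N, int **cuda_b_out) {
    cudaError_t err = cudaMalloc((void**)cuda_b_out, N * sizeof(int));
    if (err != cudaSuccess) {
        std::cerr << "cudaMalloc: " << cudaGetErrorString(err) << std::endl;
        return false;
    }
    err = cudaMemcpy(*cuda_b_out, b, N * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        std::cerr << "cudaMemcpy: " << cudaGetErrorString(err) << std::endl;
        cudaFree(*cuda_b_out);
        return false;
    }
    return true;
}
```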
6aae3f4744e6c2518388de49498659d44ed0b9d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/conv_bias/cutlass_reorder_filter.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/conv_bias/cutlass_reorder_filter.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/integer_subbyte_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace cutlass_wrapper;
namespace {
template <uint32_t size_bits, uint32_t interleaved>
__device__ __forceinline__ void reorder_ncxhwx_imma_filter_func(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t lane, bool trans_oc) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
static constexpr uint32_t threads_per_interleaved =
interleaved / elements_per_lane;
static constexpr uint32_t instruction_shape_col = 8;
// 4 threads per Quad
static constexpr uint32_t elements_per_thread = instruction_shape_col / 4;
// 4 threads per Quad
static constexpr uint32_t reordered_elements_per_thread = interleaved / 4;
uint32_t id = lane / threads_per_interleaved;
uint32_t residue = lane % threads_per_interleaved;
uint32_t ICx = IC / interleaved;
uint32_t row = id / (ICx * FH * FW);
uint32_t col = id - row * ICx * FH * FW;
// transpose ncxhwx to cxhwnx
uint32_t src_offset = id * interleaved + residue * elements_per_lane;
row = (trans_oc) ? (row / interleaved) * interleaved +
((row % reordered_elements_per_thread) /
elements_per_thread) *
instruction_shape_col +
((row % interleaved) /
reordered_elements_per_thread) *
elements_per_thread +
(row % elements_per_thread)
: row;
uint32_t dst_offset =
(col * OC + row) * interleaved + residue * elements_per_lane;
*(reinterpret_cast<int4*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int4*>(src + src_offset * size_bits / 8));
}
template <uint32_t size_bits, uint32_t interleaved>
__global__ void reorder_ncxhwx_imma_filter_kernel(
int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter,
uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
const uint32_t size = OC * IC * FH * FW / elements_per_lane;
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
if (lane < size) {
reorder_ncxhwx_imma_filter_func<size_bits, interleaved>(
dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc);
}
}
template <uint32_t size_bits, uint32_t alignbits, uint32_t interleaved>
__device__ __forceinline__ void reorder_nhwc_imma_filter_func(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t lane, bool trans_oc) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
static constexpr uint32_t instruction_shape_col = 8;
// 4 threads per Quad
static constexpr uint32_t elements_per_thread = instruction_shape_col / 4;
// 4 threads per Quad
static constexpr uint32_t reordered_elements_per_thread = interleaved / 4;
uint32_t ICx = IC / elements_per_access;
uint32_t k = lane / (ICx * FH * FW);
uint32_t cxrs = lane - k * ICx * FH * FW;
uint32_t rs = cxrs / ICx;
uint32_t cx = cxrs - rs * ICx;
// transpose nhwc to ncxhwx
uint32_t src_offset = lane * elements_per_access;
// reorder k
k = (trans_oc)
? (k / interleaved) * interleaved +
((k % reordered_elements_per_thread) /
elements_per_thread) *
instruction_shape_col +
((k % interleaved) / reordered_elements_per_thread) *
elements_per_thread +
(k % elements_per_thread)
: k;
uint32_t dst_offset =
(k * ICx * FH * FW + cx * FH * FW + rs) * elements_per_access;
if (alignbits == 32) {
*(reinterpret_cast<int*>(dst + dst_offset * size_bits / 8)) = *(
reinterpret_cast<const int*>(src + src_offset * size_bits / 8));
} else if (alignbits == 64) {
*(reinterpret_cast<int2*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int2*>(src +
src_offset * size_bits / 8));
} else {
*(reinterpret_cast<int4*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int4*>(src +
src_offset * size_bits / 8));
}
}
template <uint32_t size_bits, uint32_t alignbits, uint32_t interleaved>
__global__ void reorder_nhwc_imma_filter_kernel(
int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter,
uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
const uint32_t size = OC * IC * FH * FW / elements_per_access;
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
if (lane < size) {
reorder_nhwc_imma_filter_func<size_bits, alignbits, interleaved>(
dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc);
}
}
} // namespace
template <uint32_t size_bits, uint32_t interleaved>
void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter(
int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC,
uint32_t FH, uint32_t FW, bool trans_oc, hipStream_t stream) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
uint32_t nr_threads =
query_blocksize_for_kernel(reinterpret_cast<const void*>(
reorder_ncxhwx_imma_filter_kernel<size_bits, interleaved>));
uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
hipLaunchKernelGGL(( reorder_ncxhwx_imma_filter_kernel<size_bits, interleaved>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream, dst_filter, src_filter, OC,
IC, FH, FW, trans_oc);
after_kernel_launch();
}
template <uint32_t size_bits, uint32_t alignbits>
void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter(
int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC,
uint32_t FH, uint32_t FW, bool trans_oc, uint32_t oc_interleaved,
hipStream_t stream) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
uint32_t nr_threads =
query_blocksize_for_kernel(reinterpret_cast<const void*>(
reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 32>));
uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_access);
nr_threads = ::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
if (oc_interleaved == 32) {
hipLaunchKernelGGL(( reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 32>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream,
dst_filter, src_filter, OC, IC, FH, FW, trans_oc);
} else {
hipLaunchKernelGGL(( reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 64>)
, dim3(nr_blocks), dim3(nr_threads), 0, stream,
dst_filter, src_filter, OC, IC, FH, FW, trans_oc);
}
after_kernel_launch();
}
#define INST(_size_bits, _interleaved) \
template void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter< \
_size_bits, _interleaved>(int8_t * dst_filter, \
const int8_t* src_filter, uint32_t OC, \
uint32_t IC, uint32_t FH, uint32_t FW, \
bool trans_oc, hipStream_t stream);
INST(8, 32)
INST(4, 64)
#undef INST
#define INST(_size_bits, _alignbits) \
template void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter< \
_size_bits, _alignbits>( \
int8_t * dst_filter, const int8_t* src_filter, uint32_t OC, \
uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc, \
uint32_t oc_interleaved, hipStream_t stream);
INST(4, 32)
INST(4, 64)
INST(4, 128)
#undef INST
// vim: syntax=cuda.doxygen
| 6aae3f4744e6c2518388de49498659d44ed0b9d1.cu | /**
* \file dnn/src/cuda/conv_bias/cutlass_reorder_filter.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/conv_bias/cutlass_reorder_filter.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/integer_subbyte_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace cutlass_wrapper;
namespace {
template <uint32_t size_bits, uint32_t interleaved>
__device__ __forceinline__ void reorder_ncxhwx_imma_filter_func(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t lane, bool trans_oc) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
static constexpr uint32_t threads_per_interleaved =
interleaved / elements_per_lane;
static constexpr uint32_t instruction_shape_col = 8;
// 4 threads per Quad
static constexpr uint32_t elements_per_thread = instruction_shape_col / 4;
// 4 threads per Quad
static constexpr uint32_t reordered_elements_per_thread = interleaved / 4;
uint32_t id = lane / threads_per_interleaved;
uint32_t residue = lane % threads_per_interleaved;
uint32_t ICx = IC / interleaved;
uint32_t row = id / (ICx * FH * FW);
uint32_t col = id - row * ICx * FH * FW;
// transpose ncxhwx to cxhwnx
uint32_t src_offset = id * interleaved + residue * elements_per_lane;
row = (trans_oc) ? (row / interleaved) * interleaved +
((row % reordered_elements_per_thread) /
elements_per_thread) *
instruction_shape_col +
((row % interleaved) /
reordered_elements_per_thread) *
elements_per_thread +
(row % elements_per_thread)
: row;
uint32_t dst_offset =
(col * OC + row) * interleaved + residue * elements_per_lane;
*(reinterpret_cast<int4*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int4*>(src + src_offset * size_bits / 8));
}
template <uint32_t size_bits, uint32_t interleaved>
__global__ void reorder_ncxhwx_imma_filter_kernel(
int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter,
uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
const uint32_t size = OC * IC * FH * FW / elements_per_lane;
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
if (lane < size) {
reorder_ncxhwx_imma_filter_func<size_bits, interleaved>(
dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc);
}
}
template <uint32_t size_bits, uint32_t alignbits, uint32_t interleaved>
__device__ __forceinline__ void reorder_nhwc_imma_filter_func(
int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH,
uint32_t FW, uint32_t lane, bool trans_oc) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
static constexpr uint32_t instruction_shape_col = 8;
// 4 threads per Quad
static constexpr uint32_t elements_per_thread = instruction_shape_col / 4;
// 4 threads per Quad
static constexpr uint32_t reordered_elements_per_thread = interleaved / 4;
uint32_t ICx = IC / elements_per_access;
uint32_t k = lane / (ICx * FH * FW);
uint32_t cxrs = lane - k * ICx * FH * FW;
uint32_t rs = cxrs / ICx;
uint32_t cx = cxrs - rs * ICx;
// transpose nhwc to ncxhwx
uint32_t src_offset = lane * elements_per_access;
// reorder k
k = (trans_oc)
? (k / interleaved) * interleaved +
((k % reordered_elements_per_thread) /
elements_per_thread) *
instruction_shape_col +
((k % interleaved) / reordered_elements_per_thread) *
elements_per_thread +
(k % elements_per_thread)
: k;
uint32_t dst_offset =
(k * ICx * FH * FW + cx * FH * FW + rs) * elements_per_access;
if (alignbits == 32) {
*(reinterpret_cast<int*>(dst + dst_offset * size_bits / 8)) = *(
reinterpret_cast<const int*>(src + src_offset * size_bits / 8));
} else if (alignbits == 64) {
*(reinterpret_cast<int2*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int2*>(src +
src_offset * size_bits / 8));
} else {
*(reinterpret_cast<int4*>(dst + dst_offset * size_bits / 8)) =
*(reinterpret_cast<const int4*>(src +
src_offset * size_bits / 8));
}
}
template <uint32_t size_bits, uint32_t alignbits, uint32_t interleaved>
__global__ void reorder_nhwc_imma_filter_kernel(
int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter,
uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
const uint32_t size = OC * IC * FH * FW / elements_per_access;
uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x;
if (lane < size) {
reorder_nhwc_imma_filter_func<size_bits, alignbits, interleaved>(
dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc);
}
}
} // namespace
template <uint32_t size_bits, uint32_t interleaved>
void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter(
int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC,
uint32_t FH, uint32_t FW, bool trans_oc, cudaStream_t stream) {
static constexpr uint32_t elements_per_lane = 128 / size_bits;
uint32_t nr_threads =
query_blocksize_for_kernel(reinterpret_cast<const void*>(
reorder_ncxhwx_imma_filter_kernel<size_bits, interleaved>));
uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
reorder_ncxhwx_imma_filter_kernel<size_bits, interleaved>
<<<nr_blocks, nr_threads, 0, stream>>>(dst_filter, src_filter, OC,
IC, FH, FW, trans_oc);
after_kernel_launch();
}
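// Usage sketch (hypothetical shapes, not taken from this file): reordering
// an int8 filter of shape OC=64, IC=64, FH=FW=3 into the interleaved IMMA
// layout, with the OC permutation for mma enabled:
//
//     megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter<8, 32>(
//             dst_filter, src_filter, 64, 64, 3, 3, /* trans_oc */ true,
//             stream);
//
// Each lane moves one 128-bit vector (16 int8 / 32 int4 elements), so
// OC * IC * FH * FW is assumed to be a multiple of elements_per_lane.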
template <uint32_t size_bits, uint32_t alignbits>
void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter(
int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC,
uint32_t FH, uint32_t FW, bool trans_oc, uint32_t oc_interleaved,
cudaStream_t stream) {
static constexpr uint32_t elements_per_access = alignbits / size_bits;
uint32_t nr_threads =
query_blocksize_for_kernel(reinterpret_cast<const void*>(
reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 32>));
uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_access);
nr_threads = std::min(nr_threads, vthreads);
uint32_t nr_blocks = DIVUP(vthreads, nr_threads);
if (oc_interleaved == 32) {
reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 32>
<<<nr_blocks, nr_threads, 0, stream>>>(
dst_filter, src_filter, OC, IC, FH, FW, trans_oc);
} else {
reorder_nhwc_imma_filter_kernel<size_bits, alignbits, 64>
<<<nr_blocks, nr_threads, 0, stream>>>(
dst_filter, src_filter, OC, IC, FH, FW, trans_oc);
}
after_kernel_launch();
}
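// Usage sketch (hypothetical shapes): the nhwc variant additionally takes
// alignbits, which selects the per-access vector width (32 -> int,
// 64 -> int2, 128 -> int4), and a runtime oc_interleaved of 32 or 64:
//
//     megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter<4, 64>(
//             dst_filter, src_filter, 64, 64, 3, 3, /* trans_oc */ true,
//             /* oc_interleaved */ 64, stream);
//
// Note that the block size is queried once from the interleaved = 32
// instantiation and reused for the 64 case; both instantiations are
// assumed here to have the same occupancy characteristics.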
#define INST(_size_bits, _interleaved) \
template void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter< \
_size_bits, _interleaved>(int8_t * dst_filter, \
const int8_t* src_filter, uint32_t OC, \
uint32_t IC, uint32_t FH, uint32_t FW, \
bool trans_oc, cudaStream_t stream);
INST(8, 32)
INST(4, 64)
#undef INST
#define INST(_size_bits, _alignbits) \
template void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter< \
_size_bits, _alignbits>( \
int8_t * dst_filter, const int8_t* src_filter, uint32_t OC, \
uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc, \
uint32_t oc_interleaved, cudaStream_t stream);
INST(4, 32)
INST(4, 64)
INST(4, 128)
#undef INST
// vim: syntax=cuda.doxygen
|
2e55ee80a4b5daa6272a5d210aa7a5db95331b17.hip | // !!! This is a file automatically generated by hipify!!!
#include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_complex.h>
#include <hip/math_functions.h> // math_functions.h is CUDA-only; the HIP counterpart is assumed here
#if 1
//should call with Dznrm2<<<1,128>>>(...)
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
#endif
#if 0
__global__ void Dznrm2( unsigned long int n, double2* x, double* the_norm )
{
__shared__ double sSum[512];
double res = 0.0;
double2* lastX = x + n;
x += threadIdx.x + blockIdx.x*512;
unsigned long const blockOffset = gridDim.x*512;
while ( x < lastX )
{
double R = (*x).x;
double I = (*x).y;
res += R * R + I * I;
x += blockOffset;
}
if (threadIdx.x >= 32)
sSum[threadIdx.x] = res;
__syncthreads();
if (threadIdx.x < 32)
for ( unsigned long i=1; i < 16; ++i )
res += sSum[i*32 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 32)
{
double* vsSum = sSum;
vsSum[threadIdx.x] = res;
if (threadIdx.x < 16) vsSum[threadIdx.x] += vsSum[threadIdx.x + 16];
__syncthreads();
if (threadIdx.x < 8) vsSum[threadIdx.x] += vsSum[threadIdx.x + 8];
__syncthreads();
if (threadIdx.x < 4) vsSum[threadIdx.x] += vsSum[threadIdx.x + 4];
__syncthreads();
if (threadIdx.x < 2) vsSum[threadIdx.x] += vsSum[threadIdx.x + 2];
__syncthreads();
if (threadIdx.x == 0)
*the_norm = sqrt( vsSum[0] + vsSum[1] );
}
}
#endif
//should call with Zscale<<<1, 128>>>(...);
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
const int i = threadIdx.x;
for( unsigned long j = i; j < m; j += 128 )
{
dA[j].x *= real;
dA[j].y *= real;
}
}
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
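// Launch sketch for the tiled complex GEMM above, matching the grid/block
// hint at its signature (P = alpha * M * N, all dim x dim, row-major):
//
//     dim3 grids( (dim + 15) / 16, (dim + 15) / 16 );
//     dim3 threads( 16, 16 );
//     hipLaunchKernelGGL(Zgemm, grids, threads, 0, 0, P, M, N, dim, 1.0);
//
// The shared tiles are declared [16][17] rather than [16][16]: the extra
// padding column keeps column-wise accesses free of shared-memory bank
// conflicts.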
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
unsigned long const i = threadIdx.x;
for( unsigned long j = i; j < dims; j += 128 )
{
(*(dst+j)).x = (*(src+j)).x;
(*(dst+j)).y = (*(src+j)).y;
}
}
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
unsigned long const i = threadIdx.x;
double R = 0.0;
double I = 0.0;
for( unsigned long j = i; j < dims; j += 128 )
{
R = (*(src+j)).x;
I = (*(src+j)).y;
(*(dst+j)).x += real * R - imag * I;
(*(dst+j)).y += real * I + imag * R;
}
}
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double2* A )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
        //*(a+a_offset) = make_hipDoubleComplex( *(ug+ug_index+ug_index), *(ug+ug_index+ug_index+1) );
        *(a+a_offset) = make_hipDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
*(A+a_offset) = *(a+a_offset);
}
    //*(a+row_index*dim+row_index) = make_hipDoubleComplex( *(diag+row_index), 0.0 );
    *(a+row_index*dim+row_index) = make_hipDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
*(A+row_index*dim+row_index) = *(a+row_index*dim+row_index);
}
__global__ void
extract_intensity_diff( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
    double const norm = hipCabs(*(s+S_offset));
*(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm;
}
__global__ void
extract_intensity_diff_with_offset( double2* s, double* weights, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
    double const norm = hipCabs(*(s+S_offset));
*(I_diff+I_offset) = ( *(I_exp+I_offset) - norm * norm * ac_offset - dc_offset ) * weights[I_offset];
}
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
int const index = threadIdx.x;
int const offset = index * dim + index;
    *(a+offset) = make_hipDoubleComplex( hipCreal(*(a+offset))+real, hipCimag(*(a+offset))+imag );
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
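/*
 * Sketch of the underlying math (inferred from the Maple notes further
 * down): this is a scaling-and-squaring evaluation of the matrix
 * exponential. After A is scaled by 2^scaler so that its norm falls under
 * a fixed threshold (the 5.3719... constant below), the degree-9 Taylor
 * polynomial of exp is applied in factored form,
 *
 *     exp(A_) ~= ( p1(A_) * p2(A_) * p3(A_) ) / 9!,   9! = 362880,
 *
 * where p1, p2, p3 are the three cubics built from the nine roots listed
 * in the Maple comment. The two Zgemm scale factors 0.00166003... are
 * 1/602.3952..., with 602.3952...^2 == 362880. The result is finally
 * squared `scaler` times to undo the initial scaling.
 */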
__global__ void
make_individual_pattern_intensity_diff( double* cuda_weights, double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double2* cuda_a )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* weights = cuda_weights + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_copy = cuda_a;
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in latter steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
    kernel_assert( (hipLaunchKernelGGL((compose_a), dim3(1), dim3(dim), 0, 0, ug, ar, diag, thickness, a_, dim, a_copy)) );
cuda_assert( hipDeviceSynchronize() );
//2)
//TODO
double* the_norm = (double*)aa_;
    kernel_assert( (hipLaunchKernelGGL((Dznrm2), dim3(1), dim3(128), 0, 0, dimdim, a_, the_norm)) );
//kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
cuda_assert( hipDeviceSynchronize() );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
    kernel_assert( (hipLaunchKernelGGL((Zscal), dim3(1), dim3(128), 0, 0, dimdim, 1.0/scale, a_)) ); //a_ /= scale
cuda_assert( hipDeviceSynchronize() );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
    kernel_assert( (hipLaunchKernelGGL((Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aa_, a_, a_, dim, 1.0)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, aaa_, aa_, a_, dim, 1.0)) );
    cuda_assert( hipDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
//4 - p1)
    kernel_assert( (hipLaunchKernelGGL((Zcopy), dim3(1), dim3(128), 0, 0, dimdim, aaa_, p1)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((sum_diag), dim3(1), dim3(dim), 0, 0, p1, dim, -19.71085376106750328141397, 94.20645646169128946503649)) );
    cuda_assert( hipDeviceSynchronize() );
    //4 - p2)
    kernel_assert( (hipLaunchKernelGGL((Zcopy), dim3(1), dim3(128), 0, 0, dimdim, aaa_, p2)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, 9.410847631142442981719212, 0.0, p2, aa_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, 32.01029973951970099352671, 0.0, p2, a_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((sum_diag), dim3(1), dim3(dim), 0, 0, p2, dim, 39.17363072664900708597702, 0.0)) );
    cuda_assert( hipDeviceSynchronize() );
    //4 - p3)
    kernel_assert( (hipLaunchKernelGGL((Zcopy), dim3(1), dim3(128), 0, 0, dimdim, aaa_, p3)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zaxpy), dim3(1), dim3(128), 0, 0, dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((sum_diag), dim3(1), dim3(dim), 0, 0, p3, dim, -19.71085376106750328141404, -94.20645646169128946503646)) );
    cuda_assert( hipDeviceSynchronize() );
    //4 - s)
    // s = 1/602.39521910453439454428( p1 * ( 1/602.39521910453439454428 * p2 * p3 ) ) = (p1 p2 p3)/362880
    kernel_assert( (hipLaunchKernelGGL((Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, p2p3, p2, p3, dim, 0.0016600397351866578333)) );
    cuda_assert( hipDeviceSynchronize() );
    kernel_assert( (hipLaunchKernelGGL((Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s, p1, p2p3, dim, 0.0016600397351866578333)) );
cuda_assert( hipDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
            kernel_assert( (hipLaunchKernelGGL((Zgemm), dim3(mm_grids), dim3(mm_threads), 0, 0, s_, s, s, dim, 1.0)) );
cuda_assert( hipDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
//kernel_assert( (extract_intensity_diff<<<1,dim>>>( s, I_exp, I_diff, dim, column_index )) );
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
    //kernel_assert( (hipLaunchKernelGGL((extract_intensity_diff_with_offset), dim3(1), dim3(dim), 0, 0, s, I_exp, I_diff, dim, column_index, ac_offset, dc_offset)) );
    kernel_assert( (hipLaunchKernelGGL((extract_intensity_diff_with_offset), dim3(1), dim3(dim), 0, 0, s, weights, I_exp, I_diff, dim, column_index, ac_offset, dc_offset)) );
cuda_assert( hipDeviceSynchronize() );
}
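// Note: make_individual_pattern_intensity_diff is itself a __global__
// function that launches child kernels (Dznrm2, Zscal, Zgemm, ...) and
// dereferences device pointers such as the_norm from device code, so it
// relies on dynamic parallelism -- on CUDA hardware this means compute
// capability >= 3.5 and compiling/linking with relocatable device code
// (-rdc=true).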
void make_pattern_intensity_diff( double* cuda_weights, double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double2* cuda_a )
{
unsigned long const threads = 64;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( (hipLaunchKernelGGL(( make_individual_pattern_intensity_diff), dim3(grids), dim3(threads), 0, 0, cuda_weights, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, cuda_a ) ) );
cuda_assert( hipDeviceSynchronize() );
}
| 2e55ee80a4b5daa6272a5d210aa7a5db95331b17.cu | #include <f/device/device_assert/cuda_assert.hpp>
#include <f/device/device_assert/cublas_assert.hpp>
#include <f/device/device_assert/kernel_assert.hpp>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuComplex.h>
#include <math_functions.h>
#if 1
//should call with Dznrm2<<<1,128>>>(...)
__global__ void Dznrm2( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += re*re + im*im;
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = sqrt(x[0]);
}
__global__ void Dasum( unsigned long m, double2 *dA, double *dxnorm )
{
unsigned long i = threadIdx.x;
__shared__ double x[128];
double lsum = 0.0;
for( unsigned long j = i; j < m; j += 128 )
{
double const re = dA[j].x;
double const im = dA[j].y;
lsum += sqrt(re*re + im*im);
}
x[i] = lsum;
__syncthreads();
if ( i < 64 ) { x[i] += x[i+ 64]; } __syncthreads();
if ( i < 32 ) { x[i] += x[i+ 32]; } __syncthreads();
if ( i < 16 ) { x[i] += x[i+ 16]; } __syncthreads();
if ( i < 8 ) { x[i] += x[i+ 8]; } __syncthreads();
if ( i < 4 ) { x[i] += x[i+ 4]; } __syncthreads();
if ( i < 2 ) { x[i] += x[i+ 2]; } __syncthreads();
if ( i < 1 ) { x[i] += x[i+ 1]; } __syncthreads();
if ( i == 0 ) *dxnorm = x[0];
}
#endif
#if 0
__global__ void Dznrm2( unsigned long int n, double2* x, double* the_norm )
{
__shared__ double sSum[512];
double res = 0.0;
double2* lastX = x + n;
x += threadIdx.x + blockIdx.x*512;
unsigned long const blockOffset = gridDim.x*512;
while ( x < lastX )
{
double R = (*x).x;
double I = (*x).y;
res += R * R + I * I;
x += blockOffset;
}
if (threadIdx.x >= 32)
sSum[threadIdx.x] = res;
__syncthreads();
if (threadIdx.x < 32)
for ( unsigned long i=1; i < 16; ++i )
res += sSum[i*32 + threadIdx.x];
__syncthreads();
if (threadIdx.x < 32)
{
double* vsSum = sSum;
vsSum[threadIdx.x] = res;
if (threadIdx.x < 16) vsSum[threadIdx.x] += vsSum[threadIdx.x + 16];
__syncthreads();
if (threadIdx.x < 8) vsSum[threadIdx.x] += vsSum[threadIdx.x + 8];
__syncthreads();
if (threadIdx.x < 4) vsSum[threadIdx.x] += vsSum[threadIdx.x + 4];
__syncthreads();
if (threadIdx.x < 2) vsSum[threadIdx.x] += vsSum[threadIdx.x + 2];
__syncthreads();
if (threadIdx.x == 0)
*the_norm = sqrt( vsSum[0] + vsSum[1] );
}
}
#endif
//should call with Zscale<<<1, 128>>>(...);
__global__ void Zscal( unsigned long m, double real, double2* dA )
{
const int i = threadIdx.x;
for( unsigned long j = i; j < m; j += 128 )
{
dA[j].x *= real;
dA[j].y *= real;
}
}
__global__ //<<<((dim+15)/16,(dim+15)/16), (16,16)>>>
void Zgemm( double2* P, double2* M, double2* N, unsigned long dim, double alpha )
{
typedef double value_type;
typedef double2 complex_type;
typedef unsigned long size_type;
__shared__ value_type _M[16][17];
__shared__ value_type _m[16][17];
__shared__ value_type _N[16][17];
__shared__ value_type _n[16][17];
const size_type bx = blockIdx.x;
const size_type by = blockIdx.y;
const size_type tx = threadIdx.x;
const size_type ty = threadIdx.y;
const size_type row = by * 16 + ty;
const size_type col = bx * 16 + tx;
const size_type iter_n = (dim+15)/16;
value_type R = 0.0;
value_type I = 0.0;
for ( size_type i = 0; i != iter_n; ++i )
{
if ( i * 16 + tx < dim && row < dim )
{
_M[ty][tx] = (*( M + row * dim + i * 16 + tx )).x;
_m[ty][tx] = (*( M + row * dim + i * 16 + tx )).y;
}
else
{
_M[ty][tx] = 0.0;
_m[ty][tx] = 0.0;
}
if ( i * 16 + ty < dim && col < dim )
{
_N[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).x;
_n[ty][tx] = (*( N + ( i * 16 + ty ) * dim + col )).y;
}
else
{
_N[ty][tx] = 0.0;
_n[ty][tx] = 0.0;
}
__syncthreads();
#pragma unroll
for ( size_type j = 0; j != 16; ++j )
{
R += _M[ty][j] * _N[j][tx] - _m[ty][j] * _n[j][tx];
I += _M[ty][j] * _n[j][tx] + _m[ty][j] * _N[j][tx];
}
__syncthreads();
}
if ( row < dim && col < dim )
{
(*( P + row * dim + col )).x = alpha * R;
(*( P + row * dim + col )).y = alpha * I;
}
}
__global__ void //<<<1,128>>>
Zcopy( unsigned long dims, double2* src, double2* dst )
{
unsigned long const i = threadIdx.x;
for( unsigned long j = i; j < dims; j += 128 )
{
(*(dst+j)).x = (*(src+j)).x;
(*(dst+j)).y = (*(src+j)).y;
}
}
__global__ void//<<<1, 128>>>
Zaxpy( unsigned long dims, double real, double imag, double2* dst, double2* src ) // dst += (real,imag) * src
{
unsigned long const i = threadIdx.x;
double R = 0.0;
double I = 0.0;
for( unsigned long j = i; j < dims; j += 128 )
{
R = (*(src+j)).x;
I = (*(src+j)).y;
(*(dst+j)).x += real * R - imag * I;
(*(dst+j)).y += real * I + imag * R;
}
}
__global__ void
compose_a( double* ug, unsigned long* ar, double* diag, double thickness, double2* a, unsigned long dim, double2* A )
{
int const row_index = threadIdx.x;
for ( unsigned long col_index = 0; col_index != dim; ++col_index )
{
unsigned long a_offset = row_index * dim + col_index;
unsigned long const ug_index = *(ar+a_offset);
//*(a+a_offset) = make_cuDoubleComplex( *(ug+ug_index+ug_index), *(ug+ug_index+ug_index+1) );
*(a+a_offset) = make_cuDoubleComplex( -thickness * (*(ug+ug_index+ug_index+1)), thickness *( *(ug+ug_index+ug_index)) );
*(A+a_offset) = *(a+a_offset);
}
//*(a+row_index*dim+row_index) = make_cuDoubleComplex( *(diag+row_index), 0.0 );
*(a+row_index*dim+row_index) = make_cuDoubleComplex( 0.0, thickness *( *(diag+row_index) ) );
*(A+row_index*dim+row_index) = *(a+row_index*dim+row_index);
}
__global__ void
extract_intensity_diff( double2* s, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
double const norm = cuCabs(*(s+S_offset));
*(I_diff+I_offset) = *(I_exp+I_offset) - norm * norm;
}
__global__ void
extract_intensity_diff_with_offset( double2* s, double* weights, double* I_exp, double* I_diff, unsigned long dim, unsigned long column_index, double ac_offset, double dc_offset )
{
int const I_offset = threadIdx.x;
int const S_offset = column_index + threadIdx.x * dim;
double const norm = cuCabs(*(s+S_offset));
*(I_diff+I_offset) = ( *(I_exp+I_offset) - norm * norm * ac_offset - dc_offset ) * weights[I_offset];
}
__global__ void
sum_diag( double2* a, unsigned long dim, double real, double imag )
{
int const index = threadIdx.x;
int const offset = index * dim + index;
*(a+offset) = make_cuDoubleComplex( cuCreal(*(a+offset))+real, cuCimag(*(a+offset))+imag );
}
/*
* Input/Output:
*
** ug[M]
* ar[n][n]
* diag[n] ==>> I_diff[n]
** thickness
* dim -- n
* I_exp[n]
** column_index
*
* cache:
* a_[n][n] -- p2p3
* a^2_[n][n] -- s
* a^3_[n][n] -- s_
* P1[n][n]
* P2[n][n]
* P3[n][n]
*
* 1) compose A
* 2) scale to A_
* 3) compute A_^2 A_^3
* 4) compute (P1) (P2) (P3)
* 5) square back
* 6) extract one column
*/
__global__ void
make_individual_pattern_intensity_diff( double* cuda_weights, double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long max_dim, unsigned long tilt_size, double2* cuda_a )
{
unsigned long const tilt_index = blockDim.x * blockIdx.x + threadIdx.x;
if ( tilt_index >= tilt_size ) return;
unsigned long const dim = *(cuda_dim + tilt_index);
double* ug = cuda_ug;
unsigned long* ar = cuda_ar + tilt_index * max_dim * max_dim;
double* diag = cuda_diag + tilt_index * max_dim;
double* I_exp = cuda_I_exp + tilt_index * max_dim;
double* I_diff = cuda_I_diff + tilt_index * max_dim;
double* weights = cuda_weights + tilt_index * max_dim;
double2* cache = cuda_cache + 6 * tilt_index * max_dim * max_dim;
unsigned long dimdim = dim*dim;
//cache should be of size 6*N^2
double2* a_copy = cuda_a;
double2* a_ = cache;
double2* aa_ = a_ + dimdim;
double2* aaa_ = aa_ + dimdim;
double2* p1 = aaa_ + dimdim;
double2* p2 = p1 + dimdim;
double2* p3 = p2 + dimdim;
//reuse memory in latter steps, when a_, aa_ and aaa_ are idle
//double2* p2p3 = a_;
double2* p2p3 = aaa_;
double2* s = aa_;
double2* s_ = aaa_;
//1)
kernel_assert( (compose_a<<<1, dim>>>( ug, ar, diag, thickness, a_, dim, a_copy )) );
cuda_assert( cudaDeviceSynchronize() );
//2)
//TODO
double* the_norm = (double*)aa_;
kernel_assert( (Dznrm2<<<1,128>>>( dimdim, a_, the_norm )) );
//kernel_assert( (Dasum<<<1,128>>>( dimdim, a_, the_norm )) );
cuda_assert( cudaDeviceSynchronize() );
//double const ratio = (*the_norm) * 53.71920351148152;
double const ratio = (*the_norm) / 5.371920351148152;
unsigned long const scaler = ratio < 1.0 ? 0 : ceil(log2(ratio));
unsigned long const scaling_factor = 1 << scaler;
double const scale = scaling_factor;
kernel_assert( (Zscal<<<1, 128>>>( dimdim, 1.0/scale, a_ )) ); //a_ /= scale
cuda_assert( cudaDeviceSynchronize() );
//3)
dim3 const mm_grids( (dim+15)/16, (dim+15)/16 );
dim3 const mm_threads( 16, 16 );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aa_, a_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( aaa_, aa_, a_, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
//4)
/*
* Maple:
* Digits := 25
* evalf(solve(_Z^9+9*_Z^8+72*_Z^7+504*_Z^6+3024*_Z^5+15120*_Z^4+60480*_Z^3+181440*_Z^2+362880*_Z+362880 = 0))
* Returns:
* 2.697333461536989227389605+5.184162062649414177834087*I, //c1
* -.3810698456631129990312942+4.384644533145397950369203*I, //c2
* -2.110839800302654737498705+3.089910928725500922777702*I, //c3
* -3.038648072936697089212469+1.586801195758838328803868*I, //c4
* -3.333551485269048803294274, //c5
* -3.038648072936697089212469-1.586801195758838328803868*I, //c6
* -2.110839800302654737498705-3.089910928725500922777702*I, //c7
* -.3810698456631129990312942-4.384644533145397950369203*I, //c8
* 2.697333461536989227389605-5.184162062649414177834087*I //c9
*
* expand((x-c1)*(x-c2)*(x-c3)) >> p1 ( p1_c )
* x^3-.205423815571221490859606*x^2-(12.65871752452031305098099*I)*x^2-58.21460179641193947200471*x-(3.189848964212376356715960*I)*x-19.71085376106750328141397+94.20645646169128946503649*I
*
* expand((x-c4)*(x-c5)*(x-c6)) >> p2 ( p2_c )
* x^3+9.410847631142442981719212*x^2+39.17363072664900708597702-6.123261017392618755198919*10^(-24)*I+32.01029973951970099352671*x+(4.*10^(-24)*I)*x
*
* expand((x-c7)*(x-c8)*(x-c9)) >> p3 ( p3_c )
* x^3-.205423815571221490859601*x^2+(12.65871752452031305098099*I)*x^2-58.21460179641193947200470*x+(3.18984896421237635671600*I)*x-19.71085376106750328141404-94.20645646169128946503646*I
*
* expand((x-c1)*(x-c2)*(x-c3)*(x-c4)*(x-c5)*(x-c6)*(x-c7)*(x-c8)*(x-c9))
* 3.628800000000000000000003*10^5-1.365022562699469279472268*10^(-19)*I+3.628800000000000000000003*10^5*x+x^9+9.00000000000000000000000*x^8+72.00000000000000000000006*x^7+503.9999999999999999999995*x^6+3024.000000000000000000002*x^5+15120.00000000000000000000*x^4+60479.99999999999999999995*x^3+1.814400000000000000000001*10^5*x^2-(5.*10^(-22)*I)*x^6-(1.*10^(-20)*I)*x^4-(1.0*10^(-19)*I)*x^3+(2.*10^(-24)*I)*x^8-(3.0*10^(-19)*I)*x^2-(7.*10^(-21)*I)*x^5-(4.*10^(-19)*I)*x+(2.*10^(-23)*I)*x^7
*/
//4 - p1)
kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p1 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859606, -12.65871752452031305098099, p1, aa_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200471, -3.189848964212376356715960, p1, a_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (sum_diag<<<1,dim>>>( p1, dim, -19.71085376106750328141397, 94.20645646169128946503649 )) );
cuda_assert( cudaDeviceSynchronize() );
//4 - p2)
kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p2 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 9.410847631142442981719212, 0.0, p2, aa_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, 32.01029973951970099352671, 0.0, p2, a_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (sum_diag<<<1,dim>>>( p2, dim, 39.17363072664900708597702, 0.0 )) );
cuda_assert( cudaDeviceSynchronize() );
//4 - p3)
kernel_assert( (Zcopy<<<1,128>>>( dimdim, aaa_, p3 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -0.205423815571221490859601, 12.65871752452031305098099, p3, aa_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zaxpy<<<1,128>>>( dimdim, -58.21460179641193947200470, 3.18984896421237635671600, p3, a_ )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (sum_diag<<<1,dim>>>( p3, dim, -19.71085376106750328141404, -94.20645646169128946503646 )) );
cuda_assert( cudaDeviceSynchronize() );
//4 - s)
// s = 1/602.39521910453439454428( p1 * ( 1/602.39521910453439454428 * p2 * p3 ) ) = (p1 p2 p3)/362880
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( p2p3, p2, p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s, p1, p2p3, dim, 0.0016600397351866578333 )) );
cuda_assert( cudaDeviceSynchronize() );
//5)
if ( scaler != 0 )
{
for ( unsigned long index = 0; index != scaler; ++index )
{
kernel_assert( (Zgemm<<<mm_grids, mm_threads>>>( s_, s, s, dim, 1.0 )) );
cuda_assert( cudaDeviceSynchronize() );
double2* tmp = s_;
s_ = s;
s = tmp;
}
}
//6)
//kernel_assert( (extract_intensity_diff<<<1,dim>>>( s, I_exp, I_diff, dim, column_index )) );
double const ac_offset = cuda_ug[0];
double const dc_offset = cuda_ug[1];
//kernel_assert( (extract_intensity_diff_with_offset<<<1,dim>>>( s, I_exp, I_diff, dim, column_index, ac_offset, dc_offset )) );
kernel_assert( (extract_intensity_diff_with_offset<<<1,dim>>>( s, weights, I_exp, I_diff, dim, column_index, ac_offset, dc_offset )) );
cuda_assert( cudaDeviceSynchronize() );
}
void make_pattern_intensity_diff( double* cuda_weights, double* cuda_ug, unsigned long* cuda_ar, double* cuda_diag, double thickness, unsigned long* cuda_dim, double* cuda_I_exp, double* cuda_I_diff, unsigned long column_index, double2* cuda_cache, unsigned long tilt_size, unsigned long max_dim, double2* cuda_a )
{
unsigned long const threads = 64;
unsigned long const grids = (tilt_size + threads - 1)/threads;
kernel_assert( ( make_individual_pattern_intensity_diff<<<grids, threads>>>( cuda_weights, cuda_ug, cuda_ar, cuda_diag, thickness, cuda_dim, cuda_I_exp, cuda_I_diff, column_index, cuda_cache, max_dim, tilt_size, cuda_a ) ) );
cuda_assert( cudaDeviceSynchronize() );
}
|
ef1fa85d849598429f3d3e49c9b335b5ca643341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define EPSILON 1.0e-6
struct Particle
{
float3 position;
float3 velocity;
};
__host__ __device__ uint gen_random(uint a, uint b, uint c = 10, uint seed = 10)
{
return (seed*a+b) % c;
}
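// gen_random is a deterministic hash rather than a real RNG: for the same
// (a, b) pair it returns the same value in [0, c) on both host and device,
// which is what allows main() below to compare the CPU and GPU particle
// trajectories within EPSILON.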
__device__ void update_position(Particle* part_array_gpu, float dt, uint i)
{
part_array_gpu[i].position.x += part_array_gpu[i].velocity.x * dt;
part_array_gpu[i].position.y += part_array_gpu[i].velocity.y * dt;
part_array_gpu[i].position.z += part_array_gpu[i].velocity.z * dt;
}
__device__ void update_velocity(Particle* part_array_gpu, float dt, uint i, uint j)
{
part_array_gpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
part_array_gpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
part_array_gpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
}
__global__ void updateKernel(Particle* part_array_gpu, float dt, uint j)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
update_velocity(part_array_gpu, dt, i, j);
update_position(part_array_gpu, dt, i);
}
__host__ void updateCPU(Particle* part_array_cpu, float dt, uint j, uint NPARTICLES)
{
for (int i = 0; i < NPARTICLES; i++)
{
if(i < NPARTICLES)
{
part_array_cpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].position.x += part_array_cpu[i].velocity.x * dt;
part_array_cpu[i].position.y += part_array_cpu[i].velocity.y * dt;
part_array_cpu[i].position.z += part_array_cpu[i].velocity.z * dt;
}
}
}
__host__ bool compare_float(float a, float b)
{
return (fabs(a-b) < EPSILON);
}
__host__ bool compare_float3(float3 a, float3 b)
{
return (compare_float(a.x, b.x) && compare_float(a.y, b.y) && compare_float(a.z, b.z));
}
__host__ bool compareParticle(Particle particle1, Particle particle2)
{
bool result = true;
result &= compare_float3(particle1.position, particle2.position);
result &= compare_float3(particle1.velocity, particle2.velocity);
return result;
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int main(int argc, char** argv)
{
const float dt = 1.0;
bool flag = 1;
const int NITER = atoi(argv[1]);
const int TPB = atoi(argv[2]);
const int NPARTICLES = atoi(argv[3]);
// Declare a pointer for an array of particles
Particle* particles_gpu;
Particle* particles_cpu;
//Particle* particles_res;
//particles_cpu = new Particle[NPARTICLES];
//particles_res = new Particle[NPARTICLES];
hipHostMalloc((void **) &particles_cpu, NPARTICLES*sizeof(Particle), hipHostMallocDefault);
//hipHostMalloc((void **) &particles_res, NPARTICLES*sizeof(Particle), hipHostMallocDefault);
// Allocate device memory to store the output array
//hipMalloc(&particles_gpu, NPARTICLES*sizeof(Particle));
// Allocate managed memory
hipMallocManaged(&particles_gpu, NPARTICLES*sizeof(Particle));
for (int i = 0; i < NPARTICLES; i++)
{
if(i < NPARTICLES)
{
particles_cpu[i].velocity.x = 0;
particles_cpu[i].velocity.y = 0;
particles_cpu[i].velocity.z = 0;
particles_cpu[i].position.x = 1;
particles_cpu[i].position.y = 1;
particles_cpu[i].position.z = 1;
particles_gpu[i].velocity.x = 0;
particles_gpu[i].velocity.y = 0;
particles_gpu[i].velocity.z = 0;
particles_gpu[i].position.x = 1;
particles_gpu[i].position.y = 1;
particles_gpu[i].position.z = 1;
}
}
//double iStart_HtoD = cpuSecond();
//double iElaps_HtoD = cpuSecond() - iStart_HtoD;
double iStart_gpu = cpuSecond();
//GPU computation
for (int j = 0; j < NITER; j++)
{
//printf("GPU iteration numro %d : \n", j);
//hipMemcpy(particles_gpu, particles_res, NPARTICLES*sizeof(Particle), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( updateKernel), dim3((NPARTICLES+TPB-1) / TPB), dim3(TPB), 0, 0, particles_gpu, dt, j);
//hipMemcpy(particles_res, particles_gpu, NPARTICLES*sizeof(Particle), hipMemcpyDeviceToHost);
}
hipDeviceSynchronize();
//double iElaps_gpu = cpuSecond() - iStart_gpu;
//double iStart_cpu = cpuSecond();
//CPU computation
for (int j = 0; j < NITER; j++)
{
updateCPU(particles_cpu, dt, j, NPARTICLES);
}
//double iElaps_cpu = cpuSecond() - iStart_cpu;
for (int i = 0; i < NPARTICLES; i++) {
if(i < NPARTICLES)
{
if(!(compareParticle(particles_cpu[i], particles_gpu[i]))) {
flag = 0;
break;
}
}
}
/*double iStart_DtoH = cpuSecond();
hipMemcpy(particles_cpu, particles_gpu, NPARTICLES*sizeof(Particle), hipMemcpyDeviceToHost);
double iElaps_DtoH = cpuSecond() - iStart_DtoH; */
printf("Comparing the output for each implementation ");
if (flag)
{
printf("Correct!\n");
} else {
printf("Incorrect\n");
}
//delete[] particles_cpu;
//delete[] particles_res;
hipFree(particles_gpu);
hipHostFree(particles_cpu);
//hipHostFree(particles_res); // Free the memory
return 0;
} | ef1fa85d849598429f3d3e49c9b335b5ca643341.cu | #include <stdio.h>
#include <sys/time.h>
#define EPSILON 1.0e-6
struct Particle
{
float3 position;
float3 velocity;
};
__host__ __device__ uint gen_random(uint a, uint b, uint c = 10, uint seed = 10)
{
return (seed*a+b) % c;
}
__device__ void update_position(Particle* part_array_gpu, float dt, uint i)
{
part_array_gpu[i].position.x += part_array_gpu[i].velocity.x * dt;
part_array_gpu[i].position.y += part_array_gpu[i].velocity.y * dt;
part_array_gpu[i].position.z += part_array_gpu[i].velocity.z * dt;
}
__device__ void update_velocity(Particle* part_array_gpu, float dt, uint i, uint j)
{
part_array_gpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
part_array_gpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
part_array_gpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
}
__global__ void updateKernel(Particle* part_array_gpu, float dt, uint j)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
update_velocity(part_array_gpu, dt, i, j);
update_position(part_array_gpu, dt, i);
}
__host__ void updateCPU(Particle* part_array_cpu, float dt, uint j, uint NPARTICLES)
{
for (int i = 0; i < NPARTICLES; i++)
{
if(i < NPARTICLES)
{
part_array_cpu[i].velocity.x += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].velocity.y += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].velocity.z += (float)(gen_random(i, j)) * dt;
part_array_cpu[i].position.x += part_array_cpu[i].velocity.x * dt;
part_array_cpu[i].position.y += part_array_cpu[i].velocity.y * dt;
part_array_cpu[i].position.z += part_array_cpu[i].velocity.z * dt;
}
}
}
__host__ bool compare_float(float a, float b)
{
return (fabs(a-b) < EPSILON);
}
__host__ bool compare_float3(float3 a, float3 b)
{
return (compare_float(a.x, b.x) && compare_float(a.y, b.y) && compare_float(a.z, b.z));
}
__host__ bool compareParticle(Particle particle1, Particle particle2)
{
bool result = true;
result &= compare_float3(particle1.position, particle2.position);
result &= compare_float3(particle1.velocity, particle2.velocity);
return result;
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int main(int argc, char** argv)
{
const float dt = 1.0;
bool flag = 1;
const int NITER = atoi(argv[1]);
const int TPB = atoi(argv[2]);
const int NPARTICLES = atoi(argv[3]);
// Declare a pointer for an array of particles
Particle* particles_gpu;
Particle* particles_cpu;
//Particle* particles_res;
//particles_cpu = new Particle[NPARTICLES];
//particles_res = new Particle[NPARTICLES];
cudaHostAlloc((void **) &particles_cpu, NPARTICLES*sizeof(Particle), cudaHostAllocDefault);
//cudaHostAlloc((void **) &particles_res, NPARTICLES*sizeof(Particle), cudaHostAllocDefault);
// Allocate device memory to store the output array
//cudaMalloc(&particles_gpu, NPARTICLES*sizeof(Particle));
// Allocate managed memory
cudaMallocManaged(&particles_gpu, NPARTICLES*sizeof(Particle));
for (int i = 0; i < NPARTICLES; i++)
{
if(i < NPARTICLES)
{
particles_cpu[i].velocity.x = 0;
particles_cpu[i].velocity.y = 0;
particles_cpu[i].velocity.z = 0;
particles_cpu[i].position.x = 1;
particles_cpu[i].position.y = 1;
particles_cpu[i].position.z = 1;
particles_gpu[i].velocity.x = 0;
particles_gpu[i].velocity.y = 0;
particles_gpu[i].velocity.z = 0;
particles_gpu[i].position.x = 1;
particles_gpu[i].position.y = 1;
particles_gpu[i].position.z = 1;
}
}
//double iStart_HtoD = cpuSecond();
//double iElaps_HtoD = cpuSecond() - iStart_HtoD;
double iStart_gpu = cpuSecond();
//GPU computation
for (int j = 0; j < NITER; j++)
{
//printf("GPU iteration numéro %d : \n", j);
//cudaMemcpy(particles_gpu, particles_res, NPARTICLES*sizeof(Particle), cudaMemcpyHostToDevice);
updateKernel<<<(NPARTICLES+TPB-1) / TPB, TPB>>>(particles_gpu, dt, j);
//cudaMemcpy(particles_res, particles_gpu, NPARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
}
cudaDeviceSynchronize();
//double iElaps_gpu = cpuSecond() - iStart_gpu;
//double iStart_cpu = cpuSecond();
//CPU computation
for (int j = 0; j < NITER; j++)
{
updateCPU(particles_cpu, dt, j, NPARTICLES);
}
//double iElaps_cpu = cpuSecond() - iStart_cpu;
for (int i = 0; i < NPARTICLES; i++) {
if(i < NPARTICLES)
{
if(!(compareParticle(particles_cpu[i], particles_gpu[i]))) {
flag = 0;
break;
}
}
}
/*double iStart_DtoH = cpuSecond();
cudaMemcpy(particles_cpu, particles_gpu, NPARTICLES*sizeof(Particle), cudaMemcpyDeviceToHost);
double iElaps_DtoH = cpuSecond() - iStart_DtoH; */
printf("Comparing the output for each implementation… ");
if (flag)
{
printf("Correct!\n");
} else {
printf("Incorrect\n");
}
//delete[] particles_cpu;
//delete[] particles_res;
cudaFree(particles_gpu);
cudaFreeHost(particles_cpu);
//cudaFreeHost(particles_res); // Free the memory
return 0;
} |
cb2a16c57d0aaff1bf452b0a7eae04eaad20af92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file test_decimate.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
 * , which is conducted under the supervision of prof. dr hab. inż. Marek
 * Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <algorithm>
#include <list>
#define BLOCK_TILE_LOAD_V4 1
#include "rd/cpu/brute_force/choose.hpp"
#include "rd/cpu/brute_force/decimate.hpp"
#include "rd/gpu/device/brute_force/decimate.cuh"
#include "rd/gpu/device/device_decimate.cuh"
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/gpu/device/brute_force/test/decimate_nan_marker1.cuh"
#include "rd/gpu/device/brute_force/decimate_dist_mtx.cuh"
#include "rd/utils/memory.h"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "rd/utils/rd_params.hpp"
#include "rd/utils/name_traits.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "cub/util_type.cuh"
static const int TEST_DIM = 2;
static const int BLOCK_SIZE = 512;
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--r2=<r2 param>]\n"
"\t\t[--a=<spiral param>]\n"
"\t\t[--b=<spiral param>]\n"
"\t\t[--s=<spiral noise sigma>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r2", dParams.r2);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("r2", fParams.r2);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", fSParams.a);
args.GetCmdLineArgument("a", dSParams.a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", fSParams.b);
args.GetCmdLineArgument("b", dSParams.b);
}
if (args.CheckCmdLineFlag("s"))
{
args.GetCmdLineArgument("s", fSParams.sigma);
args.GetCmdLineArgument("s", dSParams.sigma);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testDecimateKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
// std::cout << "DOUBLE: " << std::endl;
// testDecimateKernel<double>(dParams, dSParams);
// std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
void decimateGold(
rd::RDParams<T> &rdp,
T *P,
T *S,
T *chosenS,
int &chosenCount)
{
std::list<T*> csList;
rd::choose(P, S, csList, rdp.np, rdp.ns, TEST_DIM, rdp.r1);
chosenCount = rdp.ns;
rd::copyTable(S, chosenS, chosenCount * TEST_DIM);
std::cout << "Chosen count: " << rdp.ns << std::endl;
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
// rd::printTable(chosenS, TEST_DIM, chosenCount, "initial chosen smpl");
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_chosen_set";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::decimate(S, csList, rdp.ns, TEST_DIM, rdp.r2);
std::cout << "Decimate count: " << rdp.ns << std::endl;
// rd::printTable(S, TEST_DIM, rdp.ns, "cpu decimate smpl");
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
}
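// decimateGold builds the CPU reference result: rd::choose picks the
// initial chosen set S (radius r1), rd::decimate then thins it (radius r2).
// The GPU variants below are validated against this S; both sides are
// sorted before rd::checkResult because the surviving points may be
// produced in a different order.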
template <
rd::DataMemoryLayout MEM_LAYOUT,
typename DecimateKernelPtr,
typename T>
void testDecimateNaNMark(
rd::RDParams<T> const &rdp,
T * d_S,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateNaNMark: ("
<< rd::DataMemoryLayoutNameTraits<MEM_LAYOUT>::name << ")" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
checkCudaErrors(hipMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy<MEM_LAYOUT, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount,
(MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM,
TEST_DIM);
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
int stride = (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM;
hipLaunchKernelGGL(( kernelPtr), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_S, d_ns, rdp.r2, stride, cub::Int2Type<MEM_LAYOUT>());
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy<rd::ROW_MAJOR, MEM_LAYOUT, hipMemcpyDeviceToHost>(
S_gpu, d_S, (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM,
(MEM_LAYOUT == rd::COL_MAJOR) ? TEST_DIM : h_chosenCount,
TEST_DIM, (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM);
checkCudaErrors(hipDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
    delete[] aux;
    delete[] S_gpu;
}
template <
typename DecimateKernelPtr,
typename T>
void testDecimateNaNMark_alignedMem(
rd::RDParams<T> const &rdp,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateNaNMark_alignedMem: (COL_MAJOR)" << std::endl;
size_t pitch;
T * d_S;
checkCudaErrors(hipMallocPitch(&d_S, &pitch, h_chosenCount * sizeof(T), TEST_DIM));
T *S_gpu;
int *d_ns, h_ns;
    checkCudaErrors(hipMemset2D(d_S, pitch, 0, h_chosenCount * sizeof(T), TEST_DIM)); // width argument is in bytes
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount, pitch, TEST_DIM * sizeof(T));
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
int stride = pitch / sizeof(T);
hipLaunchKernelGGL(( kernelPtr), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_S, d_ns, rdp.r2, stride, cub::Int2Type<rd::COL_MAJOR>());
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, hipMemcpyDeviceToHost>(
S_gpu, d_S, h_chosenCount, TEST_DIM, TEST_DIM * sizeof(T), pitch);
checkCudaErrors(hipDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
    delete[] aux;
    delete[] S_gpu;
checkCudaErrors(hipFree(d_S));
}
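// The aligned-memory variant above differs from testDecimateNaNMark only in
// allocating the chosen set with hipMallocPitch, which pads each of the
// TEST_DIM coordinate rows to an aligned pitch; the kernel sees the padding
// through stride = pitch / sizeof(T).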
template <typename T>
static void getChosenPtsFromMask(
char const * h_gpuMask,
T const * h_chosenS,
T * S_gpu,
int h_chosenCount)
{
int index = 0;
for (int i = 0; i < h_chosenCount; ++i)
{
if (h_gpuMask[i])
{
for (int d = 0; d < TEST_DIM; ++d)
{
S_gpu[index * TEST_DIM + d] = h_chosenS[i * TEST_DIM + d];
}
index++;
}
}
}
template <
rd::DataMemoryLayout MEM_LAYOUT,
typename DecimateKernelPtr,
typename T>
void testDecimateDistMtx(
rd::RDParams<T> const &rdp,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateDistMtx: ("
<< rd::DataMemoryLayoutNameTraits<MEM_LAYOUT>::name << ")" << std::endl;
size_t sPitch, distMtxPitch;
T * d_S, *d_distMtx;
char *d_mask;
if (MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(hipMalloc(&d_S, TEST_DIM * h_chosenCount * sizeof(T)));
sPitch = TEST_DIM * sizeof(T);
checkCudaErrors(hipMemset(d_S, 0, h_chosenCount * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMemcpy(d_S, h_chosenS, h_chosenCount * TEST_DIM * sizeof(T),
hipMemcpyHostToDevice));
}
else if (MEM_LAYOUT == rd::COL_MAJOR)
{
checkCudaErrors(hipMallocPitch(&d_S, &sPitch, h_chosenCount * sizeof(T), TEST_DIM));
checkCudaErrors(hipMemset2D(d_S, sPitch, 0, h_chosenCount * sizeof(T), TEST_DIM));
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount, sPitch, TEST_DIM * sizeof(T));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(hipMallocPitch(&d_distMtx, &distMtxPitch, h_chosenCount * sizeof(T),
h_chosenCount));
checkCudaErrors(hipMalloc(&d_mask, h_chosenCount * sizeof(char)));
T *S_gpu;
int *d_ns, h_ns;
// checkCudaErrors(hipMemset2D(d_distMtx, distMtxPitch, 0, h_chosenCount * sizeof(T),
// h_chosenCount));
// checkCudaErrors(hipMemset(d_mask, 1, h_chosenCount * sizeof(char)));
checkCudaErrors(hipGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(hipMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
int sStride = sPitch / sizeof(T);
int distMtxStride = distMtxPitch / sizeof(T);
hipLaunchKernelGGL(( kernelPtr), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_S, d_ns, sStride, d_distMtx, distMtxStride, d_mask, rdp.r2);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(hipDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
checkCudaErrors(hipFree(d_S));
checkCudaErrors(hipFree(d_distMtx));
checkCudaErrors(hipFree(d_mask));
return;
}
S_gpu = new T[h_ns * TEST_DIM];
if (MEM_LAYOUT == rd::COL_MAJOR)
{
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, hipMemcpyDeviceToHost>(
S_gpu, d_S, h_ns, TEST_DIM, TEST_DIM * sizeof(T), sPitch);
}
else if (MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(hipMemcpy(S_gpu, d_S, h_ns * TEST_DIM * sizeof(T),
hipMemcpyDeviceToHost));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(hipDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
char * h_gpuMask = new char[h_chosenCount];
T * S_gpu2 = new T[h_ns * TEST_DIM];
checkCudaErrors(hipMemcpy(h_gpuMask, d_mask, h_chosenCount * sizeof(char),
hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
getChosenPtsFromMask(h_gpuMask, h_chosenS, S_gpu2, h_chosenCount);
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
std::sort(S_gpu2, S_gpu2 + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu2, rdp.ns * TEST_DIM, rdp.verbose);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
delete[] S_gpu2;
delete[] h_gpuMask;
delete[] aux;
checkCudaErrors(hipFree(d_S));
checkCudaErrors(hipFree(d_distMtx));
checkCudaErrors(hipFree(d_mask));
}
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t r2: " << rdp.r2 << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
T *d_P, *d_S;
T *h_P, *h_S, *h_chosenS;
checkCudaErrors(hipMalloc((void**)&d_P, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMemset(d_P, 0, rdp.np * TEST_DIM * sizeof(T)));
// h_P = rd::createTable<T>(rdp.np * TEST_DIM, T(1));
h_P = new T[rdp.np * TEST_DIM];
h_S = new T[rdp.np * TEST_DIM];
h_chosenS = new T[rdp.np * TEST_DIM];
// for (size_t i = 0; i < rdp.np; ++i)
// {
// h_P[i * TEST_DIM] = i;
// }
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template spiral2D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template spiral3D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
default:
throw std::logic_error("Not supported dimension!");
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_P, d_P, rdp.np * TEST_DIM * sizeof(T),
hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
rd::transposeInPlace(h_P, h_P + rdp.np * TEST_DIM, rdp.np);
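// the samples were generated on the device in COL_MAJOR order, so the host
// copy is transposed to ROW_MAJOR for the CPU reference path below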
if (rdp.verbose)
{
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE DECIMATE
//---------------------------------------------------
int h_chosenCount;
decimateGold(rdp, h_P, h_S, h_chosenS, h_chosenCount);
//---------------------------------------------------
// GPU DECIMATE
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
//---------------------------------------------------
// 1st version - basic NaN marker version
//---------------------------------------------------
typedef void (*DecimateKernelPtr1_RM)(T*, int*, T const, int, cub::Int2Type<rd::ROW_MAJOR>);
typedef void (*DecimateKernelPtr1_CM)(T*, int*, T const, int, cub::Int2Type<rd::COL_MAJOR>);
DecimateKernelPtr1_RM kernelPtr1_RM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
DecimateKernelPtr1_CM kernelPtr1_CM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
testDecimateNaNMark<rd::ROW_MAJOR>(rdp, d_S, h_chosenS, h_chosenCount, h_S, kernelPtr1_RM);
testDecimateNaNMark<rd::COL_MAJOR>(rdp, d_S, h_chosenS, h_chosenCount, h_S, kernelPtr1_CM);
//---------------------------------------------------
// 3rd version - 1st version with matrix rows aligned (pitched) in memory
//---------------------------------------------------
typedef void (*DecimateKernelPtr3_CM)(T*, int*, T const, int, cub::Int2Type<rd::COL_MAJOR>);
DecimateKernelPtr3_CM kernelPtr3_CM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
// assume COL_MAJOR
testDecimateNaNMark_alignedMem(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr3_CM);
//---------------------------------------------------
// 4th version - completely new way,
// 1) compute dist mtx, 2) reduce dist mtx, 3) reduce points
//---------------------------------------------------
auto kernelPtr4_RM = rd::gpu::bruteForce::decimateDistMtx<TEST_DIM, BLOCK_SIZE, rd::ROW_MAJOR, T>;
auto kernelPtr4_CM = rd::gpu::bruteForce::decimateDistMtx<TEST_DIM, BLOCK_SIZE, rd::COL_MAJOR, T>;
testDecimateDistMtx<rd::ROW_MAJOR>(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr4_RM);
testDecimateDistMtx<rd::COL_MAJOR>(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr4_CM);
// clean-up
delete[] h_P;
delete[] h_S;
delete[] h_chosenS;
checkCudaErrors(hipFree(d_P));
checkCudaErrors(hipFree(d_S));
}
| cb2a16c57d0aaff1bf452b0a7eae04eaad20af92.cu | /**
* @file test_decimate.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <helper_cuda.h>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include <algorithm>
#include <list>
#define BLOCK_TILE_LOAD_V4 1
#include "rd/cpu/brute_force/choose.hpp"
#include "rd/cpu/brute_force/decimate.hpp"
#include "rd/gpu/device/brute_force/decimate.cuh"
#include "rd/gpu/device/device_decimate.cuh"
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/gpu/device/brute_force/test/decimate_nan_marker1.cuh"
#include "rd/gpu/device/brute_force/decimate_dist_mtx.cuh"
#include "rd/utils/memory.h"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "rd/utils/rd_params.hpp"
#include "rd/utils/name_traits.hpp"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "cub/util_type.cuh"
static const int TEST_DIM = 2;
static const int BLOCK_SIZE = 512;
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--r2=<r2 param>]\n"
"\t\t[--a=<spiral param>]\n"
"\t\t[--b=<spiral param>]\n"
"\t\t[--s=<spiral noise sigma>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r2", dParams.r2);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("r2", fParams.r2);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("a"))
{
args.GetCmdLineArgument("a", fSParams.a);
args.GetCmdLineArgument("a", dSParams.a);
}
if (args.CheckCmdLineFlag("b"))
{
args.GetCmdLineArgument("b", fSParams.b);
args.GetCmdLineArgument("b", dSParams.b);
}
if (args.CheckCmdLineFlag("s"))
{
args.GetCmdLineArgument("s", fSParams.sigma);
args.GetCmdLineArgument("s", dSParams.sigma);
}
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testDecimateKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
// std::cout << "DOUBLE: " << std::endl;
// testDecimateKernel<double>(dParams, dSParams);
// std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
void decimateGold(
rd::RDParams<T> &rdp,
T *P,
T *S,
T *chosenS,
int &chosenCount)
{
std::list<T*> csList;
rd::choose(P, S, csList, rdp.np, rdp.ns, TEST_DIM, rdp.r1);
chosenCount = rdp.ns;
rd::copyTable(S, chosenS, chosenCount * TEST_DIM);
std::cout << "Chosen count: " << rdp.ns << std::endl;
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
// rd::printTable(chosenS, TEST_DIM, chosenCount, "initial chosen smpl");
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_chosen_set";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
rd::decimate(S, csList, rdp.ns, TEST_DIM, rdp.r2);
std::cout << "Decimate count: " << rdp.ns << std::endl;
// rd::printTable(S, TEST_DIM, rdp.ns, "cpu decimate smpl");
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM << "D_ref_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S, rd::GraphDrawer<T>::POINTS, rdp.ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
}
template <
rd::DataMemoryLayout MEM_LAYOUT,
typename DecimateKernelPtr,
typename T>
void testDecimateNaNMark(
rd::RDParams<T> const &rdp,
T * d_S,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateNaNMark: ("
<< rd::DataMemoryLayoutNameTraits<MEM_LAYOUT>::name << ")" << std::endl;
T *S_gpu;
int *d_ns, h_ns;
checkCudaErrors(cudaMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy<MEM_LAYOUT, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount,
(MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM,
TEST_DIM);
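// the decimate kernel reports the surviving sample count through the device
// global rd::gpu::rdBruteForceNs: its address is taken here for the kernel
// argument, and the final value is read back via cudaMemcpyFromSymbol below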
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
int stride = (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM;
kernelPtr<<<1, BLOCK_SIZE>>>(d_S, d_ns, rdp.r2, stride, cub::Int2Type<MEM_LAYOUT>());
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy<rd::ROW_MAJOR, MEM_LAYOUT, cudaMemcpyDeviceToHost>(
S_gpu, d_S, (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM,
(MEM_LAYOUT == rd::COL_MAJOR) ? TEST_DIM : h_chosenCount,
TEST_DIM, (MEM_LAYOUT == rd::COL_MAJOR) ? h_chosenCount : TEST_DIM);
checkCudaErrors(cudaDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
}
template <
typename DecimateKernelPtr,
typename T>
void testDecimateNaNMark_alignedMem(
rd::RDParams<T> const &rdp,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateNaNMark_alignedMem: (COL_MAJOR)" << std::endl;
size_t pitch;
T * d_S;
checkCudaErrors(cudaMallocPitch(&d_S, &pitch, h_chosenCount * sizeof(T), TEST_DIM));
T *S_gpu;
int *d_ns, h_ns;
checkCudaErrors(cudaMemset2D(d_S, pitch, 0, h_chosenCount * sizeof(T), TEST_DIM));
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount, pitch, TEST_DIM * sizeof(T));
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
int stride = pitch / sizeof(T);
kernelPtr<<<1, BLOCK_SIZE>>>(d_S, d_ns, rdp.r2, stride, cub::Int2Type<rd::COL_MAJOR>());
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
}
S_gpu = new T[h_chosenCount * TEST_DIM];
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, cudaMemcpyDeviceToHost>(
S_gpu, d_S, h_chosenCount, TEST_DIM, TEST_DIM * sizeof(T), pitch);
checkCudaErrors(cudaDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
checkCudaErrors(cudaFree(d_S));
}
template <typename T>
static void getChosenPtsFromMask(
char const * h_gpuMask,
T const * h_chosenS,
T * S_gpu,
int h_chosenCount)
{
int index = 0;
for (int i = 0; i < h_chosenCount; ++i)
{
if (h_gpuMask[i])
{
for (int d = 0; d < TEST_DIM; ++d)
{
S_gpu[index * TEST_DIM + d] = h_chosenS[i * TEST_DIM + d];
}
index++;
}
}
}
template <
rd::DataMemoryLayout MEM_LAYOUT,
typename DecimateKernelPtr,
typename T>
void testDecimateDistMtx(
rd::RDParams<T> const &rdp,
T * h_chosenS,
int h_chosenCount,
T const * S_gold,
DecimateKernelPtr kernelPtr)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testDecimateDistMtx: ("
<< rd::DataMemoryLayoutNameTraits<MEM_LAYOUT>::name << ")" << std::endl;
size_t sPitch, distMtxPitch;
T * d_S, *d_distMtx;
char *d_mask;
if (MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(cudaMalloc(&d_S, TEST_DIM * h_chosenCount * sizeof(T)));
sPitch = TEST_DIM * sizeof(T);
checkCudaErrors(cudaMemset(d_S, 0, h_chosenCount * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMemcpy(d_S, h_chosenS, h_chosenCount * TEST_DIM * sizeof(T),
cudaMemcpyHostToDevice));
}
else if (MEM_LAYOUT == rd::COL_MAJOR)
{
checkCudaErrors(cudaMallocPitch(&d_S, &sPitch, h_chosenCount * sizeof(T), TEST_DIM));
checkCudaErrors(cudaMemset2D(d_S, sPitch, 0, h_chosenCount * sizeof(T), TEST_DIM));
// get chosen samples to device memory properly ordered
rd::gpu::rdMemcpy2D<rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyHostToDevice>(
d_S, h_chosenS, TEST_DIM, h_chosenCount, sPitch, TEST_DIM * sizeof(T));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(cudaMallocPitch(&d_distMtx, &distMtxPitch, h_chosenCount * sizeof(T),
h_chosenCount));
checkCudaErrors(cudaMalloc(&d_mask, h_chosenCount * sizeof(char)));
T *S_gpu;
int *d_ns, h_ns;
// checkCudaErrors(cudaMemset2D(d_distMtx, distMtxPitch, 0, h_chosenCount * sizeof(T),
// h_chosenCount));
// checkCudaErrors(cudaMemset(d_mask, 1, h_chosenCount * sizeof(char)));
checkCudaErrors(cudaGetSymbolAddress((void**)&d_ns, rd::gpu::rdBruteForceNs));
checkCudaErrors(cudaMemcpyToSymbol(rd::gpu::rdBruteForceNs, &h_chosenCount, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
int sStride = sPitch / sizeof(T);
int distMtxStride = distMtxPitch / sizeof(T);
kernelPtr<<<1, BLOCK_SIZE>>>(d_S, d_ns, sStride, d_distMtx, distMtxStride, d_mask, rdp.r2);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpyFromSymbol(&h_ns, rd::gpu::rdBruteForceNs, sizeof(int)));
checkCudaErrors(cudaDeviceSynchronize());
if ((int)rdp.ns != h_ns)
{
std::cout << "[ERROR]Incorrect number of chosen samples!" << std::endl;
std::cout << "Is: " << h_ns << " and should be: " << rdp.ns << std::endl;
checkCudaErrors(cudaFree(d_S));
checkCudaErrors(cudaFree(d_distMtx));
checkCudaErrors(cudaFree(d_mask));
return;
}
S_gpu = new T[h_ns * TEST_DIM];
if (MEM_LAYOUT == rd::COL_MAJOR)
{
rd::gpu::rdMemcpy2D<rd::ROW_MAJOR, rd::COL_MAJOR, cudaMemcpyDeviceToHost>(
S_gpu, d_S, h_ns, TEST_DIM, TEST_DIM * sizeof(T), sPitch);
}
else if (MEM_LAYOUT == rd::ROW_MAJOR)
{
checkCudaErrors(cudaMemcpy(S_gpu, d_S, h_ns * TEST_DIM * sizeof(T),
cudaMemcpyDeviceToHost));
}
else
{
throw std::runtime_error("Unsupported memory layout!");
}
checkCudaErrors(cudaDeviceSynchronize());
if (rdp.verbose)
{
rd::GraphDrawer<T> gDrawer;
std::ostringstream os;
os << typeid(T).name() << "_" << TEST_DIM << "D_gpu_decimate";
gDrawer.startGraph(os.str(), TEST_DIM);
// gDrawer.addPlotCmd("'-' w p pt 1 lc rgb '#d64f4f' ps 0.5 ",
// P, rd::GraphDrawer<T>::POINTS, rdp.np);
gDrawer.addPlotCmd("'-' w p pt 2 lc rgb '#38abe0' ps 1.3 ",
S_gpu, rd::GraphDrawer<T>::POINTS, h_ns);
gDrawer.endGraph();
os.clear();
os.str(std::string());
}
char * h_gpuMask = new char[h_chosenCount];
T * S_gpu2 = new T[h_ns * TEST_DIM];
checkCudaErrors(cudaMemcpy(h_gpuMask, d_mask, h_chosenCount * sizeof(char),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
getChosenPtsFromMask(h_gpuMask, h_chosenS, S_gpu2, h_chosenCount);
T * aux = new T[h_chosenCount * TEST_DIM];
rd::copyTable_omp(S_gold, aux, rdp.ns * TEST_DIM);
std::sort(aux, aux + rdp.ns * TEST_DIM);
std::sort(S_gpu, S_gpu + rdp.ns * TEST_DIM);
std::sort(S_gpu2, S_gpu2 + rdp.ns * TEST_DIM);
rd::checkResult(aux, S_gpu2, rdp.ns * TEST_DIM, rdp.verbose);
rd::checkResult(aux, S_gpu, rdp.ns * TEST_DIM, rdp.verbose);
delete[] S_gpu;
delete[] S_gpu2;
delete[] h_gpuMask;
delete[] aux;
checkCudaErrors(cudaFree(d_S));
checkCudaErrors(cudaFree(d_distMtx));
checkCudaErrors(cudaFree(d_mask));
}
template <typename T>
void testDecimateKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t r2: " << rdp.r2 << std::endl;
std::cout << "Spiral params: " << std::endl;
std::cout << "\t a: " << sp.a << std::endl;
std::cout << "\t b: " << sp.b << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
T *d_P, *d_S;
T *h_P, *h_S, *h_chosenS;
checkCudaErrors(cudaMalloc((void**)&d_P, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMemset(d_P, 0, rdp.np * TEST_DIM * sizeof(T)));
// h_P = rd::createTable<T>(rdp.np * TEST_DIM, T(1));
h_P = new T[rdp.np * TEST_DIM];
h_S = new T[rdp.np * TEST_DIM];
h_chosenS = new T[rdp.np * TEST_DIM];
// for (size_t i = 0; i < rdp.np; ++i)
// {
// h_P[i * TEST_DIM] = i;
// }
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template spiral2D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template spiral3D<rd::COL_MAJOR>(
rdp.np, sp.a, sp.b, sp.sigma, d_P);
break;
default:
throw std::logic_error("Not supported dimension!");
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_P, d_P, rdp.np * TEST_DIM * sizeof(T),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
rd::transposeInPlace(h_P, h_P + rdp.np * TEST_DIM, rdp.np);
if (rdp.verbose)
{
std::ostringstream os;
rd::GraphDrawer<T> gDrawer;
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_P, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE DECIMATE
//---------------------------------------------------
int h_chosenCount;
decimateGold(rdp, h_P, h_S, h_chosenS, h_chosenCount);
//---------------------------------------------------
// GPU DECIMATE
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
//---------------------------------------------------
// 1st version - basic NaN marker version
//---------------------------------------------------
typedef void (*DecimateKernelPtr1_RM)(T*, int*, T const, int, cub::Int2Type<rd::ROW_MAJOR>);
typedef void (*DecimateKernelPtr1_CM)(T*, int*, T const, int, cub::Int2Type<rd::COL_MAJOR>);
DecimateKernelPtr1_RM kernelPtr1_RM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
DecimateKernelPtr1_CM kernelPtr1_CM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
testDecimateNaNMark<rd::ROW_MAJOR>(rdp, d_S, h_chosenS, h_chosenCount, h_S, kernelPtr1_RM);
testDecimateNaNMark<rd::COL_MAJOR>(rdp, d_S, h_chosenS, h_chosenCount, h_S, kernelPtr1_CM);
//---------------------------------------------------
// 3rd version - 1st version with matrix rows aligned (pitched) in memory
//---------------------------------------------------
typedef void (*DecimateKernelPtr3_CM)(T*, int*, T const, int, cub::Int2Type<rd::COL_MAJOR>);
DecimateKernelPtr3_CM kernelPtr3_CM = rd::gpu::bruteForce::decimateNanMarker1<BLOCK_SIZE, TEST_DIM, T>;
// assume COL_MAJOR
testDecimateNaNMark_alignedMem(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr3_CM);
//---------------------------------------------------
// 4th version - completely new way,
// 1) compute dist mtx, 2) reduce dist mtx, 3) reduce points
//---------------------------------------------------
auto kernelPtr4_RM = rd::gpu::bruteForce::decimateDistMtx<TEST_DIM, BLOCK_SIZE, rd::ROW_MAJOR, T>;
auto kernelPtr4_CM = rd::gpu::bruteForce::decimateDistMtx<TEST_DIM, BLOCK_SIZE, rd::COL_MAJOR, T>;
testDecimateDistMtx<rd::ROW_MAJOR>(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr4_RM);
testDecimateDistMtx<rd::COL_MAJOR>(rdp, h_chosenS, h_chosenCount, h_S, kernelPtr4_CM);
// clean-up
delete[] h_P;
delete[] h_S;
delete[] h_chosenS;
checkCudaErrors(cudaFree(d_P));
checkCudaErrors(cudaFree(d_S));
}
|
445ad70fa4b37ec1dd496d9213cc71056c97fc14.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
// so that VS does not complain about __syncthreads();
// more info here: https://devtalk.nvidia.com/default/topic/1009723/__syncthreads-and-atomicadd-are-undefined-in-visual-studio-2015/
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
#define CUDA_CHECK_ERROR(err) \
if (err != hipSuccess) { \
printf("Cuda error: %s\n", hipGetErrorString(err)); \
printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \
}
const long N = 33554432; // Points count
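// Monte Carlo estimate of pi: draw N uniform points in the unit square; the
// fraction landing inside the unit quarter-circle (x*x + y*y < 1) tends to
// pi/4, so pi ~= 4 * countPointsInCircle / N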
__global__ void calc_PI_gpu(float *x, float *y, int *totalCount) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Thread id
int threadCount = gridDim.x * blockDim.x; // total thread count, used as the stride of the grid-stride loop
int countPointsInCircle = 0;
for (int i = idx; i < N; i += threadCount) {
if (x[i] * x[i] + y[i] * y[i] < 1) {
countPointsInCircle++;
}
}
atomicAdd(totalCount, countPointsInCircle); // each thread adds its private in-circle count to the global total
}
float calc_PI_CPU(float *x, float *y) {
int countPointsInCircle = 0;
for (int i = 0; i < N; i++) {
if (x[i] * x[i] + y[i] * y[i] < 1) {
countPointsInCircle++;
}
}
return float(countPointsInCircle) * 4 / N;
}
int main()
{
float *host_X, *host_Y, *gpu_X, *gpu_Y;
host_X = (float *)calloc(N, sizeof(float));
host_Y = (float *)calloc(N, sizeof(float));
CUDA_CHECK_ERROR(hipMalloc((void **)&gpu_X, N * sizeof(float)));
CUDA_CHECK_ERROR(hipMalloc((void **)&gpu_Y, N * sizeof(float)));
hiprandGenerator_t curandGenerator;
hiprandCreateGenerator(&curandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(curandGenerator, 4321ULL);
//generate two sequences
hiprandGenerateUniform(curandGenerator, gpu_X, N);
hiprandGenerateUniform(curandGenerator, gpu_Y, N);
hiprandDestroyGenerator(curandGenerator);
CUDA_CHECK_ERROR(hipMemcpy(host_X, gpu_X, N * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CHECK_ERROR(hipMemcpy(host_Y, gpu_Y, N * sizeof(float), hipMemcpyDeviceToHost));
clock_t start_time = clock();
float cpu_result = calc_PI_CPU(host_X, host_Y);
clock_t end_time = clock();
std::cout.precision(15);
std::cout << "CPU time = " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " msec" << std::endl;
std::cout << "result: " << cpu_result << std::endl;
float gpuTime = 0;
hipEvent_t start;
hipEvent_t stop;
int blockDim = 512;
dim3 threads(blockDim, 1);
dim3 grid(N / (128 * blockDim), 1);
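// grid.x = N / (128 * 512) = 512 blocks of 512 threads; each of the
// 512 * 512 = 262144 threads therefore handles 128 points via the
// grid-stride loop in the kernel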
int *total_gpu_count;
int *host_total_gpu_count = (int *)calloc(1, sizeof(int));
CUDA_CHECK_ERROR(hipMalloc((void **)&total_gpu_count, sizeof(int)));
CUDA_CHECK_ERROR(hipMemset(total_gpu_count, 0, sizeof(int))); // hipMalloc does not zero device memory
CUDA_CHECK_ERROR(hipEventCreate(&start));
CUDA_CHECK_ERROR(hipEventCreate(&stop));
hipEventRecord(start, 0);
calc_PI_gpu << <grid, threads >> > (gpu_X, gpu_Y, total_gpu_count);
CUDA_CHECK_ERROR(hipMemcpy(host_total_gpu_count, total_gpu_count, sizeof(int), hipMemcpyDeviceToHost));
int gpu_points_count = *host_total_gpu_count;
float gpu_result = (float)gpu_points_count * 4 / N;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
std::cout << "GPU time = " << gpuTime << " " << std::endl;
std::cout << "result: " << gpu_result << std::endl;
CUDA_CHECK_ERROR(hipEventDestroy(start));
CUDA_CHECK_ERROR(hipEventDestroy(stop));
CUDA_CHECK_ERROR(hipFree(gpu_X));
CUDA_CHECK_ERROR(hipFree(gpu_Y));
CUDA_CHECK_ERROR(hipFree(total_gpu_count));
free(host_X);
free(host_Y);
free(host_total_gpu_count);
system("pause");
return 0;
}
| 445ad70fa4b37ec1dd496d9213cc71056c97fc14.cu | #include <cstdlib>
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <curand.h>
#include <stdio.h>
#include <math.h>
// чтобы VS не ругался на __syncthreads();
//доп. инфа здесь https://devtalk.nvidia.com/default/topic/1009723/__syncthreads-and-atomicadd-are-undefined-in-visual-studio-2015/
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include <curand_kernel.h>
#include <device_functions.h>
#include "device_launch_parameters.h"
#define CUDA_CHECK_ERROR(err) \
if (err != cudaSuccess) { \
printf("Cuda error: %s\n", cudaGetErrorString(err)); \
printf("Error in file: %s, line: %i\n", __FILE__, __LINE__); \
}
const long N = 33554432; // Points count
__global__ void calc_PI_gpu(float *x, float *y, int *totalCount) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Thread id
int threadCount = gridDim.x * blockDim.x; // total thread count, used as the stride of the grid-stride loop
int countPointsInCircle = 0;
for (int i = idx; i < N; i += threadCount) {
if (x[i] * x[i] + y[i] * y[i] < 1) {
countPointsInCircle++;
}
}
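// accumulate privately in a register and issue a single atomicAdd per
// thread, so contention on totalCount is one atomic per thread rather than
// one per sampled point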
atomicAdd(totalCount, countPointsInCircle); // one atomic update per thread
}
float calc_PI_CPU(float *x, float *y) {
int countPointsInCircle = 0;
for (int i = 0; i < N; i++) {
if (x[i] * x[i] + y[i] * y[i] < 1) {
countPointsInCircle++;
}
}
return float(countPointsInCircle) * 4 / N;
}
int main()
{
float *host_X, *host_Y, *gpu_X, *gpu_Y;
host_X = (float *)calloc(N, sizeof(float));
host_Y = (float *)calloc(N, sizeof(float));
CUDA_CHECK_ERROR(cudaMalloc((void **)&gpu_X, N * sizeof(float)));
CUDA_CHECK_ERROR(cudaMalloc((void **)&gpu_Y, N * sizeof(float)));
curandGenerator_t curandGenerator;
curandCreateGenerator(&curandGenerator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(curandGenerator, 4321ULL);
//generate two sequences
curandGenerateUniform(curandGenerator, gpu_X, N);
curandGenerateUniform(curandGenerator, gpu_Y, N);
curandDestroyGenerator(curandGenerator);
CUDA_CHECK_ERROR(cudaMemcpy(host_X, gpu_X, N * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK_ERROR(cudaMemcpy(host_Y, gpu_Y, N * sizeof(float), cudaMemcpyDeviceToHost));
clock_t start_time = clock();
float cpu_result = calc_PI_CPU(host_X, host_Y);
clock_t end_time = clock();
std::cout.precision(15);
std::cout << "CPU time = " << (double)((end_time - start_time) * 1000 / CLOCKS_PER_SEC) << " msec" << std::endl;
std::cout << "result: " << cpu_result << std::endl;
float gpuTime = 0;
cudaEvent_t start;
cudaEvent_t stop;
int blockDim = 512;
dim3 threads(blockDim, 1);
dim3 grid(N / (128 * blockDim), 1);
int *total_gpu_count;
int *host_total_gpu_count = (int *)calloc(1, sizeof(int));
CUDA_CHECK_ERROR(cudaMalloc((void **)&total_gpu_count, sizeof(int)));
CUDA_CHECK_ERROR(cudaMemset(total_gpu_count, 0, sizeof(int))); // cudaMalloc does not zero device memory
CUDA_CHECK_ERROR(cudaEventCreate(&start));
CUDA_CHECK_ERROR(cudaEventCreate(&stop));
cudaEventRecord(start, 0);
calc_PI_gpu << <grid, threads >> > (gpu_X, gpu_Y, total_gpu_count);
CUDA_CHECK_ERROR(cudaMemcpy(host_total_gpu_count, total_gpu_count, sizeof(int), cudaMemcpyDeviceToHost));
int gpu_points_count = *host_total_gpu_count;
float gpu_result = (float)gpu_points_count * 4 / N;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
std::cout << "GPU time = " << gpuTime << " мсек" << std::endl;
std::cout << "result: " << gpu_result << std::endl;
CUDA_CHECK_ERROR(cudaEventDestroy(start));
CUDA_CHECK_ERROR(cudaEventDestroy(stop));
CUDA_CHECK_ERROR(cudaFree(gpu_X));
CUDA_CHECK_ERROR(cudaFree(gpu_Y));
CUDA_CHECK_ERROR(cudaFree(total_gpu_count));
free(host_X);
free(host_Y);
free(host_total_gpu_count);
system("pause");
return 0;
}
|
709a35798d7e3095ebcedb808c5f84610ac22e75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "finite_difference_cuda.cuh"
float fx = 1.0f, fy = 1.0f, fz = 1.0f;
const int mx = 64, my = 64, mz = 64;
const int sPencils = 4;
const int lPencils = 32;
__constant__ float c_ax, c_bx, c_cx, c_dx;
__constant__ float c_ay, c_by, c_cy, c_dy;
__constant__ float c_az, c_bz, c_cz, c_dz;
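// The c_* stencil weights above live in constant memory and must be set from
// the host before launching any derivative kernel. A minimal sketch for the
// x direction follows, assuming standard 8th-order central differences on a
// periodic grid of unit length; the helper name and the grid spacing are
// assumptions for illustration, not part of the original file.
void setDerivativeParametersX() {
float dsinv = (float)mx; // assumed inverse grid spacing, h = 1 / mx
// 8th-order first-derivative weights: 4/5, -1/5, 4/105, -1/280, scaled by 1/h
float ax = 4.f / 5.f * dsinv;
float bx = -1.f / 5.f * dsinv;
float cx = 4.f / 105.f * dsinv;
float dx = -1.f / 280.f * dsinv;
hipMemcpyToSymbol(c_ax, &ax, sizeof(float));
hipMemcpyToSymbol(c_bx, &bx, sizeof(float));
hipMemcpyToSymbol(c_cx, &cx, sizeof(float));
hipMemcpyToSymbol(c_dx, &dx, sizeof(float));
}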
__global__ void derivative_x(float *f, float *df) {
__shared__ float s_f[sPencils][mx + 8]; // 4-wide halo
if (blockIdx.x == 0 && threadIdx.x == 0) {
printf("%f, %f, %f, %f\n", c_ax, c_bx, c_cx, c_dx);
}
int i = threadIdx.x;
int j = blockIdx.x * blockDim.y + threadIdx.y;
int k = blockIdx.y;
int si = i + 4; // local i for shared memory access + halo offset
int sj = threadIdx.y; // local j for shared memory access
int globalIdx = k * mx * my + j * mx + i;
s_f[sj][si] = f[globalIdx];
__syncthreads();
// fill in periodic images in shared memory array
if (i < 4) {
s_f[sj][si - 4] = s_f[sj][si + mx - 5];
s_f[sj][si + mx] = s_f[sj][si + 1];
}
__syncthreads();
df[globalIdx] = (c_ax * (s_f[sj][si + 1] - s_f[sj][si - 1]) +
c_bx * (s_f[sj][si + 2] - s_f[sj][si - 2]) +
c_cx * (s_f[sj][si + 3] - s_f[sj][si - 3]) +
c_dx * (s_f[sj][si + 4] - s_f[sj][si - 4]));
}
__global__ void derivative_x_lPencils(float *f, float *df) {
__shared__ float s_f[lPencils][mx + 8]; // 4-wide halo
int i = threadIdx.x;
int jBase = blockIdx.x * lPencils;
int k = blockIdx.y;
int si = i + 4; // local i for shared memory access + halo offset
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
int globalIdx = k * mx * my + (jBase + sj) * mx + i;
s_f[sj][si] = f[globalIdx];
}
__syncthreads();
// fill in periodic images in shared memory array
if (i < 4) {
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
s_f[sj][si - 4] = s_f[sj][si + mx - 5];
s_f[sj][si + mx] = s_f[sj][si + 1];
}
}
__syncthreads();
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
int globalIdx = k * mx * my + (jBase + sj) * mx + i;
df[globalIdx] = (c_ax * (s_f[sj][si + 1] - s_f[sj][si - 1]) +
c_bx * (s_f[sj][si + 2] - s_f[sj][si - 2]) +
c_cx * (s_f[sj][si + 3] - s_f[sj][si - 3]) +
c_dx * (s_f[sj][si + 4] - s_f[sj][si - 4]));
}
}
__global__ void derivative_y(float *f, float *df) {
__shared__ float s_f[my + 8][sPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = threadIdx.y;
int k = blockIdx.y;
int si = threadIdx.x;
int sj = j + 4;
int globalIdx = k * mx * my + j * mx + i;
s_f[sj][si] = f[globalIdx];
__syncthreads();
if (j < 4) {
s_f[sj - 4][si] = s_f[sj + my - 5][si];
s_f[sj + my][si] = s_f[sj + 1][si];
}
__syncthreads();
df[globalIdx] = (c_ay * (s_f[sj + 1][si] - s_f[sj - 1][si]) +
c_by * (s_f[sj + 2][si] - s_f[sj - 2][si]) +
c_cy * (s_f[sj + 3][si] - s_f[sj - 3][si]) +
c_dy * (s_f[sj + 4][si] - s_f[sj - 4][si]));
}
__global__ void derivative_y_lPencils(float *f, float *df) {
__shared__ float s_f[my + 8][lPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y;
int si = threadIdx.x;
for (int j = threadIdx.y; j < my; j += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sj = j + 4;
s_f[sj][si] = f[globalIdx];
}
__syncthreads();
int sj = threadIdx.y + 4;
if (sj < 8) {
s_f[sj - 4][si] = s_f[sj + my - 5][si];
s_f[sj + my][si] = s_f[sj + 1][si];
}
__syncthreads();
for (int j = threadIdx.y; j < my; j += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sj = j + 4;
df[globalIdx] = (c_ay * (s_f[sj + 1][si] - s_f[sj - 1][si]) +
c_by * (s_f[sj + 2][si] - s_f[sj - 2][si]) +
c_cy * (s_f[sj + 3][si] - s_f[sj - 3][si]) +
c_dy * (s_f[sj + 4][si] - s_f[sj - 4][si]));
}
}
__global__ void derivative_z(float *f, float *df) {
__shared__ float s_f[mz + 8][sPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
int k = threadIdx.y;
int si = threadIdx.x;
int sk = k + 4; // halo offset
int globalIdx = k * mx * my + j * mx + i;
s_f[sk][si] = f[globalIdx];
__syncthreads();
if (k < 4) {
s_f[sk - 4][si] = s_f[sk + mz - 5][si];
s_f[sk + mz][si] = s_f[sk + 1][si];
}
__syncthreads();
df[globalIdx] = (c_az * (s_f[sk + 1][si] - s_f[sk - 1][si]) +
c_bz * (s_f[sk + 2][si] - s_f[sk - 2][si]) +
c_cz * (s_f[sk + 3][si] - s_f[sk - 3][si]) +
c_dz * (s_f[sk + 4][si] - s_f[sk - 4][si]));
}
__global__ void derivative_z_lPencils(float *f, float *df) {
__shared__ float s_f[mz + 8][lPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
int si = threadIdx.x;
for (int k = threadIdx.y; k < mz; k += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sk = k + 4;
s_f[sk][si] = f[globalIdx];
}
__syncthreads();
int k = threadIdx.y + 4;
if (k < 8) {
s_f[k - 4][si] = s_f[k + mz - 5][si];
s_f[k + mz][si] = s_f[k + 1][si];
}
__syncthreads();
for (int k = threadIdx.y; k < mz; k += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sk = k + 4;
df[globalIdx] = (c_az * (s_f[sk + 1][si] - s_f[sk - 1][si]) +
c_bz * (s_f[sk + 2][si] - s_f[sk - 2][si]) +
c_cz * (s_f[sk + 3][si] - s_f[sk - 3][si]) +
c_dz * (s_f[sk + 4][si] - s_f[sk - 4][si]));
}
} | 709a35798d7e3095ebcedb808c5f84610ac22e75.cu | //#include "finite_difference_cuda.cuh"
float fx = 1.0f, fy = 1.0f, fz = 1.0f;
const int mx = 64, my = 64, mz = 64;
const int sPencils = 4;
const int lPencils = 32;
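// Each block works on "pencils" of points along the derivative direction:
// sPencils (4) pencils per block in the simple kernels, lPencils (32) in the
// *_lPencils variants, which reuse each shared-memory tile for more outputs
// per block at the cost of a larger shared-memory footprint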
__constant__ float c_ax, c_bx, c_cx, c_dx;
__constant__ float c_ay, c_by, c_cy, c_dy;
__constant__ float c_az, c_bz, c_cz, c_dz;
__global__ void derivative_x(float *f, float *df) {
__shared__ float s_f[sPencils][mx + 8]; // 4-wide halo
if (blockIdx.x == 0 && threadIdx.x == 0) {
printf("%f, %f, %f, %f\n", c_ax, c_bx, c_cx, c_dx);
}
int i = threadIdx.x;
int j = blockIdx.x * blockDim.y + threadIdx.y;
int k = blockIdx.y;
int si = i + 4; // local i for shared memory access + halo offset
int sj = threadIdx.y; // local j for shared memory access
int globalIdx = k * mx * my + j * mx + i;
s_f[sj][si] = f[globalIdx];
__syncthreads();
// fill in periodic images in shared memory array
if (i < 4) {
s_f[sj][si - 4] = s_f[sj][si + mx - 5];
s_f[sj][si + mx] = s_f[sj][si + 1];
}
__syncthreads();
df[globalIdx] = (c_ax * (s_f[sj][si + 1] - s_f[sj][si - 1]) +
c_bx * (s_f[sj][si + 2] - s_f[sj][si - 2]) +
c_cx * (s_f[sj][si + 3] - s_f[sj][si - 3]) +
c_dx * (s_f[sj][si + 4] - s_f[sj][si - 4]));
}
__global__ void derivative_x_lPencils(float *f, float *df) {
__shared__ float s_f[lPencils][mx + 8]; // 4-wide halo
int i = threadIdx.x;
int jBase = blockIdx.x * lPencils;
int k = blockIdx.y;
int si = i + 4; // local i for shared memory access + halo offset
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
int globalIdx = k * mx * my + (jBase + sj) * mx + i;
s_f[sj][si] = f[globalIdx];
}
__syncthreads();
// fill in periodic images in shared memory array
if (i < 4) {
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
s_f[sj][si - 4] = s_f[sj][si + mx - 5];
s_f[sj][si + mx] = s_f[sj][si + 1];
}
}
__syncthreads();
for (int sj = threadIdx.y; sj < lPencils; sj += blockDim.y) {
int globalIdx = k * mx * my + (jBase + sj) * mx + i;
df[globalIdx] = (c_ax * (s_f[sj][si + 1] - s_f[sj][si - 1]) +
c_bx * (s_f[sj][si + 2] - s_f[sj][si - 2]) +
c_cx * (s_f[sj][si + 3] - s_f[sj][si - 3]) +
c_dx * (s_f[sj][si + 4] - s_f[sj][si - 4]));
}
}
__global__ void derivative_y(float *f, float *df) {
__shared__ float s_f[my + 8][sPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = threadIdx.y;
int k = blockIdx.y;
int si = threadIdx.x;
int sj = j + 4;
int globalIdx = k * mx * my + j * mx + i;
s_f[sj][si] = f[globalIdx];
__syncthreads();
if (j < 4) {
s_f[sj - 4][si] = s_f[sj + my - 5][si];
s_f[sj + my][si] = s_f[sj + 1][si];
}
__syncthreads();
df[globalIdx] = (c_ay * (s_f[sj + 1][si] - s_f[sj - 1][si]) +
c_by * (s_f[sj + 2][si] - s_f[sj - 2][si]) +
c_cy * (s_f[sj + 3][si] - s_f[sj - 3][si]) +
c_dy * (s_f[sj + 4][si] - s_f[sj - 4][si]));
}
__global__ void derivative_y_lPencils(float *f, float *df) {
__shared__ float s_f[my + 8][lPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int k = blockIdx.y;
int si = threadIdx.x;
for (int j = threadIdx.y; j < my; j += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sj = j + 4;
s_f[sj][si] = f[globalIdx];
}
__syncthreads();
int sj = threadIdx.y + 4;
if (sj < 8) {
s_f[sj - 4][si] = s_f[sj + my - 5][si];
s_f[sj + my][si] = s_f[sj + 1][si];
}
__syncthreads();
for (int j = threadIdx.y; j < my; j += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sj = j + 4;
df[globalIdx] = (c_ay * (s_f[sj + 1][si] - s_f[sj - 1][si]) +
c_by * (s_f[sj + 2][si] - s_f[sj - 2][si]) +
c_cy * (s_f[sj + 3][si] - s_f[sj - 3][si]) +
c_dy * (s_f[sj + 4][si] - s_f[sj - 4][si]));
}
}
__global__ void derivative_z(float *f, float *df) {
__shared__ float s_f[mz + 8][sPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
int k = threadIdx.y;
int si = threadIdx.x;
int sk = k + 4; // halo offset
int globalIdx = k * mx * my + j * mx + i;
s_f[sk][si] = f[globalIdx];
__syncthreads();
if (k < 4) {
s_f[sk - 4][si] = s_f[sk + mz - 5][si];
s_f[sk + mz][si] = s_f[sk + 1][si];
}
__syncthreads();
df[globalIdx] = (c_az * (s_f[sk + 1][si] - s_f[sk - 1][si]) +
c_bz * (s_f[sk + 2][si] - s_f[sk - 2][si]) +
c_cz * (s_f[sk + 3][si] - s_f[sk - 3][si]) +
c_dz * (s_f[sk + 4][si] - s_f[sk - 4][si]));
}
__global__ void derivative_z_lPencils(float *f, float *df) {
__shared__ float s_f[mz + 8][lPencils];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y;
int si = threadIdx.x;
for (int k = threadIdx.y; k < mz; k += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sk = k + 4;
s_f[sk][si] = f[globalIdx];
}
__syncthreads();
int k = threadIdx.y + 4;
if (k < 8) {
s_f[k - 4][si] = s_f[k + mz - 5][si];
s_f[k + mz][si] = s_f[k + 1][si];
}
__syncthreads();
for (int k = threadIdx.y; k < mz; k += blockDim.y) {
int globalIdx = k * mx * my + j * mx + i;
int sk = k + 4;
df[globalIdx] = (c_az * (s_f[sk + 1][si] - s_f[sk - 1][si]) +
c_bz * (s_f[sk + 2][si] - s_f[sk - 2][si]) +
c_cz * (s_f[sk + 3][si] - s_f[sk - 3][si]) +
c_dz * (s_f[sk + 4][si] - s_f[sk - 4][si]));
}
} |
9c893b96c87e11b1ce3ba58a99cd2a5872a981ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
float h_A[]= {
0.7175049743623347, 0.7483295476728882, 0.5428045722921292, 0.6670388593622318, 0.8285250757988448, 0.6493922330544046, 0.9155831240661374, 0.5175069123492884, 0.7144072115954666, 0.8263031546197478, 0.7624806464646448, 0.9122238073039419, 0.5566906615344596, 0.8168905807336863, 0.6933761370918536, 0.8395397054923843, 0.8033347649131046, 0.9941032741599793, 0.7742898730965164, 0.7870283077556541, 0.7673346013702567, 0.5769613471735737, 0.5738554351773941, 0.9939341868842451, 0.7799700014781104, 0.6814291040536089, 0.6714052283275981, 0.8774744357242918, 0.7024670328536721, 0.8045429947352749, 0.5769122826992121, 0.9777938420156224, 0.5937074023708211, 0.8286152802298441, 0.7650014261108503, 0.8929116643934834, 0.9410130293220881, 0.9891729316565442, 0.9817416845398415, 0.8830830552694126, 0.8134548183522272, 0.5314132914925039, 0.9810008198757578, 0.70572108492611, 0.5139436081367416, 0.5314311730571363, 0.9311131260820702, 0.7196725611869096, 0.7147502373185421, 0.8797002950296562, 0.8642699279375733, 0.5862109957132438, 0.9767298402817426, 0.5013217469795739, 0.9706521527934112, 0.7578632639753504, 0.7315584410769278, 0.9177498031306632, 0.6479141148218037, 0.5249262177376186, 0.5538981159995084, 0.8617596047073285, 0.5861942430635054, 0.6500747778165836, 0.7951722519661015, 0.5493490472227096, 0.8163500530570433, 0.912325687539941, 0.9820644411813464, 0.5657051815243339, 0.5138839480921523, 0.8826636504434808, 0.6663946666020875, 0.5038093310152814, 0.6493543396457044, 0.759386096090106, 0.6937557980029705, 0.8613649332807252, 0.9368177629965486, 0.9249213283655325, 0.5369637236706578, 0.8433307098361942, 0.7692938546744861, 0.9954228032712307, 0.7905907651739529, 0.8668570208651423, 0.7601528897475356, 0.9845661760121813, 0.7369202598469778, 0.6165134229820441, 0.7234416032607325, 0.8502648004612344, 0.8724870591921792, 0.7709651062409866, 0.558198557942287, 0.8977255395511201, 0.8889499830154203, 0.5701917992687124, 0.7554781816275644, 0.5953613547860896, 0.9796824676982792, 0.7281405531166179, 0.9494958059277303, 0.7828436297295336, 0.9830261904762756, 0.7307054242652655, 0.6682378749952345, 0.9793992597131692, 0.6820986459541211, 0.9378306758357847, 0.508093650074964, 0.7487797253077175, 0.6182510432722995, 0.5643877925169976, 0.625274773134032, 0.8408833108631155, 0.7768001990882476, 0.8845791088186201, 0.7807379197439666, 0.8783146131963451, 0.6532335322235417, 0.6161001105283774, 0.635087698565751, 0.9600892442603386, 0.6116721753435643, 0.6252979884438084, 0.8837417264202989, 0.8442385902436769, 0.5414256005141584, 0.685889206934632, 0.9033746724099666, 0.7876143894608625, 0.7037993683222312, 0.7687455214370533, 0.7461391437801412, 0.8933959699374662, 0.7639035809373022, 0.9057501353466801, 0.8312658321929823, 0.9419822773300106, 0.6161201027145985, 0.9927409526811327, 0.9306612696201013, 0.8603789778155553, 0.6124688912804437, 0.869129383064396, 0.9122000852018062, 0.5681157443004359, 0.9748988939052683, 0.9114554697381612, 0.6918164670784539, 0.7899732821289123, 0.9023764376284498, 0.7719162680121916, 0.6562876429840578, 0.562240461689329, 0.709946990925238, 0.5538049440387958, 0.7758888223511162, 0.8373510051770181, 0.8646231948847161, 0.820356465782869, 0.9350557319129418, 0.8394144817387159, 0.9977296856553373, 0.7840380670609457, 0.8105234427374122, 0.7437132219084173, 0.6783428765147859, 0.546607960797175, 0.9885389860145156, 0.8063298224739776, 0.687175865573554, 0.6000481233632222, 0.7568343096854063, 0.9044337343691745, 0.6615939718093461, 0.6380093737651071, 
0.6990238507534235, 0.8160095581872466, 0.8385853606436432, 0.8629413055597112, 0.5107350105503465, 0.6312070654603079, 0.9820733064941537, 0.5227133030856335, 0.7040240602876442, 0.605160134986669, 0.886725260779536, 0.6412641050414196, 0.7234382961166687, 0.6807040170143226, 0.5596514706490721, 0.8878583353275754, 0.7616923517398573, 0.6157467623001187, 0.5393448546810748, 0.80528150598235, 0.8992462596802366, 0.8813947430826625, 0.8235018717592513, 0.9435712522639765, 0.5278177667291167, 0.5878938541203524, 0.8252227102833165, 0.7041544300947209, 0.7320694319060471, 0.6716748310014393, 0.6809079227932331, 0.8576255866003155, 0.7596863546532289, 0.8898858402084153, 0.532722160160641, 0.963421275120853, 0.6664793029771361, 0.5417125114487675, 0.759428088181401, 0.5710879176916852, 0.5328935155949841, 0.6543138231277239, 0.5069073223272444, 0.9022298759578964, 0.8286833812838779, 0.6803512526435467, 0.5973966449252462, 0.7073522374428227, 0.5133318608626939, 0.7315184133850601, 0.8432620635287341, 0.9049121483497172, 0.8318095289399634, 0.9026618189927469, 0.6420833615107415, 0.9994597892129664, 0.617270934565862, 0.5263116225733089, 0.6786285820933046, 0.7835775336832631, 0.8300036415934484, 0.592353782128615, 0.5657998624689723, 0.8625412727685493, 0.7631865210445816, 0.7210754865124193, 0.6086667174931063, 0.7385750183909484, 0.920989447364617, 0.94739538918244, 0.6855098253290155, 0.9343211030413223, 0.5493828913973317, 0.5310448272026687, 0.9009663222278717, 0.7869293956416037, 0.5900925683418661, 0.8385620086692027, 0.7932993354385146, 0.5299737440855232, 0.9196170421922958, 0.6558905671450281, 0.8656626448607085, 0.5027122782652429, 0.7366020784814247, 0.6655989049899537, 0.6372782040816614, 0.8859886166595796, 0.5062947603356658, 0.7327567125061054, 0.8850052277110629, 0.5647387676513849, 0.5963841494889127, 0.9060854867162725, 0.9166426088404747, 0.673022107739839, 0.5416696342938154, 0.5831003726939399, 0.8272720683126304, 0.944026928918944, 0.9109686909633957, 0.7147974869008538, 0.5825293817589708, 0.9724077581654852, 0.9485523147886157, 0.6287315124101525, 0.7700442647127689, 0.5209166191356271, 0.5193350166049711, 0.6458388115855724, 0.9987572403319077, 0.6350924079879765, 0.831849668083315, 0.6750287308727874, 0.7967959702799643, 0.894115199675573, 0.658030849082638, 0.9545717516881893, 0.8710326220369058, 0.721515880841148, 0.6984331129268149, 0.5653054189829718, 0.815756265097344, 0.7760842554115608, 0.8895647446995394, 0.9977935828790359, 0.5897241716307271, 0.9949236196766181, 0.92753081222319, 0.6853273027621664, 0.5634366839955158, 0.770503701790801, 0.5647916231445753, 0.830501580551152, 0.7295982005063606, 0.620210763689558, 0.839370767957761, 0.8110668147591117, 0.5885117268521713, 0.9079035820954013, 0.848010784932944, 0.5861117069774286, 0.9711812608601524, 0.8350727902796424, 0.8970020026716459, 0.9225997430138331, 0.6973010393622734, 0.9315739888406733, 0.7459221360697157, 0.6133229159696165, 0.9044214366240093, 0.8264688721486457, 0.7200759232117373, 0.5377806242551253, 0.9061159336796546, 0.814628149989443, 0.5771589351457149, 0.7590412505272548, 0.5400821407167455, 0.6035577001353102, 0.8616924443817809, 0.8084092807811845, 0.8268654638680732, 0.515455017127314, 0.6588949082639428, 0.666586640816452, 0.8129506705897591, 0.9152114880141494, 0.8538569643921092, 0.8818942623454151, 0.7470907519257488, 0.8013355615892588, 0.9761993578970845, 0.6715271822726312, 0.8364434572987292, 0.7057228439267411, 0.72439559452532, 0.6519551081496757, 0.9588471899630655, 
0.9046448427882299, 0.673403059284444, 0.533791032347466, 0.8719566473185582, 0.9885125189872472, 0.5518750031889608, 0.7863070845734004, 0.7202503595676394, 0.6570415531865861, 0.8809316023110825, 0.9578127062198909, 0.9956279054377624, 0.5301132227909873, 0.6707888922998658, 0.8780451674922749, 0.9395832068131295, 0.6219165776596278, 0.8599748478525162, 0.5499551679120835, 0.7904418509009485, 0.6646686791801107, 0.7563399098545823, 0.6805063745210362, 0.5842324092075663, 0.9573603558630026, 0.5357325376994545, 0.5033870370732605, 0.5870444093984921, 0.6643157518902542, 0.9387857091494207, 0.7858692115538473, 0.9439044009105371, 0.7284829532643802, 0.8623399116217768, 0.8498837663091304, 0.5406120073866904, 0.9193851056929803, 0.9715002283759175, 0.6637502323106244, 0.6477781243695839, 0.9860881211121879, 0.797424984558573, 0.9654009547209299, 0.7270539879573954, 0.6655098186635973, 0.9171610281017151, 0.9242860461215878, 0.7391459284313388, 0.5222790180475673, 0.8773912484344897, 0.8149384609228445, 0.5217527542568972, 0.8789698337305594, 0.7268861887430299, 0.9520860712672825, 0.7249576200898387, 0.9550388258626021, 0.5804611735926011, 0.9777288081887516, 0.808805640359967, 0.633559141593157, 0.85623391772666, 0.5575605489107095, 0.6638687542016302, 0.8787973526182437, 0.617434709946848, 0.8030270968921089, 0.581240418150783, 0.5503531203353518, 0.9946837433498839, 0.76218136971764, 0.9357608582755572, 0.5231331816447684, 0.6854780173341097, 0.5311558879767161, 0.5785887652419894, 0.8307138697131689, 0.7758140043664766, 0.5189577064384299, 0.5436659851795499, 0.8896681065588994, 0.653402969925178, 0.5016245400301977, 0.5951782217959249, 0.8166937645579907, 0.5100527537462374, 0.6227861063432557, 0.7548678545114191, 0.8102903477833221, 0.5925998867095521, 0.6974047808401522, 0.8373805531204513, 0.6642000445504762, 0.7975856745853391, 0.6063007020696211, 0.7198844198006322, 0.9551274973977456, 0.5915330492969607, 0.5871846331761975, 0.9646590712359239, 0.6930243557587628, 0.9813963133898272, 0.6672200749543034, 0.565025257660732, 0.8851300705531739, 0.7687037742668024, 0.9344239798458662, 0.9513599212366258, 0.8438953560602331, 0.7930587288346782, 0.8966819839324218, 0.72280875227075, 0.9630973594388421, 0.5144295914645305, 0.7916602381315909, 0.7997319825032736, 0.9006457288102252, 0.6370833742847035, 0.9318892860850914, 0.7002814582836712, 0.8018037592376548, 0.6118100096109376, 0.8993243036825278, 0.6308733695618169, 0.855600641276804, 0.5300356818815629, 0.6794562007188256, 0.8512372607896367, 0.9497273495469162, 0.7336120821192751, 0.7841910800053631, 0.9392352689967308, 0.9179604098218539, 0.5549502392878447, 0.9516431101153908, 0.6649121701750261, 0.6225664124296383, 0.568126683026504, 0.9924079875188276, 0.6710241562997306, 0.6701705351127124, 0.754202872050578, 0.91732517674133, 0.6399248142474455, 0.8548843208551697, 0.5364315152334754, 0.8107052135499548, 0.7512686381961702, 0.8110701715719404, 0.8447227857251871, 0.5876345450559814, 0.5327760038007834, 0.7528108022675388, 0.8562476771299916, 0.7826885726498719, 0.9027967963922693, 0.7827052013120901, 0.630071066205456, 0.6283442027052404, 0.831256222211215, 0.899171512439089, 0.5543083798488495, 0.5003917635936048, 0.858540050440179, 0.8511857457627605, 0.5339443686622836, 0.9255766780355341, 0.9607122348356951, 0.6596183118093375, 0.7340366840429344, 0.7115311138250818, 0.7376614471456091, 0.7430208557618188, 0.7474483791732092, 0.9141204595985233, 0.9130646732899668, 0.7461779598386188, 0.5496660731588356, 
0.7531447745734647, 0.6285716534428196, 0.7675855152704778, 0.8274753710241782, 0.9775063265438432, 0.6099333564132423, 0.8841593803495991, 0.5897987076844864, 0.9149521235657798, 0.7574415244848697, 0.7517422214169902, 0.7657148238903451, 0.5356244148728739, 0.7279396444671993, 0.7508828135188834, 0.6107722696332616, 0.6574094800693152, 0.7873525948023378, 0.8373287552267644, 0.7028342501913618, 0.7264423782681426, 0.9560341268780939, 0.8382048820213726, 0.6398660688361371, 0.9068375920762427, 0.7051372148373858, 0.5100106590764255, 0.7558333353508433, 0.6377687959028604, 0.7347584262208351, 0.6931694361907945, 0.9264285207343963, 0.6990356849902173, 0.8812084662378701, 0.9615328670414424, 0.6005469892369674, 0.5799344945715658, 0.7316303507539086, 0.8480191138570592, 0.9739126782411822, 0.8680138256255747, 0.8937497033237087, 0.8926174533866478, 0.6461052223425663, 0.6551360785593032, 0.863895103850987, 0.876860805930852, 0.8140718944338794, 0.6473599436221105, 0.7798594961887093, 0.5967679181430172, 0.5418606390223952, 0.5114483447332854, 0.8080931141911538, 0.9783824711215274, 0.8040049169606638, 0.5208864215176348, 0.6866720766920343, 0.8689085352077532, 0.5698086063461534, 0.5590515330662833, 0.5487904535831385, 0.605853621159167, 0.8037596567650076, 0.5058726990961012, 0.8264464891643191, 0.7440690101659653, 0.9387429279279773, 0.5093002348043006, 0.6864582066771174, 0.7641058932756224, 0.7682464855319144, 0.7068409791226118, 0.7616325231851375, 0.5176209704155486, 0.7396525681392454, 0.8516466803051463, 0.9745332488704593, 0.6372801091647291, 0.780162324841136, 0.6011560638162952, 0.7758104530170866, 0.9778492507155405, 0.7136446150698623, 0.9056871285809793, 0.6582054282738479, 0.9959695391088318, 0.7640958988950919, 0.5315143051891601, 0.7935719017312257, 0.6146160979563673, 0.6253942950088016, 0.8883496779647706, 0.5035718245444725, 0.7224012857727014, 0.9703342374924342, 0.5128779592314681, 0.8913464893991353, 0.9776792113700319, 0.718061983258852, 0.5601701464524802, 0.539262466664707, 0.8039954356583209, 0.5718666752272372, 0.9867494861581749, 0.9722964284176024, 0.781272469188941, 0.7513964277437876, 0.8944272559127562, 0.5744254533131001, 0.9295503865516243, 0.5284053661424665, 0.5120682723028225, 0.7051528355484178, 0.817759059758939, 0.8878234201267484, 0.7115108475832024, 0.6977492408338297, 0.8708475630672106, 0.7070046653739912, 0.8841867175339746, 0.8270795620142489, 0.8390813046244068, 0.5952539252129366, 0.721161318734161, 0.8874204496629856, 0.9966075198467306, 0.8591974676623013, 0.84273984418223, 0.8983747264589919, 0.7574319285596463, 0.8964426075922378, 0.5028400905927127, 0.6880295532517162, 0.6152306269949663, 0.6186976046582084, 0.7366986347459097, 0.7859052302398979, 0.9402245323204153, 0.5582828803320001, 0.9871008905046461, 0.9398841286358288, 0.8273766111183014, 0.8690633711478154, 0.6445258908338545, 0.8238560736013408, 0.8868354651060395, 0.6513951919759716, 0.7766869165003716, 0.8863067960076496, 0.8964319489442505, 0.5521181744970176, 0.6127213652815815, 0.945379576593336, 0.7189300338801579, 0.865334436673536, 0.7198323949071357, 0.7962577018247845, 0.6358190992286217, 0.9276250110389713, 0.9819163208770993, 0.7724176506274993, 0.6476578495920275, 0.972865635311776, 0.8046936048915933, 0.8673961964801453, 0.8445569454590101, 0.8873657173278331, 0.7782558101450034, 0.6207674972961522, 0.7665842394516937, 0.8008155296587234, 0.6466505815681847, 0.8575435629845465, 0.6411914593744596, 0.7108285202888673, 0.8620444563502725, 0.5050694794126986, 
0.8637548407442048, 0.8323727724808773, 0.8376718804615941, 0.6204178230604773, 0.8422952266556656, 0.534132194476928, 0.8775611875459841, 0.7012772269190144, 0.5132187498448768, 0.7781480862167057, 0.6462219374876739, 0.8711759741319117, 0.6811588843496068, 0.8462124321971298, 0.7594594399198328, 0.6921830124454388, 0.6139518015483896, 0.9605438818690479, 0.6424743638235524, 0.8149649759891937, 0.9644544372931267, 0.5561764723076235, 0.8585333314066805, 0.5325496748762529, 0.8656598392212878, 0.7711339376127809, 0.659993718779327, 0.9978837582596711, 0.7717833020772951, 0.9474573653916873, 0.819145751558648, 0.6556920433965562, 0.5453283527136021, 0.6764505382413155, 0.6916554003820774, 0.9583896931351981, 0.58030744774696, 0.8063901831276439, 0.8936482795908587, 0.618469967832747, 0.5164409639385599, 0.6947431135083324, 0.9107282523023559, 0.931187500303613, 0.7560204793656411, 0.7292892620081342, 0.9465113995089967, 0.6435631464101446, 0.6390034714160846, 0.6716480205484452, 0.8395584665856469, 0.9815491943366021, 0.5000637206066598, 0.631120143293386, 0.6684673936090034, 0.8710345575179892, 0.5676401231813213, 0.8705998951280909, 0.7304571674311067, 0.6958274171730265, 0.6820397612355116, 0.9408984652591819, 0.6416857222878372, 0.5663007354631049, 0.5187326733534938, 0.9651023903788754, 0.9599278703742112, 0.9635432361373745, 0.9771965184807319, 0.9092807130055276, 0.6566316399822616, 0.9057205974937773, 0.6443472291950072, 0.8275717873434834, 0.6296991113671675, 0.5440090741633943, 0.6451174284031015, 0.5810917269507535, 0.8602814986194647, 0.5982315029925749, 0.8197394438163682, 0.5074276450775005, 0.6352618825355972, 0.8632445536712614, 0.895556888791958, 0.772466422683947, 0.8766991232245125, 0.8950025300506068, 0.6383706077180286, 0.7360529113763161, 0.5816032096251498, 0.5817647148619042, 0.8205065511917298, 0.6555368768954937, 0.8905382756360487, 0.5046182476914518, 0.5918374048976502, 0.6578990429906522, 0.5378244317779652, 0.9170343243196952, 0.5244932995419187, 0.9890573824861929, 0.5265348901528981, 0.9785805894377044, 0.6245137487387351, 0.8568309009364565, 0.5303987348955517, 0.5278836754783361, 0.5506073272012736, 0.8293018192690798, 0.8559027551593832, 0.5652030593620075, 0.7744348843610241, 0.515565415390881, 0.7562683563264283, 0.6894489399825493, 0.6514528488638454, 0.9068508909272324, 0.7768493106977585, 0.9802387408269064, 0.8589983079940284, 0.7222923104509164, 0.9494617172959475, 0.8899479118430491, 0.9003505505488993, 0.8927540461450996, 0.663452880294259, 0.9091269545833842, 0.8785106769475404, 0.9683897845608465, 0.6287573278825096, 0.6097798414369457, 0.593142429351299, 0.6643805679857613, 0.9449941005092909, 0.5672211635036601, 0.5417439410396544, 0.9425290851148195, 0.872629281623168, 0.6872829195007166, 0.8196810157590535, 0.5421910834680155, 0.5087469854455247, 0.567657921507269, 0.988092957408338, 0.662308030649258, 0.645700118158399, 0.51291417605355, 0.725382201117026, 0.8899784510338955, 0.791852756396201, 0.8613252527742451, 0.6985963908098032, 0.7931961436100736, 0.7246411315033985, 0.5074494220630064, 0.7181020073666426, 0.7452083519591107, 0.6261428899251942, 0.8677793189487603, 0.619013683643147, 0.7005126612539005, 0.671066701562349, 0.9893137451322225, 0.7944790679653224, 0.6663369299639874, 0.7269476170300537, 0.8847926334632098, 0.9254319409896083, 0.8425651882044956, 0.8887079693375327, 0.6127535589222362, 0.7513855281679238, 0.7977040119757262, 0.5753885030740056, 0.8823175306301378, 0.9746276117552004, 0.8520311453044047, 
0.8346986553052198, 0.8608398838356635, 0.6942951813749323, 0.5935175076725132, 0.6452102467579437, 0.6089768499936696, 0.6240654348876982, 0.9568093610477555, 0.708854812106231, 0.7786585165948752, 0.6862246402894738, 0.8784685253024165, 0.745081116140263, 0.5560460255644317, 0.9573782332604687, 0.714803911587768, 0.8756255642819305, 0.8757743045637505, 0.8180294947185416, 0.8814811176831775, 0.7085585440638689, 0.9603832249611536, 0.6722558816465003, 0.6267913019567721, 0.5865790349345534, 0.9516539066062502, 0.7451457811267855, 0.8025784082032348, 0.8418860178577664, 0.6308280816981481, 0.6853085519307471, 0.7551524408469074, 0.7430121984824316, 0.9492725946788689, 0.6429105889861857, 0.7346099079549737, 0.9394328779489014, 0.6560384180940686, 0.7558064466523573, 0.5759460283684525, 0.6424294880995515, 0.5194307614271902, 0.7748595056324805, 0.7601587048009304, 0.6482002996501057, 0.7503416626194899, 0.8185374969327385, 0.5826823834475505, 0.9453678588242939, 0.6028600904040826, 0.9697346009371919, 0.7728385331571652, 0.5221809993973963, 0.5046621854492988, 0.5535213543750981, 0.5194143019350554, 0.7604625073507458, 0.6963315115445796, 0.5947666131272471, 0.662744932524679, 0.5283522839783175, 0.7462269223822893, 0.6792763469158709, 0.8905042398479888, 0.5617605287995046, 0.7038962997379475, 0.530241282660103, 0.7739164328515271, 0.5546559984902873, 0.6823076488401074, 0.9032642711177921, 0.961267613492639, 0.8926089870115224, 0.6663045722906779, 0.989213466511294, 0.7956599656958443, 0.5812320340237365, 0.5225730570674529, 0.9881999586480004, 0.9670344944853754, 0.8459229232958514, 0.6199871622341855, 0.8807591612922377, 0.5522434335425221, 0.5997822777309251, 0.8172806662101112, 0.9201608160506246, 0.9851189734538995, 0.9367460076041031, 0.8975575560247762, 0.5136110274679664, 0.5979519983561881, 0.8106752176678614, 0.9212592424879466, 0.7180151697836186, 0.8509776500977333, 0.6906978737626681, 0.8976240620435023, 0.5898372485860035, 0.94210644533675, 0.629784042120416, 0.7856539905457469, 0.8424831645147772, 0.7615615514498257, 0.5850397725964329, 0.9772890787487611, 0.7323646997020976, 0.5622960236222174, 0.9517326186573504, 0.7060518181694814, 0.997181763028437, 0.5308038825891753, 0.6264982044181915, 0.589470610163918, 0.9573346921237573, 0.8295804592188971, 0.8876650661530883, 0.8906843645617804, 0.7016273598821912, 0.993268697220647, 0.713624330715787, 0.5167452699173447, 0.6451846501581989, 0.5768974273964227, 0.963111350154206, 0.5284020780057046, 0.8083087919852645, 0.9404681124072247, 0.888968826194855, 0.7654918074329926, 0.6427789327734105, 0.6305467702122113, 0.5589166180041161, 0.7166224045604126, 0.7669467284403176, 0.6873152344900346, 0.6526028910790005, 0.9418675640398168, 0.8882755099680846, 0.5147153023108415, 0.9739625290928586, 0.5033978669711672, 0.7938016935948135, 0.766542990689066, 0.7261191212026787, 0.6527649327464139, 0.708642537501577, 0.7165073057577073, 0.9984980081212749, 0.988532877897, 0.7099428108255872, 0.5137695069394658, 0.9807259990401583, 0.7403002643701834, 0.5514882847926235, 0.6135113625450792, 0.6014355453772171, 0.6693123846125552, 0.9779521455200496, 0.7712536656412053, 0.6496438414342058, 0.7094825293268194, 0.5011072060929522, 0.7771166960237996, 0.6679772027223554, 0.7750706177095686, 0.8724460837824822, 0.8698068231637112, 0.7932358594106979, 0.6724929418478673, 0.6305325517024176, 0.5525401028401618, 0.641370488705765, 0.7860364135246316, 0.6004378555932679, 0.8541126316115408, 0.9758266653021483, 0.8090166246911529, 
0.7623431024030126, 0.6276663749076229, 0.7633985044525853, 0.8279146404340301, 0.7365491974506588, 0.8895058286563093, 0.6566801550910897, 0.8610332779500183, 0.8841396315097685, 0.5054396403580577, 0.9216958146374543, 0.5537148160092475, 0.7312155571015464, 0.520836583506611, 0.8423104945404853, 0.9898386597298968, 0.5734113161318437, 0.9894539812170214, 0.7336947967915626, 0.7863174990966972, 0.8860146044578334, 0.8442384670193102, 0.8958166345234695, 0.8146633567036041, 0.9668275484323352, 0.780710644634811, 0.6445879894296913, 0.5329310915882532, 0.7326607984547295, 0.6533595507023908, 0.613761429465081, 0.563668244702823, 0.6991731864477975, 0.8103694771338092, 0.9671152594896997, 0.9432708786868045, 0.5432405567502189, 0.835475692486889, 0.8556006293325628, 0.6753191962277889, 0.7728914099324997, 0.581534224065785, 0.9910334283088683, 0.8349723471036976, 0.7822130100720384, 0.9601596306139333, 0.5821089713544967, 0.5668239800664228, 0.9875357019551327, 0.7262479602251901, 0.7633864341928385, 0.8613391183703002, 0.999140563749566, 0.9921832048548372, 0.6082236851951994, 0.6611679818470018, 0.6187042505565088, 0.8446046230742189, 0.8209767909834509, 0.7106759801031106, 0.6318202990057862, 0.5382161259689066, 0.8746656832667575, 0.5894414013568754, 0.7712726317368632, 0.8818175209436678, 0.7285367722292011, 0.6775994149348465, 0.6548766657330559, 0.8086304609120382, 0.6496691641541017, 0.6404145694357817, 0.6146535354872686, 0.5628304098800292, 0.8504227776914639, 0.7300093498635001, 0.7309543603714539, 0.5577115140892256, 0.805128177136711, 0.6245543763081725, 0.5808337156165888, 0.7785247375810993, 0.6479863068688876, 0.9500136687442952, 0.7390718668728872, 0.6799821809999093, 0.5576501270045107, 0.5062196398402773, 0.5925160742426088, 0.9083721682880289, 0.6551690240454425, 0.701492069831505, 0.5943054214130425, 0.7524794141323005, 0.9746045341670407, 0.7020745767151475, 0.5584573123332287, 0.5250730191176305, 0.5709027715649087, 0.6188783769362616, 0.8117337011799307, 0.6525735475837441, 0.882513730778285, 0.9479178340384894, 0.6814038224946768, 0.5334245139935747, 0.8961452109437502, 0.8418240870730447, 0.7644734626224039, 0.7217482107685174, 0.8953553642076545, 0.9187503741975702, 0.714160195391629, 0.672117239474953, 0.8553412095181543, 0.9242109653432042, 0.835357483239132, 0.9579128778624189, 0.9526157837848979, 0.7821514003909765, 0.7604521629744276, 0.9183129704275856, 0.8646587889517333, 0.7330593345807841, 0.7373657183304267, 0.8372085604673498, 0.6036045784783213, 0.7289186633975822, 0.9561371068500679, 0.6557534918059027, 0.7824149736491341, 0.546074953034261, 0.8128633439678956, 0.5894229198435625, 0.8562056900763841, 0.7956323761573609, 0.8300287517817402, 0.8967114891783585, 0.8564251750762437, 0.512258336720917, 0.8607981038244268, 0.9209877482122233, 0.9512602133565491, 0.843554752571138, 0.9949128174126994, 0.6422319076698031, 0.5937278704672031, 0.6858333759507335, 0.9911418365915716, 0.5818852026746673, 0.6702331440120901, 0.6826652460354408, 0.7751398464368906, 0.7626879835028708, 0.6992854454868584, 0.6745033152194828, 0.8643914900789589, 0.7913166843256632, 0.6999800757211145, 0.5101493620587221, 0.6608134925468845, 0.9998147448294846, 0.7592684237716584, 0.9365435185891271, 0.5530399887926196, 0.5388428018439475, 0.9488035593377866, 0.7244956378299721, 0.645781494593326, 0.7612837713941363, 0.9826733391227193, 0.6833651162242154, 0.5185469136556282, 0.865159431957844, 0.5201184763579165, 0.9285132052274714, 0.901007693320281, 0.9383133532670421, 
0.66386384111747, 0.530368079374365, 0.8095861558127821, 0.9069279532658556, 0.7619363167523863, 0.8498748510907799, 0.5843692718309024, 0.5077711910929267, 0.5510652074269715, 0.5222905591508611, 0.5751530168705512, 0.8434015345536627, 0.8535092015731167, 0.9746465684702841, 0.9745842322332521, 0.9328774354679271, 0.6741700202631504, 0.7639216479640978, 0.5759782207004538, 0.8019380853986657, 0.9063011717798191, 0.6997498982659436, 0.7783044779318976, 0.884064479328831, 0.819640801630687, 0.6868719133045034, 0.5764531035183071, 0.9033213140530403, 0.8295356640912785, 0.5229428542459562, 0.9225062382796814, 0.5962755179898899, 0.7876076590708363, 0.8021791058851477, 0.7736585990449019, 0.7669673070767691, 0.645485397764695, 0.5707256698365865, 0.8332816925163448, 0.8286356707650886, 0.591444595314593, 0.704850968726765, 0.9924817177448647, 0.7516289170251745, 0.7202447849667328, 0.9961327822584503, 0.895076700578197, 0.7413944258950942, 0.7352379419975204, 0.8302467086310572, 0.9413434992189189, 0.9104513936452676, 0.7618997955894264, 0.7728078086282837, 0.7301755200153848, 0.5153021053069511, 0.6010167088227896, 0.6067812327899456, 0.5504690699545939, 0.9910961615374642, 0.6691152029811651, 0.9490286301782986, 0.9200742820802505, 0.5189500983730917, 0.8525543560800397, 0.9320593295626501, 0.8788752113002543, 0.5038736162106825, 0.5816563248414781, 0.8902090357676785, 0.6899897568100604, 0.9017506567239717, 0.8709093758412643, 0.9438071343587066, 0.5002147397951398, 0.7475840622589863, 0.5735383592107244, 0.9769047619767373, 0.8948663659606431, 0.6176249124447295, 0.9535818732538861, 0.9592823401535421, 0.6036842026863783, 0.53034561108728, 0.8292833843476701, 0.8879800929988897, 0.675000281421835, 0.7027288989166203, 0.5109724852051873, 0.6784710544406269, 0.6373460618451123, 0.8262931485053452, 0.691696943746094, 0.9637485764669901, 0.6549806031585472, 0.7060721109438506, 0.8603085632049483, 0.534018314560281, 0.9154376536734805, 0.9619597172201113, 0.9807822801879384, 0.7496187245585252, 0.829882143716985, 0.972908834863139, 0.5138377718509919, 0.6745218515152756, 0.6764119647778936, 0.6066600372194065, 0.636914460694394, 0.9739340122743261, 0.9383850030249858, 0.7853434114988123, 0.7590254651683531, 0.9286864420903781, 0.5067189922638421, 0.9001281415201354, 0.9429743162300508, 0.6236013529652162, 0.5447882577330456, 0.7501054537443661, 0.8954390006704087, 0.9089407294552516, 0.7015296219615172, 0.7875478352604383, 0.8996615498785965, 0.7248698534942242, 0.6106574122601415, 0.7610919410790564, 0.9042616864083334, 0.8699330573124895, 0.9454422127355948, 0.5080104875764967, 0.8419350418831113, 0.85812813385249, 0.8472281287918224, 0.7620025220055116, 0.9519867472716944, 0.7179722703980433, 0.9881223018573225, 0.5306202977921255, 0.6763231901172333, 0.6530709783484165, 0.7384644879901143, 0.7099736141401639, 0.9468927601883832, 0.5599460601013726, 0.5786712141004302, 0.7577948408770163, 0.6211926767471363, 0.7767547467372422, 0.5503922699814501, 0.8623682550155094, 0.5277487461741305, 0.5271412719851853, 0.603303642703858, 0.5046159137795874, 0.8875324695288536, 0.6162889187503491, 0.713849994717284, 0.678963959661502, 0.5649883822975299, 0.9450624248307206, 0.6031959163900722, 0.6048340693441006, 0.8717991943195025, 0.7484467330459357, 0.6708286513376636, 0.7651645639636815, 0.6944071611358545, 0.6714285089205528, 0.6492981397121595, 0.8538114864368507, 0.7072205336097959, 0.8721829631489694, 0.9226488956068498, 0.7581055895259285, 0.9892674108911205, 0.6840981580020149, 
0.5317158220075506, 0.6200501305825845, 0.7898600697851039, 0.914255291975371, 0.7599494433505634, 0.5899685601353097, 0.566614838551335, 0.6940471152640746, 0.6418357706652105, 0.7083420901993781, 0.6363129653558489, 0.9204968216623916, 0.5213162283196175, 0.7634328217305616, 0.9773560913985563, 0.8494164741031572, 0.5722590273873908, 0.6941877519429185, 0.9776808755448225, 0.9355307763841905, 0.5880663325523613, 0.6322876566202893, 0.6282242376658101, 0.7687860186744274, 0.6618308079456487, 0.831468438280451, 0.9930179820332139, 0.538138650552868, 0.5277917674731687, 0.9199903235086286, 0.88486123030877, 0.59499219165424, 0.6205886211923772, 0.6796425047940372, 0.85544158069972, 0.7004123872942458, 0.9061552808638923, 0.6120370930126453, 0.6841736249180548, 0.6581883881774276, 0.8699516064056356, 0.5822602501505494, 0.9662965753406267, 0.6116178151396046, 0.5064483506023824, 0.6877573894334768, 0.5022382392369705, 0.7254107520124952, 0.8195196883447051, 0.6766088971566135, 0.8475357859964334, 0.6995826063156045, 0.7223967817054127, 0.7643248667524006, 0.6514680389013573, 0.9084384362129768, 0.7518646900495141, 0.8098683560401849, 0.5446096532831843, 0.6921592129691931, 0.6097480975333543, 0.8500576647736753, 0.7893016990523297, 0.747236501223288, 0.7965674028433354, 0.7898308266775331, 0.9371890154452823, 0.7427904693239592, 0.5743864241870549, 0.6001005610555792, 0.8856773435838555, 0.9469729056225761, 0.6285980194493921, 0.7455141529825774, 0.7169301487906006, 0.8324320949303405, 0.6868123454614934, 0.7351953216484255, 0.6229880407891798, 0.9357465271399151, 0.8219225838132759, 0.8041035226967344, 0.9604819157925212, 0.7425720080005966, 0.8273285131840353, 0.7037282757065321, 0.8566172388166069, 0.5134850101222439, 0.9298395462134984, 0.6327457308971489, 0.6547448880305768, 0.9633326367177113, 0.9107393425364897, 0.8773057935927309, 0.6390363499365141, 0.9837484093682858, 0.9365242834529388, 0.5601128468952454, 0.574328652456658, 0.594946006449974, 0.9140920225962014, 0.7394659984015306, 0.5703094834211362, 0.8455135484673701, 0.7868671169686368, 0.819501565793358, 0.9354605322078218, 0.9951954603881399, 0.661248072361504, 0.973983181051322, 0.8893609201363589, 0.5156898231304536, 0.7725987903162101, 0.9462266371287507, 0.8141571990224551, 0.5524857505581697, 0.780604854124278, 0.9476175283069159, 0.6709456257735877, 0.8930158033699911, 0.6232525765638153, 0.6362952221889576, 0.8434559529754833, 0.7608734100261614, 0.7954948835934275, 0.5961025239494934, 0.5681895665488891, 0.6186906390521572, 0.9181205034244135, 0.6222969014496306, 0.7015495735014838, 0.5559929131787308, 0.7922354339802484, 0.7881106877269952, 0.5430146877393492, 0.5237330255244912, 0.5210132669130969, 0.9725250800995315, 0.9066154408621534, 0.8991306528292407, 0.5628041369567875, 0.6451721791871721, 0.615932998039527, 0.6360060126924725, 0.8628031880613172, 0.5443072909885702, 0.7356660927280614, 0.65706288058276, 0.577091351625659, 0.6018326011699737, 0.8380679661380052, 0.8513074119601258, 0.9429211529615619, 0.6281184105396489, 0.6949976466330967, 0.6658767519365294, 0.9119999003834881, 0.5426355109665553, 0.7541705068259221, 0.8364301676469597, 0.6035308309424822, 0.6446025767456472, 0.7615938660827628, 0.6800820479670957, 0.642704216087383, 0.6711322613723023, 0.7781904407448335, 0.5818289736820905, 0.7083108768056219, 0.6253965082618334, 0.9241049054035579, 0.5659718764423283, 0.8311220225284467, 0.5054792027300415, 0.9273389842158787, 0.647543498157667, 0.6283292110865317, 0.892743591007697, 
0.5035541612766795, 0.8184838700733226, 0.9126795493724598, 0.6379612451648786, 0.5148065687922454, 0.9257812808169583, 0.6421078600261372, 0.6732061380955131, 0.8852976665566448, 0.6783601993662944, 0.7658093819459484, 0.6470173655122833, 0.9382453247804615, 0.7752190113263372, 0.5886852550076895, 0.5040592821392038, 0.9265823930678271, 0.8777723290896416, 0.9418197391203281, 0.9505163126124363, 0.9670782318814066, 0.7377273336054866, 0.5690405650321111, 0.768474137238674, 0.9562360180462941, 0.7924764146741087, 0.5623961103877321, 0.8447279791329101, 0.8723424398506983, 0.6204094295740443, 0.691897013914006, 0.8588818015794806, 0.9503314302839962, 0.6573997779229512, 0.8231763904154479, 0.8691696764831389, 0.8894606991984341, 0.8975202982197699, 0.6358668528408458, 0.821089002396882, 0.9041819102577477, 0.5628403502935329, 0.760826212369208, 0.9128854284346171, 0.6821264123022245, 0.8563473845839193, 0.6176718822389753, 0.9588329897221661, 0.8631626701804092, 0.7475687576075192, 0.6311251059726435, 0.6221157944866731, 0.9625134709153247, 0.5770660544429079, 0.9067720209918457, 0.5175166044702494, 0.7373618352455553, 0.6381086913498986, 0.959441602057445, 0.8193338030898987, 0.6268561699035748, 0.8038476116207511, 0.5172249394685491, 0.9243177293242675, 0.8603573500742081, 0.9069150962425331, 0.5185972609246838, 0.5812870894475801, 0.7881755375661732, 0.6881506055116686, 0.5784708856484663, 0.5284444765354019, 0.8820330456925016, 0.8194582733888895, 0.7145985331501983, 0.7882525434915694, 0.6333895680238123, 0.5753758919441347, 0.885377298226855, 0.5196926803840358, 0.5299123051089902, 0.5122168100173352, 0.5163151859328264, 0.8834520061448703, 0.9796317303456397, 0.5753691895883543, 0.921366532866347, 0.5667640966185078, 0.9231718742045324, 0.549572667581883, 0.8597926036126979, 0.995585982744646, 0.5981540337865883, 0.7740185001878648, 0.8907629134015664, 0.9037335725881088, 0.7716060650952832, 0.6007462511351187, 0.8781696732361259, 0.8987283404578411, 0.9183116374349468, 0.6796361927008923, 0.8861790654932987, 0.785223533561294, 0.6931469217203146, 0.9364237526233625, 0.5831835057278958, 0.5785132321796909, 0.7694896938096754, 0.5318359821460423, 0.5374269643882819, 0.9995177128547987, 0.5600830425953117, 0.7586630671627375, 0.9308318948058705, 0.7540064831518557, 0.8176016868792606, 0.79297500901383, 0.8208013081644872, 0.5394156211400083, 0.754447141106461, 0.5136858684873413, 0.8494761866266705, 0.9840526089334924, 0.524934415102573, 0.6090886487277045, 0.7547468638018622, 0.8178365633025253, 0.96672011747395, 0.8067326378392154, 0.9896126827587732, 0.5472437931980156, 0.8675314688681943, 0.6754110858875397, 0.6117405996251466, 0.9630779180048616, 0.8178620255334397, 0.8287176712068933, 0.5521738785630912, 0.8058087831641754, 0.7715464023048679, 0.5098884421989982, 0.8421057154624972, 0.5529810438934792, 0.9654634384084751, 0.7921096542927694, 0.7569286659608252, 0.9638901801362684, 0.8009935941149795, 0.5472992052668609, 0.803450665467315, 0.7071748109799121, 0.925372213234267, 0.8358602969244493, 0.5013539774751917, 0.76114733148495, 0.8409196129864291, 0.6004540275972222, 0.6242554642180184, 0.7863163821739155, 0.7855610931621674, 0.5837523643582748, 0.8772054421667996, 0.7761214532930358, 0.7921269887292546, 0.7345999083343054, 0.5157653782176657, 0.5727795890363829, 0.6915629527719143, 0.60715782360429, 0.6709922812985274, 0.8881860429054316, 0.7460448105842246, 0.6579370312362088, 0.6233690431365249, 0.5909442568242784, 0.6524170623766391, 0.7837404434967922, 
0.660451043343014, 0.9321777906072863, 0.9168570772960576, 0.7935544991716605, 0.8744927541022869, 0.6213741979948911, 0.5615075822416964, 0.9221301406573355, 0.7089112834189721, 0.5028688779967165, 0.6099135657025361, 0.7942108211391501, 0.5405338211135304, 0.9914194937355469, 0.5930260844968904, 0.6015555805159594, 0.6752832528232355, 0.8471030946837145, 0.9243989589631573, 0.8241347678574591, 0.9430286873895924, 0.6627010592521301, 0.6955259214860333, 0.945768897068516, 0.6773643698613927, 0.8884376117085355, 0.5771789625522717, 0.5974170038608939, 0.5929660038771092, 0.9828182695939262, 0.6150739165700747, 0.7613483097136733, 0.7021389056247636, 0.82820322896286, 0.9907282068793475, 0.604411780562639, 0.6551559017567585, 0.8538058373303081, 0.9698006169562738, 0.5788024824091587, 0.6593266369124686, 0.8663271392865015, 0.7353237624004714, 0.8930129508580846, 0.8655006774565666, 0.8523241985752377, 0.6175467684067242, 0.708934419069368, 0.6427857089803343, 0.8159292634626936, 0.6888466542260839, 0.967865936231636, 0.6210129613100557, 0.5366255401499385, 0.8055446965448945, 0.5940670501192774, 0.5434152401405767, 0.5770401655412056, 0.5969560513988119, 0.9769727910110262, 0.8265256520270928, 0.681882166957902, 0.7863753778650009, 0.5419877757668172, 0.915746317915296, 0.6437626359227024, 0.8142988396829145, 0.7673117652909901, 0.8438416731445058, 0.582425994669028, 0.5418673603327593, 0.695948111847613, 0.8134177987369794, 0.6488121986883286, 0.8608440293982965, 0.826037204606745, 0.6980308605963873, 0.5265271903297091, 0.5917268643558542, 0.787355757808717, 0.5489066071659837, 0.6484038526235057, 0.5341200545517506, 0.6494610042781092, 0.6454295363164777, 0.8952224208786037, 0.9539701298281222, 0.7943905272562894, 0.9958544762887128, 0.8128135554843834, 0.641313791861706, 0.6493027321851108, 0.7653904673512081, 0.7054826776468746, 0.7525027204761019, 0.6046808559381953, 0.7914910565810608, 0.6694432046815346, 0.5753935596737181, 0.7810666446240457, 0.5570427379289616, 0.5937014064608418, 0.7452821653999654, 0.777418175526714, 0.7532049229093205, 0.6886605042809385, 0.6751118458525884, 0.8585855064510803, 0.5479638743332989, 0.9156651473292594, 0.6816877132198763, 0.633358716175487, 0.9518177014927656, 0.6250940166406174, 0.893843180187947, 0.8300287881079281, 0.9027255620113692, 0.508726119225092, 0.6125870110514244, 0.7941271165724119, 0.9781930400316037, 0.5729685691311295, 0.6758304610473418, 0.8769558796748848, 0.6571734248643695, 0.714780505569107, 0.6258097865738217, 0.5080602383576567, 0.6611279135371182, 0.7543823694041599, 0.7366925772535606, 0.6686711754219568, 0.5594371759883598, 0.9200414282447109, 0.6815652629692752, 0.695521573493622, 0.8468671263026029, 0.9795220519056111, 0.9240184997543229, 0.7916586150401608, 0.6269949639335561, 0.7164428625380524, 0.9530461609088254, 0.8060184320214699, 0.8562510544575324, 0.6835677683851109, 0.592003746418656, 0.5901281034667402, 0.8995663411644998, 0.8722930118800893, 0.9300146202027657, 0.8890756195963634, 0.730021074430472, 0.9349353511953671, 0.6871671756372753, 0.7576052260423678, 0.9662646161200914, 0.8377220297703659, 0.6507628793882929, 0.6805378953794877, 0.9988047489800839, 0.639177620400696, 0.7030249993191677, 0.8446042758723032, 0.6468340770906948, 0.629205619203034, 0.5212864402406089, 0.9225537045694742, 0.5748387797752327, 0.8540022649032548, 0.8444647659598924, 0.6220797001143853, 0.5102216212796327, 0.9746962534672807, 0.5184321134355409, 0.5684388088349857, 0.8516495732658551, 0.6665960622084958, 
0.5889058436891101, 0.8943330259665722, 0.5161543648040315, 0.5797780091949838, 0.9529271517702254, 0.9552525805177794, 0.5294053961095556, 0.78716934459023, 0.8781555092396992, 0.5723694876394003, 0.8527475703236316, 0.5925102838386178, 0.5882805538505731, 0.92964303237526, 0.9356781468531246, 0.5369521997528874, 0.9991458731018021, 0.7669877301220493, 0.6747239612505052, 0.535984642921164, 0.6560119319379832, 0.8205518301534211, 0.8528868037202795, 0.532365393442346, 0.5197424236061743, 0.7107342852415686, 0.6903272665919551, 0.9047028521248992, 0.95466954281727, 0.706433678510179, 0.5672826228513124, 0.7432827350407214, 0.8657786300792945, 0.5387847379166413, 0.925724889206001, 0.6739053603068051, 0.848139125466419, 0.5019414160679896, 0.8367985441801302, 0.6831704843129207, 0.8373125731233035, 0.7346826111308522, 0.8459907284781356, 0.8588725352469145, 0.6875148775682237, 0.8290308162753826, 0.5307193893180664, 0.864680088396879, 0.7378275872680642, 0.7437283324696132, 0.9915256115701195, 0.932108027455147, 0.9705608240442029, 0.8960667423470785, 0.6131798242272608, 0.9953659654838565, 0.5161862945648015, 0.8239451709312049, 0.9127793868895188, 0.9720370349920426, 0.5136741301187612, 0.7658401581191037, 0.9117626730652535, 0.9958601875666531, 0.9289619401086482, 0.6015215629789699, 0.7335896072725847, 0.6473187728398855, 0.7407121025194993, 0.5759982393572167, 0.8874638457378227, 0.8850098402769022, 0.7029795932915703, 0.9434661558204243, 0.8876161902464879, 0.6064210664180453, 0.7471649340863421, 0.925193315702309, 0.5765551320126745, 0.6387155650094809, 0.7913622837878913, 0.9370640028181594, 0.7290977567978159, 0.8351135999832571, 0.7510163755257597, 0.8387829934022151, 0.81193254034691, 0.5173614455151312, 0.8787496680828402, 0.9013818186588463, 0.796004452817268, 0.5197389768882843, 0.6234341860593895, 0.9789312973458868, 0.639875031473345, 0.8422653430940132, 0.9423526135590956, 0.6472020242361765, 0.7333251297116115, 0.731833506203957, 0.716663284163104, 0.9424638765924747, 0.5974308149181644, 0.9035648936660936, 0.9821764389921134, 0.7424075582759011, 0.9839580075241754, 0.676519638769224, 0.7664598865870298, 0.8167936040809898, 0.6387653296667252, 0.6436751284138729, 0.9336763444709959, 0.945949134844217, 0.6342362544235283, 0.5174282767830517, 0.7760465139717261, 0.7190864646320134, 0.8147268382624209, 0.8816293843365759, 0.7014958751563715, 0.6015709166864329, 0.6415941152814149, 0.8088193411687666, 0.7664514777736766, 0.6943867977788294, 0.5268848510853754, 0.6193774422669525, 0.5632621216355649, 0.6585108569908902, 0.7938668793681362, 0.6013938252153627, 0.6055802394451928, 0.6398995349857255, 0.9573533192872709, 0.8835151613392198, 0.891083610924235, 0.704284904782208, 0.6397645943607386, 0.6380926738924756, 0.5407032898180599, 0.7764146492752455, 0.8901394344683833, 0.6500815228604782, 0.9034359277863147, 0.5702969934077986, 0.6575149649153593, 0.6391157019469873, 0.7249468333750464, 0.5203687133469301, 0.6109392004327265, 0.7825123844081316, 0.7928077639093474, 0.7615661029296911, 0.8766597423044084, 0.6128025888948829, 0.8331838250673729, 0.5723066175936661, 0.9136074775920552, 0.9327256397666035, 0.8205175314379347, 0.6081160789386327, 0.8856694063537895, 0.8518223055061285, 0.6199228734050539, 0.6860128007613421, 0.6011623772087256, 0.6716310234334492, 0.5348770617881753, 0.6325820665288333, 0.5300383711893248, 0.8014128973556918, 0.8549043295978149, 0.6948597667466769, 0.5245187810971732, 0.8146075753379158, 0.7869523607124722, 0.6979290433571477, 
0.9253709565962183, 0.8991821885660147, 0.8135058670197116, 0.7589771777127562, 0.9478714741477258, 0.8595509946064208, 0.8535435980107899, 0.5273529412635047, 0.5432859731801132, 0.9155021587246206, 0.6530609134899201, 0.9325770696368664, 0.8480776448942414, 0.8094694023190814, 0.6154601374319297, 0.7128538469866149, 0.8165493829199933, 0.5051529869678096, 0.9598922488091417, 0.6028483345465174, 0.728642469839998, 0.7325540766460573, 0.834836083087562, 0.7174629180510331, 0.9876991125941831, 0.7051900449120676, 0.7714220989850871, 0.5723160035747583, 0.9234803232551262, 0.8780223510993465, 0.9188710065776873, 0.5103226215909413, 0.9635271575363713, 0.940244822948358, 0.6744502768745843, 0.955621715037075, 0.5220712537638685, 0.7654496293594193, 0.5460919277711018, 0.8845163063981873, 0.5212384977973661, 0.857981224742825, 0.7981726550234078, 0.96536703908634, 0.8443442002154493, 0.8018705382927362, 0.8235274560745327, 0.8181742647915351, 0.5188922501546788, 0.6496925289187754, 0.6669159863104381, 0.617079426836273, 0.7431021049579136, 0.5569123155828829, 0.9357654841693319, 0.7224435857969247, 0.5667397289683489, 0.9104049050141614, 0.7181815257977888, 0.6442243089690669, 0.9496167578909831, 0.5053024344842327, 0.8611585770256711, 0.7466579198623194, 0.5629997411728183, 0.5970959648756117, 0.9914256170172386, 0.8005214421046096, 0.6825643306415372, 0.5089699947713033, 0.9211975555041418, 0.9319797746046443, 0.5096619458148612, 0.5203921237921441, 0.6507295236428798, 0.5433539678776844, 0.556052906624505, 0.6524254809584136, 0.5992506871125677, 0.936703675173564, 0.8929390306732288, 0.8686967271993203, 0.8962203726441915, 0.7371416025635431, 0.5198940934224826, 0.6385916675221293, 0.8728613963134714, 0.9615828155654698, 0.6196910715926967, 0.8154399563888691, 0.7900800922836845, 0.580302912176433, 0.8977991171339559, 0.5015054851590662, 0.9220885845997386, 0.9819738917678473, 0.8756009537054932, 0.8088636294631915, 0.6374507700063703, 0.663317183155143, 0.7386533010348891, 0.9923093634269613, 0.5818561080574995, 0.7526616777761118, 0.811325089165613, 0.5672821481757033, 0.6275980427056668, 0.8765856501040606, 0.7242636725052409, 0.9309905687995792, 0.5150707292434097, 0.6559426630282776, 0.6162932516923176, 0.9960347521825266, 0.7124288376431711, 0.7670954888201322, 0.7280472540065532, 0.7643875305010728, 0.9765342642844058, 0.8355627930218592, 0.9750104404256045, 0.5301134357248831, 0.7805099880620585, 0.6452321960232006, 0.7817561763147924, 0.5193243881442117, 0.560969503152547, 0.5613473628525412, 0.7690810952382325, 0.7828273792419752, 0.5054152644567658, 0.7588136482927028, 0.9795473372554943, 0.7830736298463543, 0.6438338871277196, 0.7041638541360838, 0.6122159412359114, 0.5712114982218484, 0.8068102019366056, 0.6239616780625801, 0.929688782730278, 0.531668814149201, 0.6412472487991798, 0.6992025439786067, 0.5710540180831314, 0.5910393014989255, 0.7649565654728958, 0.7285432212894083, 0.5162494806732162, 0.7228269659307935, 0.7290125061356767, 0.5815334801552252, 0.843517491662637, 0.8821636636649975, 0.7281465337106677, 0.9225441650750518, 0.7011764723614714, 0.8217469345217072, 0.8899480417837744, 0.5939652905104598, 0.9978845511886627, 0.9234538769759321, 0.8862857254381704, 0.6372386023985757, 0.7023174312194447, 0.9364269247292629, 0.6639841063948866, 0.9032641983741855, 0.6915058126698029, 0.8054100996143874, 0.5100487684843068, 0.8300047550692435, 0.6857837879058708, 0.6506267475969114, 0.7165743591288839, 0.5036053514050455, 0.9317040866615414, 0.7108755043067905, 
0.8182322905735229, 0.8336928890112152, 0.6547883597008355, 0.9018926990726173, 0.89880978102049, 0.6186627880481559, 0.6593836086965359, 0.67276648963544, 0.7822184102308498, 0.6684874205581698, 0.7674328694291627, 0.8180218964997443, 0.7257177369310089, 0.5523864200091388, 0.7665514996776907, 0.7675492086221051, 0.7976587729226796, 0.5920972301841083, 0.5172732806520535, 0.7448701544224792, 0.5456383980061769, 0.7675865714248676, 0.5928871012037974, 0.6432975441376207, 0.8257859967442878, 0.745937798113723, 0.738390109519, 0.8310884895172159, 0.7950020037710401, 0.5523178484690423, 0.9341061118268646, 0.5092737365124196, 0.8893675295083721, 0.8142633079407278, 0.6906360500239161, 0.7931936096473864, 0.5786214942418056, 0.5255705858438764, 0.9673226994645517, 0.6825658114796198, 0.6990893575908248, 0.7689647279348479, 0.7762629044605842, 0.787899759050261, 0.8612539558253843, 0.6968966869744391, 0.8963898451641584, 0.7874776275692648, 0.6169346246839679, 0.7456162782552551, 0.9253078044675884, 0.8043728921457898, 0.8878897619363925, 0.8101042360139501, 0.5013362865537523, 0.6419645923108228, 0.8495431396476412, 0.5624453309804072, 0.9008383721313372, 0.5147451385287836, 0.7214579946039219, 0.8321499104415846, 0.5196080473580488, 0.8720048937484752, 0.655598445294471, 0.6075474520304793, 0.6154669613042898, 0.8399651297639643, 0.7697946149196762, 0.6011135460944501, 0.9767092106165108, 0.6337856801702281, 0.7776414439798223, 0.7746078191257271, 0.793142159059689, 0.5247866652041935, 0.8948301947235571, 0.7573771514214955, 0.6083668028854381, 0.907300897592991, 0.6504068203424305, 0.617418686216896, 0.5349856086287135, 0.5620550987838409, 0.9820890492475123, 0.7642196626032158, 0.6657158913960136, 0.8539587551642036, 0.7986895751963896, 0.6977576028442419, 0.8688267853679255, 0.7163153318221285, 0.8978054293694165, 0.5562238707891396, 0.7978227035728627, 0.5674909221765463, 0.6899473475380353, 0.7190970972201947, 0.8650081894009487, 0.775035991867493, 0.7217251022807419, 0.8404709341125339, 0.8646288213576638, 0.5379876901276016, 0.5178906129772481, 0.6583084173204156, 0.6731145936478715, 0.5607325051718208, 0.5615589240891601, 0.9293369600567007, 0.5087867846744999, 0.7142602864072984, 0.747480576242112, 0.8135285773948422, 0.6669562535618634, 0.8643794168648794, 0.9310210679108952, 0.9823847198187513, 0.6149409385319168, 0.8655435314817495, 0.7071008261543383, 0.5523976022672145, 0.524183662181188, 0.7653996972823487, 0.8063431516438057, 0.5374745069430411, 0.7894098297770019, 0.6307209584402045, 0.8251222004497787, 0.8691504288574734, 0.8758001159385629, 0.90642734797546, 0.7976328926840953, 0.7615243579177798, 0.8067739043396971, 0.8547605703301723, 0.930946449435019, 0.5053095012523622, 0.8893165830370782, 0.9149247226706171, 0.5770988199864353, 0.8155298174252852, 0.5969915773286172, 0.8919822828700179, 0.7788481560679072, 0.7339365882334028, 0.9521731629482624, 0.908016888632325, 0.7576318890513758, 0.8263561852058019, 0.9886462960575197, 0.6492912242208633, 0.5759064445487654, 0.6438049785659269, 0.9961599573215405, 0.6035878910247205, 0.8192066790878987, 0.6793723801121848, 0.5149176103065249, 0.8716668788881881, 0.7303766687344688, 0.7437744424061299, 0.8403188595919369, 0.9146476681917969, 0.9144226458668399, 0.8241969555195214, 0.9400767188872132, 0.8796708199779166, 0.5867123207272281, 0.5661065534733196, 0.9842670002554704, 0.7088977261678507, 0.8373520495971691, 0.9466016074899567, 0.9671096687559122, 0.5014040450051263, 0.6816596474812255, 0.7413921064322477, 
0.8500197585856759, 0.8225229948041795, 0.7308164850656009, 0.5216623036800799, 0.678317199776818, 0.7851256717946675, 0.5103419355975908, 0.8196601569712345, 0.7818674644292971, 0.5298765015626382, 0.8462327648127925, 0.9772879507000612, 0.6324107344205412, 0.8827171708520256, 0.6256181471563094, 0.8095571740975434, 0.6344620691005516, 0.9747123256749077, 0.592791651077162, 0.557067118035114, 0.790484083530427, 0.598985629525823, 0.8312950794494223, 0.972020532106863, 0.5089042897221394, 0.8060612789814228, 0.9031149678870163, 0.8652151460304494, 0.7669136717954567, 0.5794517676193336, 0.8425363712278714, 0.9891762054200774, 0.8173903761948815, 0.7794259853691299, 0.7339551166065378, 0.5394477406507698, 0.556744012807699, 0.7331446799778027, 0.5454830283207355, 0.8476924384614626, 0.7780073856725516, 0.818329607734478, 0.8211647962868294, 0.9029867473767959, 0.733753611499397, 0.8084270594194268, 0.7615139308409747, 0.8789347356044743, 0.9431097584677678, 0.6158791780463081, 0.8031087852362604, 0.7018641089417004, 0.8535165093406694, 0.5222207985842773, 0.8305967784000268, 0.999898623019865, 0.5832150161577367, 0.8467455984797989, 0.896549339889776, 0.6669288245133367, 0.7212484650603225, 0.6215986100123481, 0.6454031947913343, 0.9682955548864891, 0.6645712604931293, 0.663061184146073, 0.6879227096452558, 0.8893045607909527, 0.6918347601611998, 0.718874237050344, 0.7170744322668204, 0.9266457364640908, 0.6648140609037345, 0.5613284324301513, 0.6966411963937653, 0.8460904481630513, 0.739592525770473, 0.6970233103754411, 0.7791381561353667, 0.5452501057362795, 0.8186900413852012, 0.5272323129378986, 0.8399320293997783, 0.9796375756990707, 0.7952095396443757, 0.798401405719408, 0.9314878077796653, 0.5960148391749803, 0.5900768312339093, 0.5116196464613243, 0.7726322299207953, 0.8959085868747678, 0.6600348343393145, 0.7713417789156092, 0.9178806511047147, 0.6776629075706067, 0.5424048470261353, 0.9446435821224528, 0.809291715618141, 0.6609481671457218, 0.6701550770981094, 0.7366963063825851, 0.7358670240524675, 0.9402398759264472, 0.8883105212949844, 0.5767489068783174, 0.8252968756736643, 0.5543768874781556, 0.7955125789309502, 0.5849021489259048, 0.7544769073707238, 0.6712448881885786, 0.8925486823602715, 0.7165697164072855, 0.89962582211716, 0.7114949402208695, 0.9255693523855371, 0.7763465960183871, 0.618450351181935, 0.7790208386320476, 0.5352202745711507, 0.9891704145525944, 0.6624294634369445, 0.5563976692071078, 0.8882724792281421, 0.9407512647619354, 0.8314322984636187, 0.7807162869160427, 0.6173605711469301, 0.8879770284960171, 0.886161553238737, 0.706173763979181, 0.5748288568455264, 0.897097308546642, 0.6434027726247901, 0.74760194156338, 0.7920322729796607, 0.8555441361438446, 0.6167779623805374, 0.5261916729697578, 0.7616086480511656, 0.5516801142538106, 0.5368518436001746, 0.9098265251701911, 0.7198986338667326, 0.6399799246270028, 0.6640507012194743, 0.8100597413610224, 0.9947068832545278, 0.7707026884031236, 0.9814662478819047, 0.9890101565786802, 0.8937033933251552, 0.9140219134505654, 0.6216556903657474, 0.7557301924469492, 0.9153055653587335, 0.754516446379301, 0.9774686853543858, 0.7502076482947491, 0.6146817991189966, 0.8830231807869793, 0.9646678134010758, 0.5453158678931883, 0.9157895747562862, 0.7622706910881434, 0.8822423893820512, 0.7164041767939908, 0.8655201616946835, 0.9957172073748555, 0.8184460016840276, 0.8085726609681312, 0.8231370182294477, 0.7726921422239639, 0.5589558487777035, 0.6397519476693545, 0.7651546672729788, 0.6078722209659344, 0.9183875427252378, 
0.7862335529028825, 0.8891440159290445, 0.6109661589176987, 0.5365729638518577, 0.6404742572050899, 0.6535953300698574, 0.9676552153095836, 0.9215965981347031, 0.515760110225036, 0.718719823205925, 0.7032753370882583, 0.8551322264716803, 0.7366194035228892, 0.6348135258314045, 0.8283655456532653, 0.9006641899695829, 0.6778397601632018, 0.7322981916569187, 0.7145708542796585, 0.5009851812980679, 0.6676270139881939, 0.6472400057914102, 0.5448059511730533, 0.7682722552757896, 0.7686198830087784, 0.6437274479983579, 0.546644571050813, 0.8022601756921321, 0.5540078581040906, 0.5404803357646176, 0.7930622251001478, 0.9973481352761929, 0.786947871300294, 0.6142307428501093, 0.6205262213662558, 0.6200311119376098, 0.6925347787515862, 0.7463768403961208, 0.9157854106712157, 0.9841070054436734, 0.9046146604123131, 0.8672897415190852, 0.5084601648460894, 0.7625073873469459, 0.8324163747021962, 0.5016499286556593, 0.7240576909173986, 0.5351328689291761, 0.7039042594421585, 0.5412681226663529, 0.7480069391528494, 0.614210766642989, 0.9476529967175451, 0.8911185656182008, 0.5208899659267474, 0.7926277027706221, 0.7897548702533355, 0.6974942145350399, 0.6482394019896965, 0.8338332201828214, 0.9646873368039379, 0.8015251365237392, 0.9233765691926299, 0.6185958025097656, 0.5481499384890696, 0.8296296772151147, 0.6893248659811981, 0.5317261728594251, 0.5458295671561957, 0.8437852024002674, 0.5101631307839003, 0.9326903057353478, 0.9557010368456083, 0.8803604106693255, 0.9042539793946862, 0.5221755262011514, 0.6911902803527633, 0.8633256026710845, 0.9759611541288452, 0.9831686895499968, 0.7726740314507103, 0.5359662365924959, 0.7049292019033488, 0.6159252589704116, 0.5188457977391356, 0.8053665190474937, 0.9822761001453222, 0.712909072782901, 0.7761031219313634, 0.990773681066657, 0.8674799938990794, 0.6740226993512959, 0.6879771168920681, 0.6963561987900253, 0.7418000289018154, 0.7024923243730755, 0.5953431081121066, 0.5851970119943858, 0.6255737677224071, 0.9312029668216117, 0.5948614887303092, 0.5828680610490713, 0.8555473663016062, 0.7873037490485959, 0.6557701870796334, 0.9684821035100158, 0.6079909040684202, 0.9327884268882946, 0.5911808911696887, 0.7605652872617463, 0.9926874369689922, 0.7898819983354092, 0.6356283278357446, 0.7059572460867527, 0.6278501066064799, 0.8424170151022313, 0.5186360410010542, 0.6136251361655191, 0.9457399443190431, 0.6253177657041136, 0.6462976828049336, 0.8366581547266769, 0.8951790282638051, 0.9896551643612765, 0.7858358378929504, 0.993063504185514, 0.6329370916428947, 0.7110768481169445, 0.562755850540752, 0.6172365606427809, 0.5138968216438502, 0.9312926525754174, 0.5564187639649898, 0.5993202917892867, 0.6052324986105493, 0.9071351594473988, 0.5349240051410104, 0.9683161081110236, 0.7815010555708877, 0.7589471296384135, 0.9608367523426091, 0.9811818628886728, 0.6202256428942371, 0.6347571902825007, 0.5696325650429657, 0.6958469660606633, 0.5508213126910059, 0.8600525533497847, 0.6303833962634704, 0.8271573907313883, 0.5802147758369782, 0.511374086216662, 0.935193037408419, 0.6460842197623998, 0.7228526364189263, 0.899147928972994, 0.5708548227700966, 0.5352737074126583, 0.6494057328991445, 0.731492883140168, 0.6622207754544218, 0.5026748339869939, 0.7861407625440129, 0.7895144859985515, 0.5668122641259743, 0.5194209504621901, 0.7664936440772383, 0.6895681728682042, 0.5471113079675233, 0.5697905974943365, 0.9654621412877444, 0.6117534747774322, 0.5195230510250107, 0.5029874549497909, 0.6436230566901067, 0.7814019757174042, 0.7243115146159262, 0.8490022959549282, 
0.5986087210572619, 0.8917653315595842, 0.8186740951384265, 0.6557207833538385, 0.7383517661652084, 0.5076593667567217, 0.8351509540790598, 0.9066154116110787, 0.8633966120608996, 0.5448011348714983, 0.8811262486064548, 0.5311062494107786, 0.8434152015057589, 0.7587715892859013, 0.6525977108705863, 0.8709010148712284, 0.9902567437643712, 0.6363651362541571, 0.93154565323665, 0.6673336659816802, 0.6363488918779094, 0.5398241942834009, 0.7914814885511636, 0.9529666873959003, 0.564045965681691, 0.6382659091632683, 0.7805351222831617, 0.7733886221614187, 0.5241008399449433, 0.611945308809795, 0.6169907785866606, 0.6385936386173398, 0.7841750718670859, 0.7173238321530276, 0.7367928171908347, 0.6265166185554858, 0.6483108410096134, 0.9089151560803901, 0.6335384299494107, 0.8466095418964659, 0.636736467136152, 0.7789681772250416, 0.9077504496156267, 0.6867613895975948, 0.8301847995782373, 0.7681290610779284, 0.8055269135501388, 0.509034281413844, 0.5060145402076777, 0.9531102781357583, 0.6049030717098274, 0.6978487916911934, 0.7108004718191071, 0.939874195247411, 0.592163166813884, 0.6911488424262666, 0.6024932357446733, 0.5784406533904147, 0.7248755260288222, 0.8132232450347373, 0.5497978515691366, 0.6422827817500908, 0.5623109553041825, 0.7026465582885555, 0.6835431097023816, 0.8280875424648927, 0.5697242233141735, 0.8156284418986919, 0.5454630115116291, 0.7460605384610532, 0.5902137370746723, 0.7745159125590151, 0.6547269575301189, 0.7346468747258099, 0.9460016552534074, 0.8465037831537723, 0.6581592649653958, 0.9961161028919017, 0.9616426679771357, 0.6219619779644283, 0.8905650088552008, 0.693779959860743, 0.8630066415278895, 0.6282224791138035, 0.7181170944536824, 0.716100884180571, 0.7756481074289252, 0.9142676032562131, 0.9180303779395655, 0.5432335136324105, 0.7182639917924389, 0.8153204221836884, 0.5827031352248715, 0.5463832979704049, 0.7948268737909616, 0.6598653129414418, 0.7845986404426359, 0.7420335255239141, 0.9385969458853061, 0.6671326457500286, 0.6998412684827398, 0.9868858054402161, 0.8786467710924218, 0.7726528808366292, 0.5025826296328212, 0.8629287020940559, 0.768810976141887, 0.7143246919991757, 0.5711739798143532, 0.7403205840894782, 0.6027968963707016, 0.6175212302157366, 0.895600718676384, 0.6650496553531864, 0.5250917194786286, 0.6758665713113955, 0.7805817873318424, 0.6818936839385621, 0.6057482283252358, 0.6871084714045712, 0.6843923217748706, 0.9580840879847571, 0.9620823503252021, 0.9359778904131593, 0.5283872878497775, 0.6871858860380478, 0.9444190932633929, 0.9885328307628365, 0.8083797157964749, 0.9424081240545741, 0.8802440651197797, 0.784110556928838, 0.7405118236993029, 0.8360838660431231, 0.899166063131732, 0.77185218566812, 0.6062536162567856, 0.9208825384410072, 0.7634110466954283, 0.6976841175687796, 0.8327596087256153, 0.9671620346864906, 0.9686730049558581, 0.8064760176546859, 0.8111507853410636, 0.8183558206337572, 0.9303474076168086, 0.7269156219306148, 0.663924586136488, 0.5652355395972282, 0.7114783361613157, 0.9941623968973058, 0.9019171227186691, 0.5718219819754591, 0.604427496795746, 0.8175951412577782, 0.8307192547040465, 0.8351033792034779, 0.8684962626794389, 0.6539215870803055, 0.9848417146820273, 0.6531511706497632, 0.6594799796002644, 0.9779532141456241, 0.673486037170848, 0.5789477708928197, 0.6180602771363755, 0.5329323393363907, 0.8489464197755796, 0.5064534417492499, 0.8862573495967074, 0.6222412180577075, 0.6278909822734882, 0.6167278917623664, 0.7514889415570422, 0.9842393117235242, 0.7040952031606168, 0.6162300748507374, 
0.5495587025920416, 0.9547786202798549, 0.8277331199332134, 0.7967722842015716, 0.9440799125798904, 0.9418650824551249, 0.7534229072756989, 0.9912622414268777, 0.8452315698593057, 0.9822508947400309, 0.5730411813614218, 0.8736561417308908, 0.9482751125132334, 0.9656610121257161, 0.5773354752984169, 0.5457123334403183, 0.7477422326987724, 0.7695973624514447, 0.9589493801559172, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
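// NOTE: the run of 50000.0 values that closes the float array above appears
// to be padding/sentinel data. h_B below looks like a companion host-side
// ("h_" prefix) integer index table belonging to the same generated fixture.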
int h_B[] = {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 
1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3573, 3574, 3576, 3578, 3580, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3592, 3593, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3611, 3612, 3613, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3733, 3734, 3735, 3736, 3738, 3740, 3742, 3743, 3744, 3745, 3746, 3748, 3750, 3752, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3765, 3767, 3768, 3770, 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3850, 717, 712, 4056, 4058, 704, 699, 4060, 4062, 910, 905, 717, 712, 704, 699, 638, 633, 3864, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4081, 3989, 4083, 3994, 656, 3997, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3989, 4085, 3992, 3994, 
656, 3997, 669, 910, 905, 3875, 910, 905, 3877, 933, 938, 933, 938, 910, 905, 3884, 910, 905, 3886, 933, 938, 933, 938, 755, 789, 3891, 3893, 784, 789, 4103, 760, 760, 3896, 590, 585, 610, 605, 3901, 784, 590, 585, 3904, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3916, 3917, 3919, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3928, 3930, 3932, 755, 755, 755, 784, 590, 585, 717, 712, 4109, 4012, 4111, 4012, 760, 755, 638, 633, 3942, 638, 633, 3943, 784, 789, 4113, 4115, 854, 859, 3949, 905, 910, 3953, 928, 905, 910, 3953, 928, 4118, 943, 859, 854, 3959, 877, 872, 877, 872, 859, 854, 3965, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3989, 638, 633, 3992, 3994, 656, 3997, 669, 717, 712, 717, 712, 717, 712, 4003, 4004, 704, 699, 717, 712, 717, 712, 4009, 4010, 4012, 760, 755, 760, 755, 760, 755, 765, 789, 784, 789, 784, 4148, 789, 784, 826, 821, 836, 831, 846, 841, 4151, 826, 821, 836, 831, 846, 841, 4153, 859, 854, 4039, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4172, 4175, 4159, 4160, 4161, 4177, 4179, 4181, 4183, 4185, 4190, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4202, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4210, 4160, 4161, 1363, 1547, 4212, 4135, 4161, 4217, 4105, 1545, 1540, 4219, 4107, 4108, 4225, 1276, 1271, 4227, 1276, 1271, 1281, 1281, 1281, 4229, 4231, 1545, 1540, 1545, 1540, 1545, 1540, 4135, 4160, 4161, 4135, 4160, 4161, 1521, 1516, 4138, 1521, 1516, 4140, 4156, 4158, 4159, 4160, 4161, 4242, 1521, 1516, 4164, 1521, 1516, 4167, 1545, 1540, 1545, 1540, 4241, 4240, 4241, 4240, 4241, 4240, 2077, 2077, 4241, 4240, 4241, 4240, 4241, 4240, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4263, 2174, 4265, 2174, 4267, 4270, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4260, 4259, 4274, 4273, 4260, 4259, 4260, 4259, 4260, 4259, 4260, 4259, 4274, 4273, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4307, 4308, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4344, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4449, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 
4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4556, 4557, 4558, 4559, 4560, 4561, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4584, 4585, 4586, 4146, 4145, 4146, 4145, 4146, 4145, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4618, 4619, 4620, 4621, 4623, 4624, 4626, 4627, 4628, 4630, 4631, 4633, 4634, 4636, 4637, 4638, 4639, 4640, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4587, 4677, 4678, 4590, 4679, 4680, 4592, 4681, 4682, 4683, 4684, 4587, 4685, 4686, 4188, 4187, 4590, 4687, 4688, 4188, 4187, 4592, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4240, 4240, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4734, 4736, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4269, 4273, 4755, 4756, 4272, 4274, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4272, 4269, 4272, 4269, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4800, 4802, 4804, 4806, 4808, 4810, 4812, 4815, 4817, 4819, 4821, 4823, 4825, 4828, 4830, 4832, 4834, 4836, 4838, 4841, 4844, 4846, 4848, 4856, 4858, 4860, 4863, 4866, 4868, 4870, 4879, 4882, 4885, 4887, 4889, 4892, 4895, 4897, 4903, 4908, 4910, 4914, 4917, 4919, 4922, 4926, 4932, 4935, 4937, 4939, 4948, 4950, 4954, 4956, 4959, 4962, 4964, 4967, 4971, 4976, 4979, 4981, 4983, 4986, 4988, 4990, 4992, 4994, 4997, 5000, 5002, 5004, 5007, 5010, 5017, 5019, 5021, 5025, 5027, 5029, 5034, 5036, 5038, 5041, 5043, 5045, 5047, 5049, 5051, 5053, 5055, 5057, 5059, 5062, 5064, 5066, 5069, 5072, 5075, 5016, 5014, 5033, 5081, 5082, 5083, 5084, 5016, 5014, 5033, 5085, 5086, 5016, 5014, 5033, 5033, 5016, 5014, 5033, 5087, 5089, 5091, 5093, 4874, 4851, 4855, 4853, 4874, 4873, 4878, 4876, 5097, 5099, 5101, 5106, 5108, 5112, 4931, 4902, 5040, 4146, 4145, 4147, 5014, 4931, 4902, 4931, 5040, 4146, 4145, 4147, 5014, 4931, 4943, 4150, 5016, 4934, 4943, 4150, 4931, 4943, 5040, 4146, 4145, 4147, 5117, 5016, 5014, 4448, 4952, 4450, 4953, 5016, 5014, 5033, 4975, 4970, 4975, 4974, 4975, 4970, 5121, 4975, 4974, 5123, 5128, 5130, 5132, 5140, 5143, 5016, 5014, 5033, 5151, 5154, 5157, 5159, 5145, 5142, 5161, 5164, 5167, 4666, 4241, 4240, 5147, 5172, 5175, 5176, 5177, 5180, 5181, 5182, 5186, 5188, 5190, 5193, 5145, 5142, 5145, 5116, 5197, 5199, 5201, 4666, 4241, 4240, 5203, 4666, 4241, 5205, 4666, 4241, 5206, 5147, 5147, 4666, 4241, 4240, 5145, 5142, 2135, 2133, 5207, 5210, 5213, 4666, 4241, 4240, 5145, 5116, 2135, 2133, 5216, 5219, 5222, 4666, 4241, 4240, 5145, 5142, 5227, 4666, 4241, 4240, 4666, 4241, 4240, 5147, 4666, 4241, 4240, 5231, 5233, 5235, 5238, 5240, 5240, 5185, 5247, 5248, 5244, 5244, 5229, 5251, 5252, 5240, 5185, 5244, 5229, 5240, 5240, 5240, 5237, 5240, 5240, 5244, 5229, 
5263, 5264, 5265, 5266, 5230, 5244, 4274, 4273, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4065, 4064, 4087, 4065, 4064, 4088, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5411, 5412, 5023, 4144, 4143, 5326, 5031, 4144, 4143, 5413, 5414, 4150, 4546, 4147, 5326, 5416, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5418, 5419, 5023, 4144, 4143, 5320, 5031, 4144, 4143, 5420, 5421, 4546, 4150, 4147, 4065, 4064, 4090, 4089, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5423, 5424, 5023, 4144, 4143, 5323, 5031, 4144, 4143, 5425, 5040, 4546, 4150, 4147, 5012, 5009, 5023, 4144, 4143, 5389, 5031, 4144, 4143, 5426, 5040, 4546, 4150, 4147, 5382, 4925, 4996, 4142, 4141, 4924, 5012, 5009, 5427, 5428, 5031, 4144, 4143, 5429, 5023, 4144, 4143, 5326, 4146, 4145, 4546, 4150, 4147, 4071, 4070, 4840, 4142, 4141, 4843, 5333, 4850, 5434, 5435, 5436, 5437, 4862, 4142, 4141, 4865, 5340, 4872, 5438, 5439, 5440, 5441, 4884, 4881, 4088, 4087, 4894, 4891, 4090, 4089, 4996, 4141, 4142, 4924, 5382, 4925, 4912, 4928, 4934, 5448, 4941, 4144, 4143, 5449, 5450, 5451, 5452, 5453, 4996, 4141, 4142, 4924, 5382, 4925, 5012, 4901, 5454, 5455, 4941, 4144, 4143, 5456, 5040, 4146, 4145, 4400, 4996, 4141, 4142, 4934, 5457, 4941, 4144, 4143, 5458, 5459, 5460, 5461, 4996, 4141, 4142, 4924, 5382, 4925, 4912, 5009, 5462, 5463, 4941, 4144, 4143, 5464, 5040, 4146, 4145, 5465, 4996, 4142, 4141, 4929, 4928, 5466, 5467, 4941, 4144, 4143, 5468, 5040, 4146, 4145, 5469, 4996, 4142, 4141, 4924, 4925, 4929, 4928, 4934, 5470, 4941, 4144, 4143, 5471, 5472, 5473, 5474, 5475, 4130, 4128, 4996, 4142, 4141, 4999, 5382, 5006, 4961, 4958, 5477, 5478, 4144, 4143, 5479, 5480, 5040, 4146, 4145, 4150, 4147, 4546, 4144, 4143, 5481, 4144, 4143, 5482, 5040, 4146, 4145, 4996, 4142, 4141, 4999, 5382, 5006, 4961, 4958, 5483, 5484, 5023, 5389, 5031, 5485, 5040, 4146, 4145, 4150, 4147, 4546, 4129, 4131, 5486, 5487, 5488, 5489, 4966, 4969, 5490, 5491, 4973, 5493, 5494, 4978, 4129, 4128, 4985, 4131, 4130, 4996, 4142, 4141, 4999, 5382, 5006, 5012, 5009, 5501, 5502, 5023, 4144, 4143, 5389, 5031, 4144, 4143, 5503, 5040, 4146, 4145, 4150, 4546, 4147, 5399, 4555, 5402, 4562, 5061, 5406, 5071, 5068, 5077, 5074, 5508, 5509, 5510, 5511, 5512, 4583, 5513, 5514, 5515, 5516, 5517, 5518, 5520, 5521, 5523, 4193, 4192, 4233, 4233, 5095, 5096, 5443, 5528, 5529, 5530, 5531, 4233, 5535, 5536, 5537, 4233, 5539, 5540, 5145, 5142, 5446, 5542, 5543, 5145, 5116, 5447, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 4622, 5552, 5553, 5557, 5558, 5559, 5560, 5561, 4629, 5562, 5563, 5492, 5492, 5492, 5495, 5567, 5568, 5569, 5570, 5571, 4233, 4233, 4233, 4236, 4236, 5573, 5574, 5575, 5576, 5577, 5578, 5145, 5142, 4238, 4238, 5579, 5580, 5581, 5582, 5156, 5153, 4245, 4245, 5587, 5230, 5588, 5589, 4272, 4274, 4273, 4269, 5240, 5237, 5590, 5240, 5237, 5592, 5593, 5594, 5595, 5597, 5598, 4272, 4274, 4273, 4269, 5599, 5600, 5601, 4272, 5230, 4269, 5212, 5209, 5215, 5602, 4272, 4269, 5603, 5604, 5229, 4272, 4269, 5605, 5230, 5212, 5209, 5215, 5221, 5218, 5224, 5212, 5209, 5215, 5221, 5218, 5224, 5606, 5230, 5607, 5608, 5240, 5237, 5613, 5611, 5240, 5237, 5614, 5615, 5616, 57, 58, 59, 60, 61, 62, 63, 5632, 5633, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5657, 5658, 5659, 5660, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5672, 5673, 5674, 5675, 5676, 5677, 5678, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5691, 5692, 5693, 5694, 5695, 5696, 5698, 5699, 5700, 5701, 5702, 5703, 
5704, 5706, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5720, 5721, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5729, 5730, 5731, 5732, 5734, 5735, 5736, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5755, 5757, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5767, 5769, 5770, 5771, 5772, 5773, 5774, 5775, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5787, 5788, 5789, 5791, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5803, 5805, 5806, 5807, 5809, 5810, 5811, 5812, 5813, 5814, 5815, 5816, 5818, 5819, 5820, 5821, 5825, 5826, 5827, 5828, 5829, 5830, 5831, 5832, 5833, 5835, 5836, 5837, 5839, 5840, 5841, 5843, 5844, 5845, 5846, 5847, 5848, 5850, 5851, 5852, 5854, 5855, 5856, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5867, 5868, 5869, 5871, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5887, 5888, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5898, 5900, 5901, 5903, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5911, 5912, 5913, 5914, 5916, 5917, 5918, 5920, 5921, 5922, 5923, 5924, 5925, 5926, 5927, 5928, 5930, 5932, 5933, 5934, 5936, 5937, 5939, 5940, 5941, 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, 5950, 5951, 5952, 5953, 5955, 5956, 5957, 5958, 5959, 5960, 5961, 5963, 5964, 5965, 5966, 5967, 5968, 5969, 5970, 5971, 5972, 5973, 5974, 5975, 5976, 5977, 5978, 5979, 5981, 5982, 5983, 5984, 5985, 5989, 5991, 5993, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6001, 6003, 6005, 6006, 6009, 6010, 6012, 6013, 6014, 6015, 6017, 6018, 6019, 6022, 6025, 6027, 6028, 6030, 6033, 6035, 6036, 6038, 6039, 6040, 6041, 6042, 6045, 6047, 6048, 6049, 6050, 6051, 6052, 6055, 6058, 6059, 6060, 6061, 6063, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6074, 6075, 6076, 6077, 6078, 6079, 6081, 6082, 6083, 6084, 6087, 6089, 6090, 6091, 6092, 6093, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6107, 6108, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6131, 6132, 6133, 6134, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6144, 6147, 6150, 6156, 6159, 6163, 6166, 6170, 6176, 6179, 6183, 6186, 6189, 6191, 6193, 6199, 6202, 6206, 6210, 6213, 6215, 6219, 6223, 6228, 6232, 6235, 6238, 6242, 6244, 6247, 6249, 6257, 6265, 6267, 6269, 6271, 6273, 6279, 6281, 6282, 6285, 6286, 6292, 6295, 6298, 6302, 6305, 6306, 6309, 6310, 6316, 6319, 6322, 6325, 6328, 6331, 6334, 6337, 6342, 6344, 6345, 6348, 6351, 6357, 6360, 6362, 6365, 6368, 6370, 6372, 6375, 6381, 6387, 6390, 6403, 6406, 6408, 6414, 6417, 6421, 6424, 6427, 6436, 6438, 6445, 6198, 6155, 6175, 6433, 6431, 6435, 6155, 6227, 6175, 6198, 6227, 6435, 6449, 6254, 6256, 6262, 6264, 6459, 6461, 6462, 6465, 6466, 6278, 6315, 6341, 6291, 6431, 6278, 6291, 6341, 6315, 6380, 6341, 6433, 6469, 6473, 6413, 6433, 6431, 6350, 6349, 6395, 6356, 6380, 5919, 6385, 6433, 6431, 6394, 6393, 6395, 6396, 6413, 6433, 6431, 6399, 6401, 6481, 6488, 6489, 6490, 6413, 6433, 6431, 6435, 6494, 6495, 5584, 5226, 5225, 6499, 5992, 5990, 5584, 5226, 5225, 6502, 6504, 5584, 5583, 6506, 6472, 6476, 5584, 5583, 6508, 6510, 5992, 5990, 5584, 5226, 5225, 6513, 6515, 5584, 5583, 5584, 5225, 5226, 6518, 6519, 6472, 6522, 6476, 5584, 5226, 5225, 6525, 6526, 5225, 5226, 5584, 6529, 5584, 5226, 5225, 6532, 6472, 6534, 6476, 6537, 6472, 6540, 6476, 6543, 5226, 5225, 5584, 6546, 5584, 5583, 6548, 6549, 5584, 5583, 
6550, 5584, 5583, 6553, 6555, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6594, 6596, 6597, 6598, 6599, 6601, 6602, 6603, 6606, 6608, 6609, 6610, 6612, 6613, 6614, 6615, 6617, 6618, 6620, 6622, 6623, 6628, 6631, 6633, 6635, 6636, 6637, 6639, 6641, 6643, 6644, 6645, 6647, 6648, 6649, 6652, 6654, 6657, 6658, 6661, 6662, 6664, 6665, 6668, 6670, 6671, 6672, 6673, 6677, 6201, 6678, 6158, 6679, 6178, 6680, 6681, 6682, 6149, 6146, 6683, 6158, 6684, 6234, 6685, 6178, 6605, 6686, 6201, 6383, 6687, 6234, 6688, 6675, 6690, 6691, 6692, 6693, 6627, 6625, 6699, 6630, 5794, 6700, 6318, 6701, 6651, 5874, 6702, 6294, 6703, 6666, 6667, 6675, 6704, 6630, 5794, 6705, 6294, 6706, 6638, 5824, 6707, 6318, 6708, 6330, 6709, 6651, 5874, 6710, 6675, 6713, 6416, 6714, 6715, 6716, 6717, 6718, 6719, 6359, 5890, 5889, 5902, 5899, 6720, 6383, 6721, 6722, 6723, 6724, 6725, 6726, 6727, 6728, 6729, 6416, 6730, 6731, 6666, 6667, 6732, 6733, 6667, 6666, 6738, 6416, 6739, 6740, 6741, 6675, 6440, 6744, 6745, 6746, 6689, 6748, 6749, 6750, 6751, 6752, 6753, 6743, 6755, 6756, 6470, 6758, 6759, 6743, 6760, 6761, 6743, 6689, 6764, 6765, 6766, 6767, 6768, 6769, 6771, 6772, 6743, 6773, 6774, 6775, 6777, 6456, 6778, 6457, 6780, 6737, 6781, 6782, 6783, 6786, 6787, 6788, 6789, 6482, 6790, 6791, 6792, 6696, 6794, 6698, 6796, 6470, 6798, 6474, 6800, 6482, 6802, 6803, 6804, 6743, 6806, 6807, 6743, 6737, 6810, 6811, 6743, 6813, 6814, 6500, 6080, 6763, 6511, 6086, 6517, 6524, 6785, 6533, 6536, 6539, 6542, 6545, 6547, 6808, 6809, 6552, 6816, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6196, 6897, 5705, 6205, 6859, 6153, 6899, 5655, 6162, 6851, 6173, 6901, 5679, 6182, 6855, 6902, 6905, 6906, 6153, 6908, 5655, 6162, 6851, 6231, 6910, 5719, 6169, 6862, 6173, 6912, 5679, 6182, 6855, 6913, 6196, 6915, 5705, 6205, 6859, 6916, 5719, 6218, 6862, 6231, 6918, 6241, 5737, 6866, 6920, 6252, 6260, 6925, 6926, 6276, 6928, 5790, 6929, 6313, 6931, 5838, 5842, 6340, 6933, 5870, 6934, 6289, 6936, 5808, 6301, 6938, 6939, 6940, 6276, 6942, 5790, 6943, 6289, 6945, 5808, 6301, 6378, 6947, 5853, 6948, 6313, 6950, 5838, 5842, 6378, 6952, 5853, 5857, 6340, 6954, 5870, 6955, 6957, 6411, 6959, 5962, 6420, 6895, 6960, 6962, 6354, 6966, 6967, 6968, 6886, 6969, 6970, 6890, 6378, 6972, 6973, 6890, 6975, 6977, 6411, 6982, 5962, 6420, 6895, 6983, 6985, 6986, 6989, 6990, 6411, 6992, 5962, 6420, 6895, 6993, 6996, 6480, 6477, 6480, 6478, 6997, 6998, 7001, 7004, 7008, 7009, 7011, 7014, 7015, 7017, 7018, 7021, 7025, 7027, 7028, 7032, 7034, 6480, 6477, 7036, 7037, 7040, 6480, 6477, 6480, 6478, 7044, 7045, 7048, 7050, 7052, 7054, 6480, 6477, 6480, 6478, 6480, 6479, 7056, 7057, 7060, 7061, 7063, 7064, 7065, 7067, 7068, 7070, 7007, 7071, 7072, 7073, 7074, 7024, 7075, 7031, 7076, 7077, 7043, 7078, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 7087, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7104, 7106, 7107, 7108, 7109, 7111, 7112, 7113, 7114, 7116, 7117, 7118, 7120, 7122, 7124, 7125, 7126, 7127, 7129, 7130, 7131, 7132, 7134, 7135, 7136, 7138, 7140, 7141, 7142, 7144, 7145, 7146, 7147, 7149, 7150, 7151, 7153, 7154, 7155, 7157, 7159, 7161, 7163, 7164, 7165, 7167, 7169, 7171, 7172, 7173, 7176, 7178, 7180, 7182, 7183, 7184, 7186, 7188, 7190, 7191, 7192, 7194, 7195, 7196, 7198, 7201, 7203, 7204, 7205, 7208, 7210, 7212, 7213, 7215, 7216, 7219, 
7222, 7224, 7225, 7226, 7228, 7230, 7232, 7234, 7235, 7236, 7207, 7239, 7240, 7221, 7241, 7242, 7244, 6904, 7245, 7246, 6995, 6062, 6995, 6062, 6904, 6021, 6919, 6021, 7253, 7254, 7257, 6020, 7207, 7260, 7261, 7263, 7264, 7207, 7265, 7266, 7221, 7267, 7268, 7270, 6020, 6020, 6021, 7207, 7275, 7276, 7221, 7277, 7278, 7279, 7280, 7282, 6995, 6062, 6995, 6062, 7291, 7248, 7012, 7013, 7251, 7255, 7287, 7296, 7255, 7298, 7033, 7035, 7301, 7049, 7051, 7053, 7055, 7284, 7287, 7287, 7289, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6896, 7361, 6898, 7365, 6900, 7369, 6907, 7374, 6909, 7378, 6911, 7382, 6914, 7386, 7389, 7392, 7393, 6921, 6923, 6927, 6930, 6932, 6935, 6941, 6944, 6946, 6949, 6951, 6953, 6958, 7426, 6965, 6971, 6981, 7437, 6991, 7443, 7446, 7447, 7431, 7449, 7450, 7453, 5988, 7454, 7456, 7457, 7458, 7459, 7460, 5988, 7461, 7462, 7463, 7464, 7441, 6455, 7408, 7168, 7409, 7467, 7468, 7469, 7473, 7474, 7431, 7476, 7477, 7168, 7403, 7408, 7160, 7409, 7480, 7160, 7403, 7168, 7408, 7409, 7481, 7179, 7414, 7187, 7419, 7422, 7199, 7440, 7482, 7483, 7484, 7431, 7433, 7435, 7486, 7487, 7440, 7489, 7441, 7492, 7493, 7494, 7495, 7452, 7497, 7498, 7499, 7500, 7501, 7502, 7504, 7466, 7506, 7507, 7471, 7472, 7479, 7509, 7510, 7511, 7512, 7491, 7513, 7514, 7515, 7516, 61, 62, 63, 7202, 7428, 7209, 7591, 7110, 7367, 7105, 7363, 7115, 7371, 7595, 7233, 7445, 7233, 7445, 7105, 7363, 7110, 7367, 7115, 7371, 7602, 7123, 7376, 7128, 7380, 7133, 7384, 7139, 7388, 7143, 7391, 7148, 7395, 6924, 6922, 7445, 7607, 7608, 7170, 7609, 7166, 7610, 7611, 7202, 7428, 7202, 7428, 7209, 7617, 7166, 7620, 7162, 7621, 7170, 7622, 7158, 7623, 7624, 7158, 7626, 7162, 7627, 7166, 7628, 7170, 7629, 7630, 7177, 7632, 7181, 7633, 7185, 7634, 7189, 7635, 7193, 7636, 7197, 7637, 7638, 7202, 7428, 7209, 7642, 7643, 7217, 7644, 7223, 7439, 7647, 7649, 7233, 7445, 7233, 7445, 7590, 7593, 7654, 7455, 7598, 7600, 7603, 7605, 7465, 7662, 7614, 7665, 7666, 7616, 7619, 7667, 7641, 7646, 7672, 7651, 7653, 7292, 7306, 7299, 7293, 7294, 7295, 7297, 7306, 7299, 7304, 7303, 7306, 7305, 7308, 7309, 7310, 7311, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7680, 7681, 7682, 7684, 7685, 7686, 7687, 7688, 7689, 7691, 7692, 7693, 7694, 7695, 7696, 7697, 7698, 7699, 7700, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7719, 7721, 7724, 7725, 7726, 7727, 7728, 7730, 7732, 7734, 7736, 7739, 7741, 7743, 7745, 7748, 7750, 7752, 7754, 7756, 7758, 7761, 7762, 7763, 7766, 7768, 7769, 7772, 7773, 7774, 7775, 7776, 7777, 7690, 7779, 7780, 7781, 7701, 7782, 7783, 7784, 7718, 7612, 7786, 7789, 7790, 7625, 7631, 7639, 7792, 7793, 7648, 7651, 7795, 7796, 7290, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7505, 7804, 7805, 7300, 7508, 7302, 7806, 7807, 7808, 7809, 7307, 7810, 7811, 7812, 7813, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7903, 7873, 7767, 7765, 7683, 7880, 7894, 7876, 7900, 7878, 7939, 7882, 7884, 7886, 7900, 7888, 7894, 7890, 7943, 7900, 7892, 7894, 7898, 7896, 7902, 7900, 7898, 7947, 7753, 7722, 7720, 7757, 7740, 7742, 7948, 7909, 7911, 7767, 7765, 7729, 7733, 7737, 7757, 7731, 7735, 7753, 7952, 7742, 7746, 7757, 7740, 7753, 7744, 7953, 7751, 7753, 7749, 7759, 7757, 7755, 7954, 7928, 7767, 7765, 7764, 7932, 
7957, 7958, 7934, 7936, 7961, 7496, 7963, 7503, 7969, 7970, 7972, 7973, 7974, 7975, 7977, 7979, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8008, 8009, 8011, 8012, 8013, 8014, 8015, 8016, 8017, 8019, 8020, 8021, 8022, 8023, 8024, 8025, 8026, 7905, 8028, 8029, 8030, 8031, 8032, 8033, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 8045, 8047, 8048, 8049, 8050, 8051, 8052, 8054, 8055, 8056, 8057, 8058, 8059, 8061, 8062, 8063, 8064, 8065, 8068, 8069, 8071, 8073, 63, 8129, 8132, 8134, 8139, 8141, 8144, 8146, 8149, 8152, 8153, 8155, 8157, 8161, 8164, 8166, 8168, 8170, 8172, 8174, 8176, 8178, 8180, 8183, 8066, 7937, 7941, 7942, 8066, 7949, 8066, 7950, 8066, 7955, 8067, 7959, 7960, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8192, 8193, 8195, 8197, 8199, 8201, 8204, 8205, 8208, 8211, 8214, 8215, 8216, 8217, 8218, 8027, 8219, 8220, 8221, 8222, 8223, 8224, 8225, 8226, 8227, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8257, 8258, 8259, 8261, 8263, 8264, 8265, 7938, 7945, 7945, 8271, 7951, 7951, 7956, 7965, 7966, 7982, 7968, 7962, 7983, 8077, 7967, 7980, 7981, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8327, 8328, 7944, 8010, 8329, 7944, 8018, 8060, 8034, 8331, 8332, 8060, 8046, 8060, 8053, 8333, 8334, 8335, 8336, 8337, 8338, 8074, 8339, 8340, 8341, 8342, 8343, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8267, 8386, 8387, 8389, 8390, 8391, 8392, 8272, 8274, 8395, 8396, 8397, 8398, 8276, 8405, 8401, 8408, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8448, 8385, 8388, 8453, 8455, 8456, 8457, 8459, 8461, 8462, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8513, 8514, 8075, 8070, 8079, 8081, 8076, 8078, 8080, 8072, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8578, 8189, 8579, 8580, 8581, 8582, 8583, 8584, 8190, 8585, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8641, 8648, 8400, 8642, 8404, 8407, 8646, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8403, 8705, 8706, 8708, 8709, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 
63, 8768, 8710, 8771, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8770, 8833, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8896, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8960, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
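// h_C: auto-generated host-side fixture values (presumably reference/expected
// data for a validation harness defined elsewhere in this file).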
int h_C[] = {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 
1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 961, 962, 963, 964, 967, 968, 969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 1163, 1164, 3575, 3577, 3579, 3581, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3591, 1265, 1266, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 1312, 1315, 1321, 1324, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 1550, 1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 1770, 1771, 3721, 3723, 3725, 3727, 3729, 3731, 1805, 1806, 1909, 1910, 3737, 3739, 3741, 1925, 1926, 1930, 1931, 3747, 3749, 3751, 3753, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3764, 3766, 2047, 3769, 2071, 3772, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3840, 4048, 4048, 3841, 3843, 3842, 3844, 4048, 4048, 4048, 3846, 3845, 3848, 3847, 3849, 3852, 3851, 4057, 4059, 3854, 3853, 4061, 4063, 3856, 3855, 3858, 3857, 3860, 3859, 3862, 3861, 3863, 3866, 3865, 3868, 3867, 4023, 3869, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3870, 3871, 3981, 3980, 3983, 3982, 3985, 3984, 3872, 4082, 624, 4084, 3993, 3995, 3996, 3998, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 
3982, 3985, 3984, 3986, 624, 4086, 639, 3993, 3995, 3996, 3998, 3874, 3873, 183, 4048, 3876, 192, 3879, 3878, 3881, 3880, 3883, 3882, 183, 4048, 3885, 192, 3888, 3887, 3890, 3889, 3907, 3909, 624, 3892, 3895, 3894, 4104, 3908, 4017, 3934, 3898, 3897, 3900, 3899, 639, 3909, 3902, 3980, 3903, 3906, 3905, 3908, 3907, 3909, 3911, 3910, 3912, 3913, 3915, 3914, 624, 639, 3918, 3920, 3922, 3921, 3923, 3925, 3924, 4008, 3926, 4008, 3927, 718, 3929, 3931, 3933, 4015, 4016, 3934, 3980, 3935, 3937, 3936, 4110, 404, 4112, 405, 3939, 3938, 3941, 3940, 624, 3991, 3991, 639, 3945, 3944, 4114, 4116, 3947, 3946, 3948, 3951, 3950, 3952, 3954, 3951, 3950, 3952, 3954, 4119, 3955, 3957, 3956, 3958, 4041, 3960, 4041, 3961, 3963, 3962, 3964, 3967, 3966, 3969, 3968, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 3988, 3987, 624, 3991, 3990, 639, 3993, 3995, 3996, 3998, 4000, 3999, 4008, 4001, 4008, 4002, 718, 690, 4006, 4005, 4008, 4007, 4008, 4008, 718, 723, 4011, 4014, 4013, 4017, 4015, 4017, 4016, 4018, 4020, 4019, 4023, 4021, 4149, 4023, 4022, 4025, 4024, 4027, 4026, 4029, 4028, 4152, 4031, 4030, 4033, 4032, 4035, 4034, 4154, 4037, 4036, 4038, 4041, 4040, 4043, 4042, 4045, 4044, 4046, 4048, 4047, 4049, 4051, 4050, 4052, 4054, 4053, 4055, 4173, 4176, 1491, 1491, 1491, 4178, 4180, 4182, 4184, 4186, 4191, 4073, 4072, 4166, 4074, 4076, 4075, 4078, 4077, 4079, 4080, 4092, 4091, 4094, 4093, 4203, 4096, 4095, 4132, 4133, 4134, 4098, 4097, 4100, 4099, 4211, 1491, 1491, 4102, 4101, 4213, 1341, 1341, 4218, 1507, 4123, 4106, 4220, 1220, 1220, 4226, 4120, 4117, 4228, 4120, 4120, 4121, 4133, 4134, 4230, 4232, 4123, 4122, 4125, 4124, 4127, 4126, 4132, 4133, 4134, 1341, 1341, 1341, 4137, 4136, 1507, 4166, 4139, 1522, 4155, 4157, 1491, 1491, 1491, 4243, 4163, 4162, 1507, 4166, 4165, 1522, 4169, 4168, 4171, 4170, 4174, 4174, 4174, 4174, 4174, 4174, 4207, 4224, 4189, 4189, 4189, 4189, 4189, 4189, 4194, 4224, 4195, 4197, 4196, 4199, 4198, 4200, 4224, 4201, 4248, 4251, 4224, 4204, 4224, 4205, 4207, 4206, 4209, 4208, 4248, 4247, 4221, 4223, 4222, 4252, 4215, 4214, 4216, 4248, 4247, 4221, 4223, 4222, 4252, 4224, 4255, 4256, 4244, 4244, 4235, 4234, 4264, 4237, 4266, 4239, 4268, 4271, 4244, 4244, 4246, 4246, 4248, 4247, 4249, 4251, 4250, 4252, 4253, 4254, 4255, 4256, 4261, 4261, 4257, 4257, 4258, 4258, 4261, 4261, 4261, 4261, 4261, 4261, 4262, 4262, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 
1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4306, 4305, 4306, 4305, 4310, 4309, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4174, 2188, 2189, 4174, 2191, 2192, 4174, 2194, 2195, 2198, 2199, 4189, 2265, 2266, 4589, 4588, 4189, 2270, 2271, 4607, 4591, 4189, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4617, 4617, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4737, 4735, 2612, 2613, 4738, 4735, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4733, 4733, 4738, 4737, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4801, 4803, 4805, 4807, 4809, 4811, 4813, 4816, 4818, 4820, 4822, 4824, 4826, 4829, 4831, 4833, 4835, 4837, 4839, 4842, 4845, 4847, 4849, 4857, 4859, 4861, 4864, 4867, 4869, 4871, 4880, 4883, 4886, 4888, 4890, 4893, 4896, 4898, 4904, 4909, 4911, 4915, 4918, 4920, 4923, 4927, 4933, 4936, 4938, 4940, 4949, 4951, 4955, 4957, 4960, 4963, 4965, 4968, 4972, 4977, 4980, 4982, 4984, 4987, 4989, 4991, 4993, 4995, 4998, 5001, 5003, 5005, 5008, 5011, 5018, 5020, 5022, 5026, 5028, 5030, 5035, 5037, 5039, 5042, 5044, 5046, 5048, 5050, 5052, 5054, 5056, 5058, 5060, 5063, 5065, 5067, 5070, 5073, 5076, 5015, 4814, 4067, 1582, 1583, 1588, 1589, 5015, 4827, 5032, 1608, 1609, 5015, 4827, 4067, 4069, 5015, 4827, 5032, 5088, 5090, 5092, 5094, 4345, 4343, 4854, 4852, 4368, 4368, 4877, 4875, 5098, 5100, 5102, 5107, 5109, 5113, 5015, 4942, 4946, 4945, 4899, 4900, 4916, 4930, 4942, 5015, 4906, 4906, 4905, 4907, 4916, 5015, 4942, 4913, 5015, 4916, 4942, 4921, 4930, 4942, 4946, 4945, 4944, 4947, 5118, 5015, 5013, 5024, 5032, 5024, 5032, 5015, 5013, 5032, 4463, 4462, 4463, 4463, 4475, 4475, 5122, 4475, 4475, 5124, 5129, 5131, 5133, 5141, 5144, 5015, 5013, 5032, 5152, 5155, 5158, 5160, 4607, 4582, 2187, 2190, 2193, 5080, 5079, 5078, 5146, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5187, 5189, 5191, 5194, 4607, 4607, 4607, 4607, 5198, 5200, 5202, 5105, 5104, 5103, 5204, 5111, 5110, 2324, 5111, 5110, 2330, 5146, 5146, 5115, 5138, 5114, 4642, 4625, 5120, 5119, 5208, 5211, 5214, 5115, 5138, 5114, 4642, 4625, 5120, 5119, 5217, 5220, 5223, 5127, 5126, 5125, 4642, 4641, 5228, 5136, 5135, 5134, 5139, 5138, 5137, 5146, 5150, 5149, 
5148, 5232, 5234, 5236, 5239, 4261, 5196, 5195, 2597, 2598, 4257, 5171, 5170, 2617, 2618, 5196, 5195, 5243, 5192, 4258, 4261, 5196, 5195, 4261, 4261, 4262, 4733, 2815, 2816, 2826, 2827, 4735, 5243, 5242, 5241, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5313, 5312, 5314, 5316, 5315, 5317, 5379, 5378, 5353, 5380, 5381, 5352, 5385, 5318, 1572, 1573, 5388, 5387, 5319, 4066, 5391, 5391, 5390, 1581, 5415, 5397, 5396, 5395, 4068, 5417, 5379, 5378, 5362, 5380, 5381, 5352, 5385, 5324, 1598, 1599, 5388, 5387, 5325, 5024, 5391, 5391, 5390, 1607, 5422, 5327, 5397, 5395, 5408, 5321, 5410, 5409, 5379, 5378, 5377, 5380, 5381, 5352, 5385, 5324, 1625, 1626, 5388, 5387, 5322, 4066, 5391, 5391, 5390, 1634, 5394, 5327, 5397, 5367, 5385, 5324, 5388, 5387, 5325, 4068, 5391, 5391, 5390, 1648, 5394, 5327, 5397, 5367, 5381, 5352, 5379, 5378, 5377, 5380, 5385, 5324, 1661, 1662, 5391, 5391, 5390, 1666, 5388, 5387, 5325, 5024, 5393, 5364, 5327, 5397, 5367, 5408, 5407, 5330, 5329, 5328, 5331, 5332, 5334, 1739, 1740, 1741, 1742, 5337, 5336, 5335, 5338, 5339, 5341, 1749, 1750, 1751, 1752, 5343, 5342, 5345, 5344, 5347, 5346, 5349, 5348, 5379, 5377, 5356, 5380, 5381, 5352, 5385, 5357, 5358, 1820, 5361, 5360, 5359, 1824, 1825, 1826, 1827, 1828, 5379, 5377, 5351, 5380, 5381, 5352, 5385, 5357, 1837, 1838, 5361, 5360, 5354, 1842, 5394, 5393, 5392, 5350, 5379, 5377, 5351, 5358, 1851, 5361, 5360, 5359, 1855, 1856, 1857, 1858, 5379, 5377, 5351, 5380, 5381, 5352, 5385, 5357, 1867, 1868, 5361, 5360, 5354, 1872, 5394, 5393, 5355, 1876, 5379, 5378, 5353, 5385, 5357, 1882, 1883, 5361, 5360, 5354, 1887, 5394, 5393, 5355, 1891, 5379, 5356, 5362, 5380, 5383, 5385, 5357, 5358, 1900, 5361, 5360, 5359, 1904, 1905, 1906, 1907, 1908, 5375, 5372, 5379, 5378, 5362, 5380, 5381, 5383, 5366, 5365, 1960, 1961, 5387, 5363, 1964, 1965, 5394, 5393, 5364, 5397, 5367, 5396, 5387, 5386, 1974, 5391, 5390, 1977, 5394, 5393, 5364, 5379, 5378, 5377, 5380, 5381, 5383, 5366, 5365, 1989, 1990, 5388, 5024, 5391, 1994, 5394, 5393, 5392, 5397, 5367, 5396, 5373, 5376, 2003, 2004, 2005, 2006, 5368, 5369, 2009, 2010, 5370, 2016, 2017, 5371, 5373, 5372, 5374, 5376, 5375, 5379, 5378, 5377, 5380, 5381, 5383, 5385, 5384, 2086, 2087, 5388, 5387, 5386, 5024, 5391, 5391, 5390, 2095, 5394, 5393, 5392, 5397, 5396, 5395, 5398, 5400, 5401, 5403, 5404, 5405, 5408, 5407, 5410, 5409, 2185, 2186, 5162, 5165, 5168, 5476, 2200, 2201, 2202, 2226, 5173, 5519, 5178, 5522, 5183, 5431, 5430, 5432, 5433, 5506, 5507, 5442, 2303, 2304, 2305, 2306, 5444, 2316, 2317, 2318, 5507, 2322, 2323, 5500, 5445, 5476, 2328, 2329, 5500, 5499, 5476, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5476, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5476, 2415, 2416, 4632, 4632, 4632, 4635, 2474, 2475, 2476, 2477, 2478, 5496, 5497, 5498, 5506, 5507, 2494, 2495, 2496, 2500, 2501, 2502, 5500, 5499, 5506, 5507, 2526, 2527, 2528, 2529, 5505, 5504, 5506, 5507, 2564, 5538, 2582, 2583, 5533, 5526, 5525, 5524, 5586, 5585, 5591, 5586, 5585, 2611, 2614, 2615, 5596, 2648, 2649, 5533, 5526, 5525, 5524, 2656, 2657, 2667, 5533, 5534, 5527, 5555, 5554, 5556, 2695, 5533, 5532, 2703, 2704, 5534, 5533, 5532, 2722, 5538, 5555, 5554, 5556, 5565, 5564, 5566, 5555, 5554, 5556, 5565, 5564, 5566, 2802, 5572, 2809, 2814, 5586, 5585, 2828, 5612, 5586, 5585, 2838, 2839, 2840, 57, 58, 59, 60, 61, 62, 63, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5647, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5671, 1600, 1601, 
1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5697, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5733, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 5756, 5758, 1743, 1744, 1745, 1746, 1747, 1748, 5766, 5768, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 5792, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 5804, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 5822, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 5834, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 5849, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 5872, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 5886, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 5915, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 5929, 5931, 2007, 2008, 5935, 2015, 5938, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 5954, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 5980, 5163, 5166, 5169, 2196, 5986, 5174, 5179, 5184, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 6002, 6004, 2307, 6007, 2319, 6011, 2325, 2326, 2327, 6016, 2331, 2332, 2333, 6023, 6026, 2397, 6029, 6031, 6034, 2414, 6037, 2437, 2458, 2471, 2473, 6043, 6046, 2479, 2480, 2482, 2489, 2490, 6053, 6056, 2503, 2504, 2505, 2506, 6064, 2530, 2531, 2532, 2535, 5245, 2567, 6073, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5249, 6085, 6088, 2650, 2651, 2652, 2653, 6094, 5253, 2670, 2671, 2672, 2681, 2682, 2683, 5255, 2698, 2699, 6106, 2705, 2706, 2707, 5257, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5259, 2805, 5261, 5609, 2824, 2825, 6130, 2836, 2837, 6135, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6145, 6148, 6151, 6157, 6160, 6164, 6167, 6171, 6177, 6180, 6184, 6187, 6190, 6192, 6194, 6200, 6203, 6207, 6211, 6214, 6216, 6220, 6224, 6229, 6233, 6236, 6239, 6243, 6245, 6248, 6250, 6258, 6266, 6268, 6270, 6272, 6274, 6280, 5786, 6283, 5793, 6287, 6293, 6296, 6299, 6303, 5817, 6307, 5823, 6311, 6317, 6320, 6323, 6326, 6329, 6332, 6335, 6338, 6343, 5866, 6346, 5873, 6352, 6358, 6361, 6363, 6366, 6369, 6371, 6373, 6376, 6382, 6388, 6391, 6404, 6407, 6409, 6415, 6418, 6422, 6425, 6428, 6437, 6439, 5987, 6197, 6154, 6174, 6432, 6430, 6434, 6154, 6379, 6174, 6197, 6226, 6434, 6450, 6253, 6255, 6261, 6263, 6008, 5541, 6463, 5544, 6467, 6277, 6314, 6379, 6290, 6430, 6277, 6290, 6379, 6314, 6379, 6379, 6432, 6024, 6032, 6412, 6432, 6430, 6397, 6405, 6398, 6355, 6379, 6386, 6384, 6432, 6430, 6405, 6402, 6398, 6400, 6412, 6432, 6430, 6398, 6400, 6044, 6054, 6057, 6491, 6412, 6432, 6430, 6434, 6065, 6496, 6460, 6485, 6484, 5246, 6442, 6441, 6460, 6452, 6451, 6503, 6505, 6493, 6492, 6507, 6444, 6444, 6487, 6486, 6509, 5250, 6447, 6446, 6460, 6452, 6451, 6514, 6516, 6454, 6453, 6460, 6458, 6485, 5254, 6520, 6464, 6523, 6468, 6460, 6485, 6484, 5256, 
6527, 6458, 6485, 6483, 6530, 6460, 6485, 6484, 5258, 6464, 6535, 6468, 6538, 6471, 6541, 6475, 6544, 6485, 6484, 6483, 5260, 6487, 6486, 5262, 5610, 6493, 6492, 6551, 6498, 6497, 6554, 6136, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6152, 6161, 6165, 6168, 6172, 6181, 6185, 6188, 6195, 6204, 6208, 6212, 6217, 6221, 6225, 6230, 6237, 6240, 6246, 6251, 6259, 6275, 6284, 6288, 6297, 6300, 6304, 6308, 6312, 6321, 6324, 6327, 6333, 6336, 6339, 6347, 6353, 6364, 6367, 6374, 6377, 6389, 6392, 6410, 6419, 6423, 6426, 6429, 2203, 6607, 2209, 6595, 2215, 6600, 2221, 2222, 2223, 6593, 6592, 2227, 6595, 2233, 6611, 2239, 6600, 6604, 2246, 6607, 6611, 2257, 6616, 2262, 6621, 2291, 2293, 2294, 2296, 6626, 6624, 2334, 6629, 6632, 2339, 6642, 2344, 6650, 6653, 2349, 6634, 2354, 6397, 6405, 6674, 2359, 6629, 6632, 2364, 6634, 2369, 6646, 6640, 2374, 6642, 2379, 6646, 2384, 6650, 6653, 2389, 6674, 2426, 6669, 2432, 2433, 2434, 2435, 2436, 2438, 6655, 6660, 6656, 6660, 6659, 2447, 6663, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6669, 2466, 2467, 6397, 6405, 2470, 2472, 6405, 6402, 2516, 6669, 2522, 2523, 2524, 6674, 6735, 2561, 2562, 2563, 6443, 2577, 2578, 2579, 2580, 2581, 6754, 6676, 2593, 2594, 6711, 2600, 2601, 6742, 2607, 2608, 6676, 6448, 2643, 2644, 2645, 2646, 2647, 6770, 2654, 2655, 6735, 2664, 2665, 2666, 6521, 6711, 2680, 6712, 2685, 6735, 2692, 2693, 2694, 2700, 2701, 2702, 6531, 6694, 2719, 2720, 2721, 6695, 2737, 6697, 2742, 6711, 2771, 6712, 2776, 6734, 2799, 2800, 2801, 6742, 2807, 2808, 6735, 6736, 2822, 2823, 6742, 2834, 2835, 6747, 6757, 6762, 6812, 6812, 6812, 6779, 6784, 6793, 6795, 6797, 6799, 6801, 6805, 6812, 6812, 6812, 6815, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6856, 2205, 6858, 6857, 6209, 6848, 2211, 6850, 6849, 5656, 6852, 2217, 6854, 6853, 5680, 6903, 2224, 2225, 6848, 2229, 6850, 6849, 5656, 6879, 2235, 6861, 6860, 5661, 6852, 2241, 6854, 6853, 5680, 2245, 6856, 2248, 6858, 6857, 6209, 2252, 6861, 6860, 6222, 6863, 2258, 6865, 6864, 6619, 2263, 6867, 6868, 2297, 2298, 6869, 2336, 6870, 2338, 6876, 2341, 6877, 6878, 6882, 2346, 6883, 2348, 6871, 2351, 6872, 6873, 2355, 2356, 2357, 6869, 2361, 6870, 2363, 6871, 2366, 6872, 6873, 6874, 2371, 6875, 2373, 6876, 2376, 6877, 6878, 6879, 2381, 6880, 6881, 6882, 2386, 6883, 2388, 2390, 6891, 2428, 6893, 6892, 6894, 6961, 6963, 6884, 2440, 2441, 2442, 6885, 2444, 2445, 6887, 6888, 2449, 6974, 6889, 6976, 6978, 6891, 2462, 6893, 6892, 6894, 6984, 2468, 2469, 2492, 2493, 6891, 2518, 6893, 6892, 6894, 6994, 2525, 6980, 6964, 6980, 6979, 2560, 6999, 2576, 7005, 2592, 7010, 2599, 2606, 7016, 2616, 2642, 7022, 7026, 2663, 7029, 2679, 2684, 6980, 6964, 2691, 7038, 7041, 6980, 6964, 6980, 6979, 2718, 7046, 2736, 2741, 2770, 2775, 6980, 6964, 6980, 6979, 6988, 6987, 2798, 7058, 2806, 7062, 2813, 2821, 7066, 2833, 7069, 2848, 6501, 2860, 2866, 2868, 2870, 6512, 2890, 6776, 2903, 2908, 6528, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 2962, 2966, 2970, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 7121, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2292, 2295, 7156, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 
7174, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 2429, 2430, 2431, 2439, 7211, 2443, 7214, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7229, 7231, 2517, 2519, 2520, 2521, 7206, 2553, 2554, 7220, 2558, 2559, 7000, 7119, 7002, 7006, 7237, 7238, 7237, 7238, 7119, 7137, 7227, 7152, 7019, 7023, 7030, 7175, 7206, 2689, 2690, 7039, 7042, 7206, 2711, 2712, 7220, 2716, 2717, 7047, 7175, 7175, 7200, 7206, 2783, 2784, 7220, 2791, 2792, 2796, 2797, 7059, 7237, 7238, 7237, 7238, 2856, 7247, 7249, 7274, 7250, 7285, 7252, 2888, 7286, 2894, 7258, 7259, 2910, 7271, 7272, 7273, 7274, 7283, 7285, 7286, 7288, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7360, 7362, 7364, 7366, 7368, 7370, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7390, 6917, 7394, 7396, 7397, 7399, 7401, 7404, 7406, 7410, 7412, 7415, 7417, 7420, 7423, 7425, 7427, 7429, 7434, 7436, 7438, 7442, 7444, 2552, 7448, 7430, 2557, 7451, 2574, 7372, 7003, 2590, 2591, 2604, 2605, 2625, 7372, 2633, 2640, 2641, 7020, 7237, 7398, 7407, 7405, 6937, 2678, 2688, 7470, 2710, 7475, 7430, 2715, 7478, 7405, 7402, 7407, 7400, 6937, 2735, 7400, 7402, 7405, 7407, 6937, 2755, 7411, 7413, 7416, 7418, 7421, 7424, 6956, 2769, 2782, 7485, 7430, 7432, 7218, 2790, 7488, 7227, 7490, 7237, 2819, 2820, 2831, 2832, 7243, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7256, 2902, 2904, 7262, 7286, 7269, 2926, 2928, 2944, 2946, 7281, 2958, 2961, 2965, 2969, 61, 62, 63, 7581, 7582, 7583, 2556, 7554, 7555, 7552, 7553, 7556, 7557, 2575, 7587, 7588, 7587, 7588, 7552, 7553, 7554, 7555, 7556, 7557, 2626, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7579, 7566, 7567, 7568, 7570, 7569, 7588, 2661, 2662, 7574, 2674, 7573, 2676, 2677, 7581, 7582, 7581, 7582, 7583, 2714, 7573, 2727, 7572, 2729, 7574, 2731, 7571, 2733, 2734, 7571, 2747, 7572, 2749, 7573, 2751, 7574, 2753, 2754, 7575, 2757, 7576, 2759, 7577, 2761, 7578, 2763, 7579, 2765, 7580, 2767, 2768, 7581, 7582, 7583, 2786, 2787, 7584, 2789, 7585, 7586, 2795, 2812, 7587, 7588, 7587, 7588, 7589, 7592, 2847, 7596, 7597, 7599, 7604, 7604, 7606, 2893, 7613, 2907, 2909, 7615, 7618, 2917, 7640, 7645, 2956, 7650, 7652, 7655, 7657, 7656, 7658, 7659, 7660, 7661, 7664, 7663, 7669, 7668, 7671, 7670, 7673, 7674, 7675, 7676, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7594, 2855, 2858, 2864, 7601, 2882, 2886, 2887, 7717, 7723, 2906, 2912, 2916, 7738, 7747, 7760, 2949, 2953, 7770, 7771, 2964, 2968, 7778, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 7785, 2996, 2997, 7787, 7788, 7791, 3009, 3010, 3013, 3014, 7794, 3019, 3021, 3023, 3025, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7904, 7872, 7930, 7930, 7874, 7879, 7893, 7875, 7899, 7877, 2854, 7881, 7883, 7885, 7899, 7887, 7893, 7889, 2876, 7899, 7891, 7893, 7897, 7895, 7901, 7899, 7897, 2892, 7923, 7907, 7906, 7925, 7917, 7918, 2901, 7908, 7910, 7930, 7930, 
7912, 7914, 7916, 7925, 7913, 7915, 7923, 2925, 7918, 7920, 7925, 7917, 7923, 7919, 2936, 7922, 7923, 7921, 7926, 7925, 7924, 2943, 7927, 7930, 7930, 7929, 7931, 2955, 2960, 7933, 7935, 2974, 7940, 7964, 7946, 2993, 7971, 3001, 3002, 3006, 7976, 7978, 3018, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2841, 2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 8000, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 63, 8130, 8133, 8135, 8140, 8142, 8145, 8147, 8150, 2891, 8154, 8156, 8158, 8162, 8165, 8167, 8169, 8171, 8173, 8175, 8177, 8179, 8181, 8184, 8186, 8128, 8137, 8138, 8186, 8159, 8186, 8160, 8186, 8182, 8187, 8187, 8188, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8131, 8194, 8196, 8198, 8151, 8202, 8163, 8206, 8209, 8212, 8185, 2971, 2973, 2979, 2983, 8200, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8136, 8143, 8148, 8203, 8207, 8210, 8213, 8256, 8260, 8260, 2992, 8262, 8262, 8266, 8270, 8278, 8279, 8279, 8269, 8280, 8278, 8279, 8279, 8278, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2972, 2975, 8322, 8320, 2987, 8322, 8321, 8326, 8323, 2999, 3004, 8326, 8324, 8326, 8325, 3016, 3026, 3028, 3029, 3030, 3034, 8330, 3037, 3038, 3040, 3041, 3045, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8384, 2976, 2977, 2988, 2989, 2994, 2995, 8393, 8394, 3007, 3008, 3011, 3012, 8399, 3036, 8402, 8409, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8268, 8449, 8451, 8454, 8273, 8275, 8458, 8460, 8277, 8406, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8450, 8452, 8515, 8512, 8518, 8520, 8516, 8517, 8519, 8515, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3027, 8576, 3032, 3033, 3035, 3039, 3042, 3043, 8577, 3046, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3031, 3044, 8640, 8643, 8644, 8645, 8647, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8704, 8410, 8463, 8521, 8464, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8707, 8769, 8772, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8832, 8649, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8834, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8897, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
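/*
  Hedged sketch: the arrays in this block look like auto-generated host-side
  test fixtures. Assuming they are consumed by a harness later in this file,
  staging one of them on the GPU with standard HIP runtime calls could look
  like the helper below (the helper name and its use are hypothetical, not
  part of the original harness).
*/
static int *stage_h_C_on_device(size_t count) {
  int *d_C = nullptr;
  if (hipMalloc((void **)&d_C, count * sizeof(int)) != hipSuccess)
    return nullptr; // allocation failed
  if (hipMemcpy(d_C, h_C, count * sizeof(int), hipMemcpyHostToDevice) !=
      hipSuccess) {
    hipFree(d_C); // avoid leaking the device buffer on copy failure
    return nullptr;
  }
  return d_C;
}

// h_Op: per-entry boolean flags, presumably paired with the fixture data
// above for the same auto-generated harness.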
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
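/* Launch configuration and table sizes for the generated circuit below.
   Note the sizes factor over the fixed thread count:
     SIZE_OF_IN = 3072 = 48 inputs x 64 threads,
     SIZE_OF_AC = 6016 = 94 gates  x 64 threads. */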
#define THREADS_PER_BLOCK 64
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3072
#define SIZE_OF_AC 6016
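/*
  Function: evaluate a fixed, generated arithmetic circuit in shared memory
  Args:
    A      : circuit inputs, flat array of SIZE_OF_IN floats; A[0] also
             receives the final result
    B, C   : per-gate operand indices into the shared register file R,
             SIZE_OF_AC entries each
    Op     : per-gate opcode, SIZE_OF_AC entries; true = multiply, false = add
    n_iter : number of times the circuit is re-evaluated and accumulated
  Each of the THREADS_PER_BLOCK threads owns one lane of a 142-slot shared
  register file: slots 0..47 are staged from A, slots 48..141 are produced
  by 94 two-input gates.
*/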
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  // Per-thread register file: 48 staged inputs + 94 gate outputs = 142 slots.
  __shared__ float R[142*THREADS_PER_BLOCK];
  const int t = THREADS_PER_BLOCK;
  __shared__ float final;
  // Only thread 0 ever reads or updates the accumulator, so initialize it
  // there once instead of racing every thread against the same address.
  if (i == 0) final = 0.0f;
  // Stage this thread's 48 input values from global memory into the
  // shared register file (slots 0..47 of lane i).
  for (int k = 0; k < 48; k++) {
    R[i + k*t] = A[i + k*t];
  }
  __syncthreads();
  // The 94 gates fall into 30 dependency levels; level l runs the gates with
  // index k in [level_end[l-1], level_end[l]), mirroring the barrier
  // placement of the original unrolled code.
  const int level_end[30] = {12, 19, 27, 35, 40, 48, 55, 59, 63, 67,
                             70, 72, 75, 77, 79, 80, 81, 82, 83, 84,
                             85, 86, 87, 88, 89, 90, 91, 92, 93, 94};
  for (int iter = 0; iter < n_iter; iter++) {
    int k = 0;
    for (int l = 0; l < 30; l++) {
      // Gate k reads R[B[i + k*t]] and R[C[i + k*t]] and writes lane slot
      // 48 + k; Op selects multiply (true) over add (false). A barrier
      // after each level makes its outputs visible to the next level.
      for (; k < level_end[l]; k++) {
        R[i + (48 + k)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
                                        : R[B[i + k*t]] + R[C[i + k*t]];
      }
      __syncthreads();
    }
    // Lane 0 accumulates the circuit's root value for this iteration.
    if (i == 0) { final += R[141*t]; }
    __syncthreads();
  }
  // Publish the accumulated result through A[0].
  if (i == 0) { A[0] = final; }
}
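
// A minimal usage sketch, not part of the original file: `ac` is a
// __device__ routine, so it needs a __global__ entry point to be launched.
// The wrapper name ac_kernel and the device buffers d_A, d_B, d_C, d_Op in
// the launch line below are illustrative assumptions.
__global__ void ac_kernel(float *A, const int *B, const int *C,
                          const bool *Op, int n_iter) {
  ac(A, B, C, Op, n_iter);
}
// Launched with the one-block geometry the shared-memory layout assumes:
//   hipLaunchKernelGGL(ac_kernel, dim3(BLOCKS_PER_GRID),
//                      dim3(THREADS_PER_BLOCK), 0, 0,
//                      d_A, d_B, d_C, d_Op, /*n_iter=*/1);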
0.9253709565962183, 0.8991821885660147, 0.8135058670197116, 0.7589771777127562, 0.9478714741477258, 0.8595509946064208, 0.8535435980107899, 0.5273529412635047, 0.5432859731801132, 0.9155021587246206, 0.6530609134899201, 0.9325770696368664, 0.8480776448942414, 0.8094694023190814, 0.6154601374319297, 0.7128538469866149, 0.8165493829199933, 0.5051529869678096, 0.9598922488091417, 0.6028483345465174, 0.728642469839998, 0.7325540766460573, 0.834836083087562, 0.7174629180510331, 0.9876991125941831, 0.7051900449120676, 0.7714220989850871, 0.5723160035747583, 0.9234803232551262, 0.8780223510993465, 0.9188710065776873, 0.5103226215909413, 0.9635271575363713, 0.940244822948358, 0.6744502768745843, 0.955621715037075, 0.5220712537638685, 0.7654496293594193, 0.5460919277711018, 0.8845163063981873, 0.5212384977973661, 0.857981224742825, 0.7981726550234078, 0.96536703908634, 0.8443442002154493, 0.8018705382927362, 0.8235274560745327, 0.8181742647915351, 0.5188922501546788, 0.6496925289187754, 0.6669159863104381, 0.617079426836273, 0.7431021049579136, 0.5569123155828829, 0.9357654841693319, 0.7224435857969247, 0.5667397289683489, 0.9104049050141614, 0.7181815257977888, 0.6442243089690669, 0.9496167578909831, 0.5053024344842327, 0.8611585770256711, 0.7466579198623194, 0.5629997411728183, 0.5970959648756117, 0.9914256170172386, 0.8005214421046096, 0.6825643306415372, 0.5089699947713033, 0.9211975555041418, 0.9319797746046443, 0.5096619458148612, 0.5203921237921441, 0.6507295236428798, 0.5433539678776844, 0.556052906624505, 0.6524254809584136, 0.5992506871125677, 0.936703675173564, 0.8929390306732288, 0.8686967271993203, 0.8962203726441915, 0.7371416025635431, 0.5198940934224826, 0.6385916675221293, 0.8728613963134714, 0.9615828155654698, 0.6196910715926967, 0.8154399563888691, 0.7900800922836845, 0.580302912176433, 0.8977991171339559, 0.5015054851590662, 0.9220885845997386, 0.9819738917678473, 0.8756009537054932, 0.8088636294631915, 0.6374507700063703, 0.663317183155143, 0.7386533010348891, 0.9923093634269613, 0.5818561080574995, 0.7526616777761118, 0.811325089165613, 0.5672821481757033, 0.6275980427056668, 0.8765856501040606, 0.7242636725052409, 0.9309905687995792, 0.5150707292434097, 0.6559426630282776, 0.6162932516923176, 0.9960347521825266, 0.7124288376431711, 0.7670954888201322, 0.7280472540065532, 0.7643875305010728, 0.9765342642844058, 0.8355627930218592, 0.9750104404256045, 0.5301134357248831, 0.7805099880620585, 0.6452321960232006, 0.7817561763147924, 0.5193243881442117, 0.560969503152547, 0.5613473628525412, 0.7690810952382325, 0.7828273792419752, 0.5054152644567658, 0.7588136482927028, 0.9795473372554943, 0.7830736298463543, 0.6438338871277196, 0.7041638541360838, 0.6122159412359114, 0.5712114982218484, 0.8068102019366056, 0.6239616780625801, 0.929688782730278, 0.531668814149201, 0.6412472487991798, 0.6992025439786067, 0.5710540180831314, 0.5910393014989255, 0.7649565654728958, 0.7285432212894083, 0.5162494806732162, 0.7228269659307935, 0.7290125061356767, 0.5815334801552252, 0.843517491662637, 0.8821636636649975, 0.7281465337106677, 0.9225441650750518, 0.7011764723614714, 0.8217469345217072, 0.8899480417837744, 0.5939652905104598, 0.9978845511886627, 0.9234538769759321, 0.8862857254381704, 0.6372386023985757, 0.7023174312194447, 0.9364269247292629, 0.6639841063948866, 0.9032641983741855, 0.6915058126698029, 0.8054100996143874, 0.5100487684843068, 0.8300047550692435, 0.6857837879058708, 0.6506267475969114, 0.7165743591288839, 0.5036053514050455, 0.9317040866615414, 0.7108755043067905, 
0.8182322905735229, 0.8336928890112152, 0.6547883597008355, 0.9018926990726173, 0.89880978102049, 0.6186627880481559, 0.6593836086965359, 0.67276648963544, 0.7822184102308498, 0.6684874205581698, 0.7674328694291627, 0.8180218964997443, 0.7257177369310089, 0.5523864200091388, 0.7665514996776907, 0.7675492086221051, 0.7976587729226796, 0.5920972301841083, 0.5172732806520535, 0.7448701544224792, 0.5456383980061769, 0.7675865714248676, 0.5928871012037974, 0.6432975441376207, 0.8257859967442878, 0.745937798113723, 0.738390109519, 0.8310884895172159, 0.7950020037710401, 0.5523178484690423, 0.9341061118268646, 0.5092737365124196, 0.8893675295083721, 0.8142633079407278, 0.6906360500239161, 0.7931936096473864, 0.5786214942418056, 0.5255705858438764, 0.9673226994645517, 0.6825658114796198, 0.6990893575908248, 0.7689647279348479, 0.7762629044605842, 0.787899759050261, 0.8612539558253843, 0.6968966869744391, 0.8963898451641584, 0.7874776275692648, 0.6169346246839679, 0.7456162782552551, 0.9253078044675884, 0.8043728921457898, 0.8878897619363925, 0.8101042360139501, 0.5013362865537523, 0.6419645923108228, 0.8495431396476412, 0.5624453309804072, 0.9008383721313372, 0.5147451385287836, 0.7214579946039219, 0.8321499104415846, 0.5196080473580488, 0.8720048937484752, 0.655598445294471, 0.6075474520304793, 0.6154669613042898, 0.8399651297639643, 0.7697946149196762, 0.6011135460944501, 0.9767092106165108, 0.6337856801702281, 0.7776414439798223, 0.7746078191257271, 0.793142159059689, 0.5247866652041935, 0.8948301947235571, 0.7573771514214955, 0.6083668028854381, 0.907300897592991, 0.6504068203424305, 0.617418686216896, 0.5349856086287135, 0.5620550987838409, 0.9820890492475123, 0.7642196626032158, 0.6657158913960136, 0.8539587551642036, 0.7986895751963896, 0.6977576028442419, 0.8688267853679255, 0.7163153318221285, 0.8978054293694165, 0.5562238707891396, 0.7978227035728627, 0.5674909221765463, 0.6899473475380353, 0.7190970972201947, 0.8650081894009487, 0.775035991867493, 0.7217251022807419, 0.8404709341125339, 0.8646288213576638, 0.5379876901276016, 0.5178906129772481, 0.6583084173204156, 0.6731145936478715, 0.5607325051718208, 0.5615589240891601, 0.9293369600567007, 0.5087867846744999, 0.7142602864072984, 0.747480576242112, 0.8135285773948422, 0.6669562535618634, 0.8643794168648794, 0.9310210679108952, 0.9823847198187513, 0.6149409385319168, 0.8655435314817495, 0.7071008261543383, 0.5523976022672145, 0.524183662181188, 0.7653996972823487, 0.8063431516438057, 0.5374745069430411, 0.7894098297770019, 0.6307209584402045, 0.8251222004497787, 0.8691504288574734, 0.8758001159385629, 0.90642734797546, 0.7976328926840953, 0.7615243579177798, 0.8067739043396971, 0.8547605703301723, 0.930946449435019, 0.5053095012523622, 0.8893165830370782, 0.9149247226706171, 0.5770988199864353, 0.8155298174252852, 0.5969915773286172, 0.8919822828700179, 0.7788481560679072, 0.7339365882334028, 0.9521731629482624, 0.908016888632325, 0.7576318890513758, 0.8263561852058019, 0.9886462960575197, 0.6492912242208633, 0.5759064445487654, 0.6438049785659269, 0.9961599573215405, 0.6035878910247205, 0.8192066790878987, 0.6793723801121848, 0.5149176103065249, 0.8716668788881881, 0.7303766687344688, 0.7437744424061299, 0.8403188595919369, 0.9146476681917969, 0.9144226458668399, 0.8241969555195214, 0.9400767188872132, 0.8796708199779166, 0.5867123207272281, 0.5661065534733196, 0.9842670002554704, 0.7088977261678507, 0.8373520495971691, 0.9466016074899567, 0.9671096687559122, 0.5014040450051263, 0.6816596474812255, 0.7413921064322477, 
0.8500197585856759, 0.8225229948041795, 0.7308164850656009, 0.5216623036800799, 0.678317199776818, 0.7851256717946675, 0.5103419355975908, 0.8196601569712345, 0.7818674644292971, 0.5298765015626382, 0.8462327648127925, 0.9772879507000612, 0.6324107344205412, 0.8827171708520256, 0.6256181471563094, 0.8095571740975434, 0.6344620691005516, 0.9747123256749077, 0.592791651077162, 0.557067118035114, 0.790484083530427, 0.598985629525823, 0.8312950794494223, 0.972020532106863, 0.5089042897221394, 0.8060612789814228, 0.9031149678870163, 0.8652151460304494, 0.7669136717954567, 0.5794517676193336, 0.8425363712278714, 0.9891762054200774, 0.8173903761948815, 0.7794259853691299, 0.7339551166065378, 0.5394477406507698, 0.556744012807699, 0.7331446799778027, 0.5454830283207355, 0.8476924384614626, 0.7780073856725516, 0.818329607734478, 0.8211647962868294, 0.9029867473767959, 0.733753611499397, 0.8084270594194268, 0.7615139308409747, 0.8789347356044743, 0.9431097584677678, 0.6158791780463081, 0.8031087852362604, 0.7018641089417004, 0.8535165093406694, 0.5222207985842773, 0.8305967784000268, 0.999898623019865, 0.5832150161577367, 0.8467455984797989, 0.896549339889776, 0.6669288245133367, 0.7212484650603225, 0.6215986100123481, 0.6454031947913343, 0.9682955548864891, 0.6645712604931293, 0.663061184146073, 0.6879227096452558, 0.8893045607909527, 0.6918347601611998, 0.718874237050344, 0.7170744322668204, 0.9266457364640908, 0.6648140609037345, 0.5613284324301513, 0.6966411963937653, 0.8460904481630513, 0.739592525770473, 0.6970233103754411, 0.7791381561353667, 0.5452501057362795, 0.8186900413852012, 0.5272323129378986, 0.8399320293997783, 0.9796375756990707, 0.7952095396443757, 0.798401405719408, 0.9314878077796653, 0.5960148391749803, 0.5900768312339093, 0.5116196464613243, 0.7726322299207953, 0.8959085868747678, 0.6600348343393145, 0.7713417789156092, 0.9178806511047147, 0.6776629075706067, 0.5424048470261353, 0.9446435821224528, 0.809291715618141, 0.6609481671457218, 0.6701550770981094, 0.7366963063825851, 0.7358670240524675, 0.9402398759264472, 0.8883105212949844, 0.5767489068783174, 0.8252968756736643, 0.5543768874781556, 0.7955125789309502, 0.5849021489259048, 0.7544769073707238, 0.6712448881885786, 0.8925486823602715, 0.7165697164072855, 0.89962582211716, 0.7114949402208695, 0.9255693523855371, 0.7763465960183871, 0.618450351181935, 0.7790208386320476, 0.5352202745711507, 0.9891704145525944, 0.6624294634369445, 0.5563976692071078, 0.8882724792281421, 0.9407512647619354, 0.8314322984636187, 0.7807162869160427, 0.6173605711469301, 0.8879770284960171, 0.886161553238737, 0.706173763979181, 0.5748288568455264, 0.897097308546642, 0.6434027726247901, 0.74760194156338, 0.7920322729796607, 0.8555441361438446, 0.6167779623805374, 0.5261916729697578, 0.7616086480511656, 0.5516801142538106, 0.5368518436001746, 0.9098265251701911, 0.7198986338667326, 0.6399799246270028, 0.6640507012194743, 0.8100597413610224, 0.9947068832545278, 0.7707026884031236, 0.9814662478819047, 0.9890101565786802, 0.8937033933251552, 0.9140219134505654, 0.6216556903657474, 0.7557301924469492, 0.9153055653587335, 0.754516446379301, 0.9774686853543858, 0.7502076482947491, 0.6146817991189966, 0.8830231807869793, 0.9646678134010758, 0.5453158678931883, 0.9157895747562862, 0.7622706910881434, 0.8822423893820512, 0.7164041767939908, 0.8655201616946835, 0.9957172073748555, 0.8184460016840276, 0.8085726609681312, 0.8231370182294477, 0.7726921422239639, 0.5589558487777035, 0.6397519476693545, 0.7651546672729788, 0.6078722209659344, 0.9183875427252378, 
0.7862335529028825, 0.8891440159290445, 0.6109661589176987, 0.5365729638518577, 0.6404742572050899, 0.6535953300698574, 0.9676552153095836, 0.9215965981347031, 0.515760110225036, 0.718719823205925, 0.7032753370882583, 0.8551322264716803, 0.7366194035228892, 0.6348135258314045, 0.8283655456532653, 0.9006641899695829, 0.6778397601632018, 0.7322981916569187, 0.7145708542796585, 0.5009851812980679, 0.6676270139881939, 0.6472400057914102, 0.5448059511730533, 0.7682722552757896, 0.7686198830087784, 0.6437274479983579, 0.546644571050813, 0.8022601756921321, 0.5540078581040906, 0.5404803357646176, 0.7930622251001478, 0.9973481352761929, 0.786947871300294, 0.6142307428501093, 0.6205262213662558, 0.6200311119376098, 0.6925347787515862, 0.7463768403961208, 0.9157854106712157, 0.9841070054436734, 0.9046146604123131, 0.8672897415190852, 0.5084601648460894, 0.7625073873469459, 0.8324163747021962, 0.5016499286556593, 0.7240576909173986, 0.5351328689291761, 0.7039042594421585, 0.5412681226663529, 0.7480069391528494, 0.614210766642989, 0.9476529967175451, 0.8911185656182008, 0.5208899659267474, 0.7926277027706221, 0.7897548702533355, 0.6974942145350399, 0.6482394019896965, 0.8338332201828214, 0.9646873368039379, 0.8015251365237392, 0.9233765691926299, 0.6185958025097656, 0.5481499384890696, 0.8296296772151147, 0.6893248659811981, 0.5317261728594251, 0.5458295671561957, 0.8437852024002674, 0.5101631307839003, 0.9326903057353478, 0.9557010368456083, 0.8803604106693255, 0.9042539793946862, 0.5221755262011514, 0.6911902803527633, 0.8633256026710845, 0.9759611541288452, 0.9831686895499968, 0.7726740314507103, 0.5359662365924959, 0.7049292019033488, 0.6159252589704116, 0.5188457977391356, 0.8053665190474937, 0.9822761001453222, 0.712909072782901, 0.7761031219313634, 0.990773681066657, 0.8674799938990794, 0.6740226993512959, 0.6879771168920681, 0.6963561987900253, 0.7418000289018154, 0.7024923243730755, 0.5953431081121066, 0.5851970119943858, 0.6255737677224071, 0.9312029668216117, 0.5948614887303092, 0.5828680610490713, 0.8555473663016062, 0.7873037490485959, 0.6557701870796334, 0.9684821035100158, 0.6079909040684202, 0.9327884268882946, 0.5911808911696887, 0.7605652872617463, 0.9926874369689922, 0.7898819983354092, 0.6356283278357446, 0.7059572460867527, 0.6278501066064799, 0.8424170151022313, 0.5186360410010542, 0.6136251361655191, 0.9457399443190431, 0.6253177657041136, 0.6462976828049336, 0.8366581547266769, 0.8951790282638051, 0.9896551643612765, 0.7858358378929504, 0.993063504185514, 0.6329370916428947, 0.7110768481169445, 0.562755850540752, 0.6172365606427809, 0.5138968216438502, 0.9312926525754174, 0.5564187639649898, 0.5993202917892867, 0.6052324986105493, 0.9071351594473988, 0.5349240051410104, 0.9683161081110236, 0.7815010555708877, 0.7589471296384135, 0.9608367523426091, 0.9811818628886728, 0.6202256428942371, 0.6347571902825007, 0.5696325650429657, 0.6958469660606633, 0.5508213126910059, 0.8600525533497847, 0.6303833962634704, 0.8271573907313883, 0.5802147758369782, 0.511374086216662, 0.935193037408419, 0.6460842197623998, 0.7228526364189263, 0.899147928972994, 0.5708548227700966, 0.5352737074126583, 0.6494057328991445, 0.731492883140168, 0.6622207754544218, 0.5026748339869939, 0.7861407625440129, 0.7895144859985515, 0.5668122641259743, 0.5194209504621901, 0.7664936440772383, 0.6895681728682042, 0.5471113079675233, 0.5697905974943365, 0.9654621412877444, 0.6117534747774322, 0.5195230510250107, 0.5029874549497909, 0.6436230566901067, 0.7814019757174042, 0.7243115146159262, 0.8490022959549282, 
0.5986087210572619, 0.8917653315595842, 0.8186740951384265, 0.6557207833538385, 0.7383517661652084, 0.5076593667567217, 0.8351509540790598, 0.9066154116110787, 0.8633966120608996, 0.5448011348714983, 0.8811262486064548, 0.5311062494107786, 0.8434152015057589, 0.7587715892859013, 0.6525977108705863, 0.8709010148712284, 0.9902567437643712, 0.6363651362541571, 0.93154565323665, 0.6673336659816802, 0.6363488918779094, 0.5398241942834009, 0.7914814885511636, 0.9529666873959003, 0.564045965681691, 0.6382659091632683, 0.7805351222831617, 0.7733886221614187, 0.5241008399449433, 0.611945308809795, 0.6169907785866606, 0.6385936386173398, 0.7841750718670859, 0.7173238321530276, 0.7367928171908347, 0.6265166185554858, 0.6483108410096134, 0.9089151560803901, 0.6335384299494107, 0.8466095418964659, 0.636736467136152, 0.7789681772250416, 0.9077504496156267, 0.6867613895975948, 0.8301847995782373, 0.7681290610779284, 0.8055269135501388, 0.509034281413844, 0.5060145402076777, 0.9531102781357583, 0.6049030717098274, 0.6978487916911934, 0.7108004718191071, 0.939874195247411, 0.592163166813884, 0.6911488424262666, 0.6024932357446733, 0.5784406533904147, 0.7248755260288222, 0.8132232450347373, 0.5497978515691366, 0.6422827817500908, 0.5623109553041825, 0.7026465582885555, 0.6835431097023816, 0.8280875424648927, 0.5697242233141735, 0.8156284418986919, 0.5454630115116291, 0.7460605384610532, 0.5902137370746723, 0.7745159125590151, 0.6547269575301189, 0.7346468747258099, 0.9460016552534074, 0.8465037831537723, 0.6581592649653958, 0.9961161028919017, 0.9616426679771357, 0.6219619779644283, 0.8905650088552008, 0.693779959860743, 0.8630066415278895, 0.6282224791138035, 0.7181170944536824, 0.716100884180571, 0.7756481074289252, 0.9142676032562131, 0.9180303779395655, 0.5432335136324105, 0.7182639917924389, 0.8153204221836884, 0.5827031352248715, 0.5463832979704049, 0.7948268737909616, 0.6598653129414418, 0.7845986404426359, 0.7420335255239141, 0.9385969458853061, 0.6671326457500286, 0.6998412684827398, 0.9868858054402161, 0.8786467710924218, 0.7726528808366292, 0.5025826296328212, 0.8629287020940559, 0.768810976141887, 0.7143246919991757, 0.5711739798143532, 0.7403205840894782, 0.6027968963707016, 0.6175212302157366, 0.895600718676384, 0.6650496553531864, 0.5250917194786286, 0.6758665713113955, 0.7805817873318424, 0.6818936839385621, 0.6057482283252358, 0.6871084714045712, 0.6843923217748706, 0.9580840879847571, 0.9620823503252021, 0.9359778904131593, 0.5283872878497775, 0.6871858860380478, 0.9444190932633929, 0.9885328307628365, 0.8083797157964749, 0.9424081240545741, 0.8802440651197797, 0.784110556928838, 0.7405118236993029, 0.8360838660431231, 0.899166063131732, 0.77185218566812, 0.6062536162567856, 0.9208825384410072, 0.7634110466954283, 0.6976841175687796, 0.8327596087256153, 0.9671620346864906, 0.9686730049558581, 0.8064760176546859, 0.8111507853410636, 0.8183558206337572, 0.9303474076168086, 0.7269156219306148, 0.663924586136488, 0.5652355395972282, 0.7114783361613157, 0.9941623968973058, 0.9019171227186691, 0.5718219819754591, 0.604427496795746, 0.8175951412577782, 0.8307192547040465, 0.8351033792034779, 0.8684962626794389, 0.6539215870803055, 0.9848417146820273, 0.6531511706497632, 0.6594799796002644, 0.9779532141456241, 0.673486037170848, 0.5789477708928197, 0.6180602771363755, 0.5329323393363907, 0.8489464197755796, 0.5064534417492499, 0.8862573495967074, 0.6222412180577075, 0.6278909822734882, 0.6167278917623664, 0.7514889415570422, 0.9842393117235242, 0.7040952031606168, 0.6162300748507374, 
0.5495587025920416, 0.9547786202798549, 0.8277331199332134, 0.7967722842015716, 0.9440799125798904, 0.9418650824551249, 0.7534229072756989, 0.9912622414268777, 0.8452315698593057, 0.9822508947400309, 0.5730411813614218, 0.8736561417308908, 0.9482751125132334, 0.9656610121257161, 0.5773354752984169, 0.5457123334403183, 0.7477422326987724, 0.7695973624514447, 0.9589493801559172, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
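// h_B (below) follows the host-side "h_" pointer naming convention; it appears
// to hold precomputed integer index data for this file's embedded test inputs,
// though its exact role is not documented here.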
int h_B[] = {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 
1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3573, 3574, 3576, 3578, 3580, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3592, 3593, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3611, 3612, 3613, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3733, 3734, 3735, 3736, 3738, 3740, 3742, 3743, 3744, 3745, 3746, 3748, 3750, 3752, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3765, 3767, 3768, 3770, 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3850, 717, 712, 4056, 4058, 704, 699, 4060, 4062, 910, 905, 717, 712, 704, 699, 638, 633, 3864, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4081, 3989, 4083, 3994, 656, 3997, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3989, 4085, 3992, 3994, 
656, 3997, 669, 910, 905, 3875, 910, 905, 3877, 933, 938, 933, 938, 910, 905, 3884, 910, 905, 3886, 933, 938, 933, 938, 755, 789, 3891, 3893, 784, 789, 4103, 760, 760, 3896, 590, 585, 610, 605, 3901, 784, 590, 585, 3904, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3916, 3917, 3919, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3928, 3930, 3932, 755, 755, 755, 784, 590, 585, 717, 712, 4109, 4012, 4111, 4012, 760, 755, 638, 633, 3942, 638, 633, 3943, 784, 789, 4113, 4115, 854, 859, 3949, 905, 910, 3953, 928, 905, 910, 3953, 928, 4118, 943, 859, 854, 3959, 877, 872, 877, 872, 859, 854, 3965, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3989, 638, 633, 3992, 3994, 656, 3997, 669, 717, 712, 717, 712, 717, 712, 4003, 4004, 704, 699, 717, 712, 717, 712, 4009, 4010, 4012, 760, 755, 760, 755, 760, 755, 765, 789, 784, 789, 784, 4148, 789, 784, 826, 821, 836, 831, 846, 841, 4151, 826, 821, 836, 831, 846, 841, 4153, 859, 854, 4039, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4172, 4175, 4159, 4160, 4161, 4177, 4179, 4181, 4183, 4185, 4190, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4202, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4210, 4160, 4161, 1363, 1547, 4212, 4135, 4161, 4217, 4105, 1545, 1540, 4219, 4107, 4108, 4225, 1276, 1271, 4227, 1276, 1271, 1281, 1281, 1281, 4229, 4231, 1545, 1540, 1545, 1540, 1545, 1540, 4135, 4160, 4161, 4135, 4160, 4161, 1521, 1516, 4138, 1521, 1516, 4140, 4156, 4158, 4159, 4160, 4161, 4242, 1521, 1516, 4164, 1521, 1516, 4167, 1545, 1540, 1545, 1540, 4241, 4240, 4241, 4240, 4241, 4240, 2077, 2077, 4241, 4240, 4241, 4240, 4241, 4240, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4263, 2174, 4265, 2174, 4267, 4270, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4260, 4259, 4274, 4273, 4260, 4259, 4260, 4259, 4260, 4259, 4260, 4259, 4274, 4273, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4307, 4308, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4344, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4449, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 
4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544, 4545, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4556, 4557, 4558, 4559, 4560, 4561, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4584, 4585, 4586, 4146, 4145, 4146, 4145, 4146, 4145, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4618, 4619, 4620, 4621, 4623, 4624, 4626, 4627, 4628, 4630, 4631, 4633, 4634, 4636, 4637, 4638, 4639, 4640, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4587, 4677, 4678, 4590, 4679, 4680, 4592, 4681, 4682, 4683, 4684, 4587, 4685, 4686, 4188, 4187, 4590, 4687, 4688, 4188, 4187, 4592, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4240, 4240, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4734, 4736, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4269, 4273, 4755, 4756, 4272, 4274, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4272, 4269, 4272, 4269, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4800, 4802, 4804, 4806, 4808, 4810, 4812, 4815, 4817, 4819, 4821, 4823, 4825, 4828, 4830, 4832, 4834, 4836, 4838, 4841, 4844, 4846, 4848, 4856, 4858, 4860, 4863, 4866, 4868, 4870, 4879, 4882, 4885, 4887, 4889, 4892, 4895, 4897, 4903, 4908, 4910, 4914, 4917, 4919, 4922, 4926, 4932, 4935, 4937, 4939, 4948, 4950, 4954, 4956, 4959, 4962, 4964, 4967, 4971, 4976, 4979, 4981, 4983, 4986, 4988, 4990, 4992, 4994, 4997, 5000, 5002, 5004, 5007, 5010, 5017, 5019, 5021, 5025, 5027, 5029, 5034, 5036, 5038, 5041, 5043, 5045, 5047, 5049, 5051, 5053, 5055, 5057, 5059, 5062, 5064, 5066, 5069, 5072, 5075, 5016, 5014, 5033, 5081, 5082, 5083, 5084, 5016, 5014, 5033, 5085, 5086, 5016, 5014, 5033, 5033, 5016, 5014, 5033, 5087, 5089, 5091, 5093, 4874, 4851, 4855, 4853, 4874, 4873, 4878, 4876, 5097, 5099, 5101, 5106, 5108, 5112, 4931, 4902, 5040, 4146, 4145, 4147, 5014, 4931, 4902, 4931, 5040, 4146, 4145, 4147, 5014, 4931, 4943, 4150, 5016, 4934, 4943, 4150, 4931, 4943, 5040, 4146, 4145, 4147, 5117, 5016, 5014, 4448, 4952, 4450, 4953, 5016, 5014, 5033, 4975, 4970, 4975, 4974, 4975, 4970, 5121, 4975, 4974, 5123, 5128, 5130, 5132, 5140, 5143, 5016, 5014, 5033, 5151, 5154, 5157, 5159, 5145, 5142, 5161, 5164, 5167, 4666, 4241, 4240, 5147, 5172, 5175, 5176, 5177, 5180, 5181, 5182, 5186, 5188, 5190, 5193, 5145, 5142, 5145, 5116, 5197, 5199, 5201, 4666, 4241, 4240, 5203, 4666, 4241, 5205, 4666, 4241, 5206, 5147, 5147, 4666, 4241, 4240, 5145, 5142, 2135, 2133, 5207, 5210, 5213, 4666, 4241, 4240, 5145, 5116, 2135, 2133, 5216, 5219, 5222, 4666, 4241, 4240, 5145, 5142, 5227, 4666, 4241, 4240, 4666, 4241, 4240, 5147, 4666, 4241, 4240, 5231, 5233, 5235, 5238, 5240, 5240, 5185, 5247, 5248, 5244, 5244, 5229, 5251, 5252, 5240, 5185, 5244, 5229, 5240, 5240, 5240, 5237, 5240, 5240, 5244, 5229, 
5263, 5264, 5265, 5266, 5230, 5244, 4274, 4273, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4065, 4064, 4087, 4065, 4064, 4088, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5411, 5412, 5023, 4144, 4143, 5326, 5031, 4144, 4143, 5413, 5414, 4150, 4546, 4147, 5326, 5416, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5418, 5419, 5023, 4144, 4143, 5320, 5031, 4144, 4143, 5420, 5421, 4546, 4150, 4147, 4065, 4064, 4090, 4089, 4996, 4142, 4141, 4924, 5382, 4925, 5012, 5009, 5423, 5424, 5023, 4144, 4143, 5323, 5031, 4144, 4143, 5425, 5040, 4546, 4150, 4147, 5012, 5009, 5023, 4144, 4143, 5389, 5031, 4144, 4143, 5426, 5040, 4546, 4150, 4147, 5382, 4925, 4996, 4142, 4141, 4924, 5012, 5009, 5427, 5428, 5031, 4144, 4143, 5429, 5023, 4144, 4143, 5326, 4146, 4145, 4546, 4150, 4147, 4071, 4070, 4840, 4142, 4141, 4843, 5333, 4850, 5434, 5435, 5436, 5437, 4862, 4142, 4141, 4865, 5340, 4872, 5438, 5439, 5440, 5441, 4884, 4881, 4088, 4087, 4894, 4891, 4090, 4089, 4996, 4141, 4142, 4924, 5382, 4925, 4912, 4928, 4934, 5448, 4941, 4144, 4143, 5449, 5450, 5451, 5452, 5453, 4996, 4141, 4142, 4924, 5382, 4925, 5012, 4901, 5454, 5455, 4941, 4144, 4143, 5456, 5040, 4146, 4145, 4400, 4996, 4141, 4142, 4934, 5457, 4941, 4144, 4143, 5458, 5459, 5460, 5461, 4996, 4141, 4142, 4924, 5382, 4925, 4912, 5009, 5462, 5463, 4941, 4144, 4143, 5464, 5040, 4146, 4145, 5465, 4996, 4142, 4141, 4929, 4928, 5466, 5467, 4941, 4144, 4143, 5468, 5040, 4146, 4145, 5469, 4996, 4142, 4141, 4924, 4925, 4929, 4928, 4934, 5470, 4941, 4144, 4143, 5471, 5472, 5473, 5474, 5475, 4130, 4128, 4996, 4142, 4141, 4999, 5382, 5006, 4961, 4958, 5477, 5478, 4144, 4143, 5479, 5480, 5040, 4146, 4145, 4150, 4147, 4546, 4144, 4143, 5481, 4144, 4143, 5482, 5040, 4146, 4145, 4996, 4142, 4141, 4999, 5382, 5006, 4961, 4958, 5483, 5484, 5023, 5389, 5031, 5485, 5040, 4146, 4145, 4150, 4147, 4546, 4129, 4131, 5486, 5487, 5488, 5489, 4966, 4969, 5490, 5491, 4973, 5493, 5494, 4978, 4129, 4128, 4985, 4131, 4130, 4996, 4142, 4141, 4999, 5382, 5006, 5012, 5009, 5501, 5502, 5023, 4144, 4143, 5389, 5031, 4144, 4143, 5503, 5040, 4146, 4145, 4150, 4546, 4147, 5399, 4555, 5402, 4562, 5061, 5406, 5071, 5068, 5077, 5074, 5508, 5509, 5510, 5511, 5512, 4583, 5513, 5514, 5515, 5516, 5517, 5518, 5520, 5521, 5523, 4193, 4192, 4233, 4233, 5095, 5096, 5443, 5528, 5529, 5530, 5531, 4233, 5535, 5536, 5537, 4233, 5539, 5540, 5145, 5142, 5446, 5542, 5543, 5145, 5116, 5447, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 4622, 5552, 5553, 5557, 5558, 5559, 5560, 5561, 4629, 5562, 5563, 5492, 5492, 5492, 5495, 5567, 5568, 5569, 5570, 5571, 4233, 4233, 4233, 4236, 4236, 5573, 5574, 5575, 5576, 5577, 5578, 5145, 5142, 4238, 4238, 5579, 5580, 5581, 5582, 5156, 5153, 4245, 4245, 5587, 5230, 5588, 5589, 4272, 4274, 4273, 4269, 5240, 5237, 5590, 5240, 5237, 5592, 5593, 5594, 5595, 5597, 5598, 4272, 4274, 4273, 4269, 5599, 5600, 5601, 4272, 5230, 4269, 5212, 5209, 5215, 5602, 4272, 4269, 5603, 5604, 5229, 4272, 4269, 5605, 5230, 5212, 5209, 5215, 5221, 5218, 5224, 5212, 5209, 5215, 5221, 5218, 5224, 5606, 5230, 5607, 5608, 5240, 5237, 5613, 5611, 5240, 5237, 5614, 5615, 5616, 57, 58, 59, 60, 61, 62, 63, 5632, 5633, 5634, 5635, 5636, 5637, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5657, 5658, 5659, 5660, 5662, 5663, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5672, 5673, 5674, 5675, 5676, 5677, 5678, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5691, 5692, 5693, 5694, 5695, 5696, 5698, 5699, 5700, 5701, 5702, 5703, 
5704, 5706, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5720, 5721, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5729, 5730, 5731, 5732, 5734, 5735, 5736, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5755, 5757, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5767, 5769, 5770, 5771, 5772, 5773, 5774, 5775, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5787, 5788, 5789, 5791, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5803, 5805, 5806, 5807, 5809, 5810, 5811, 5812, 5813, 5814, 5815, 5816, 5818, 5819, 5820, 5821, 5825, 5826, 5827, 5828, 5829, 5830, 5831, 5832, 5833, 5835, 5836, 5837, 5839, 5840, 5841, 5843, 5844, 5845, 5846, 5847, 5848, 5850, 5851, 5852, 5854, 5855, 5856, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5867, 5868, 5869, 5871, 5875, 5876, 5877, 5878, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5887, 5888, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5898, 5900, 5901, 5903, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5911, 5912, 5913, 5914, 5916, 5917, 5918, 5920, 5921, 5922, 5923, 5924, 5925, 5926, 5927, 5928, 5930, 5932, 5933, 5934, 5936, 5937, 5939, 5940, 5941, 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, 5950, 5951, 5952, 5953, 5955, 5956, 5957, 5958, 5959, 5960, 5961, 5963, 5964, 5965, 5966, 5967, 5968, 5969, 5970, 5971, 5972, 5973, 5974, 5975, 5976, 5977, 5978, 5979, 5981, 5982, 5983, 5984, 5985, 5989, 5991, 5993, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6001, 6003, 6005, 6006, 6009, 6010, 6012, 6013, 6014, 6015, 6017, 6018, 6019, 6022, 6025, 6027, 6028, 6030, 6033, 6035, 6036, 6038, 6039, 6040, 6041, 6042, 6045, 6047, 6048, 6049, 6050, 6051, 6052, 6055, 6058, 6059, 6060, 6061, 6063, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6074, 6075, 6076, 6077, 6078, 6079, 6081, 6082, 6083, 6084, 6087, 6089, 6090, 6091, 6092, 6093, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6107, 6108, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6131, 6132, 6133, 6134, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6144, 6147, 6150, 6156, 6159, 6163, 6166, 6170, 6176, 6179, 6183, 6186, 6189, 6191, 6193, 6199, 6202, 6206, 6210, 6213, 6215, 6219, 6223, 6228, 6232, 6235, 6238, 6242, 6244, 6247, 6249, 6257, 6265, 6267, 6269, 6271, 6273, 6279, 6281, 6282, 6285, 6286, 6292, 6295, 6298, 6302, 6305, 6306, 6309, 6310, 6316, 6319, 6322, 6325, 6328, 6331, 6334, 6337, 6342, 6344, 6345, 6348, 6351, 6357, 6360, 6362, 6365, 6368, 6370, 6372, 6375, 6381, 6387, 6390, 6403, 6406, 6408, 6414, 6417, 6421, 6424, 6427, 6436, 6438, 6445, 6198, 6155, 6175, 6433, 6431, 6435, 6155, 6227, 6175, 6198, 6227, 6435, 6449, 6254, 6256, 6262, 6264, 6459, 6461, 6462, 6465, 6466, 6278, 6315, 6341, 6291, 6431, 6278, 6291, 6341, 6315, 6380, 6341, 6433, 6469, 6473, 6413, 6433, 6431, 6350, 6349, 6395, 6356, 6380, 5919, 6385, 6433, 6431, 6394, 6393, 6395, 6396, 6413, 6433, 6431, 6399, 6401, 6481, 6488, 6489, 6490, 6413, 6433, 6431, 6435, 6494, 6495, 5584, 5226, 5225, 6499, 5992, 5990, 5584, 5226, 5225, 6502, 6504, 5584, 5583, 6506, 6472, 6476, 5584, 5583, 6508, 6510, 5992, 5990, 5584, 5226, 5225, 6513, 6515, 5584, 5583, 5584, 5225, 5226, 6518, 6519, 6472, 6522, 6476, 5584, 5226, 5225, 6525, 6526, 5225, 5226, 5584, 6529, 5584, 5226, 5225, 6532, 6472, 6534, 6476, 6537, 6472, 6540, 6476, 6543, 5226, 5225, 5584, 6546, 5584, 5583, 6548, 6549, 5584, 5583, 
6550, 5584, 5583, 6553, 6555, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6594, 6596, 6597, 6598, 6599, 6601, 6602, 6603, 6606, 6608, 6609, 6610, 6612, 6613, 6614, 6615, 6617, 6618, 6620, 6622, 6623, 6628, 6631, 6633, 6635, 6636, 6637, 6639, 6641, 6643, 6644, 6645, 6647, 6648, 6649, 6652, 6654, 6657, 6658, 6661, 6662, 6664, 6665, 6668, 6670, 6671, 6672, 6673, 6677, 6201, 6678, 6158, 6679, 6178, 6680, 6681, 6682, 6149, 6146, 6683, 6158, 6684, 6234, 6685, 6178, 6605, 6686, 6201, 6383, 6687, 6234, 6688, 6675, 6690, 6691, 6692, 6693, 6627, 6625, 6699, 6630, 5794, 6700, 6318, 6701, 6651, 5874, 6702, 6294, 6703, 6666, 6667, 6675, 6704, 6630, 5794, 6705, 6294, 6706, 6638, 5824, 6707, 6318, 6708, 6330, 6709, 6651, 5874, 6710, 6675, 6713, 6416, 6714, 6715, 6716, 6717, 6718, 6719, 6359, 5890, 5889, 5902, 5899, 6720, 6383, 6721, 6722, 6723, 6724, 6725, 6726, 6727, 6728, 6729, 6416, 6730, 6731, 6666, 6667, 6732, 6733, 6667, 6666, 6738, 6416, 6739, 6740, 6741, 6675, 6440, 6744, 6745, 6746, 6689, 6748, 6749, 6750, 6751, 6752, 6753, 6743, 6755, 6756, 6470, 6758, 6759, 6743, 6760, 6761, 6743, 6689, 6764, 6765, 6766, 6767, 6768, 6769, 6771, 6772, 6743, 6773, 6774, 6775, 6777, 6456, 6778, 6457, 6780, 6737, 6781, 6782, 6783, 6786, 6787, 6788, 6789, 6482, 6790, 6791, 6792, 6696, 6794, 6698, 6796, 6470, 6798, 6474, 6800, 6482, 6802, 6803, 6804, 6743, 6806, 6807, 6743, 6737, 6810, 6811, 6743, 6813, 6814, 6500, 6080, 6763, 6511, 6086, 6517, 6524, 6785, 6533, 6536, 6539, 6542, 6545, 6547, 6808, 6809, 6552, 6816, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6196, 6897, 5705, 6205, 6859, 6153, 6899, 5655, 6162, 6851, 6173, 6901, 5679, 6182, 6855, 6902, 6905, 6906, 6153, 6908, 5655, 6162, 6851, 6231, 6910, 5719, 6169, 6862, 6173, 6912, 5679, 6182, 6855, 6913, 6196, 6915, 5705, 6205, 6859, 6916, 5719, 6218, 6862, 6231, 6918, 6241, 5737, 6866, 6920, 6252, 6260, 6925, 6926, 6276, 6928, 5790, 6929, 6313, 6931, 5838, 5842, 6340, 6933, 5870, 6934, 6289, 6936, 5808, 6301, 6938, 6939, 6940, 6276, 6942, 5790, 6943, 6289, 6945, 5808, 6301, 6378, 6947, 5853, 6948, 6313, 6950, 5838, 5842, 6378, 6952, 5853, 5857, 6340, 6954, 5870, 6955, 6957, 6411, 6959, 5962, 6420, 6895, 6960, 6962, 6354, 6966, 6967, 6968, 6886, 6969, 6970, 6890, 6378, 6972, 6973, 6890, 6975, 6977, 6411, 6982, 5962, 6420, 6895, 6983, 6985, 6986, 6989, 6990, 6411, 6992, 5962, 6420, 6895, 6993, 6996, 6480, 6477, 6480, 6478, 6997, 6998, 7001, 7004, 7008, 7009, 7011, 7014, 7015, 7017, 7018, 7021, 7025, 7027, 7028, 7032, 7034, 6480, 6477, 7036, 7037, 7040, 6480, 6477, 6480, 6478, 7044, 7045, 7048, 7050, 7052, 7054, 6480, 6477, 6480, 6478, 6480, 6479, 7056, 7057, 7060, 7061, 7063, 7064, 7065, 7067, 7068, 7070, 7007, 7071, 7072, 7073, 7074, 7024, 7075, 7031, 7076, 7077, 7043, 7078, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 7087, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7104, 7106, 7107, 7108, 7109, 7111, 7112, 7113, 7114, 7116, 7117, 7118, 7120, 7122, 7124, 7125, 7126, 7127, 7129, 7130, 7131, 7132, 7134, 7135, 7136, 7138, 7140, 7141, 7142, 7144, 7145, 7146, 7147, 7149, 7150, 7151, 7153, 7154, 7155, 7157, 7159, 7161, 7163, 7164, 7165, 7167, 7169, 7171, 7172, 7173, 7176, 7178, 7180, 7182, 7183, 7184, 7186, 7188, 7190, 7191, 7192, 7194, 7195, 7196, 7198, 7201, 7203, 7204, 7205, 7208, 7210, 7212, 7213, 7215, 7216, 7219, 
7222, 7224, 7225, 7226, 7228, 7230, 7232, 7234, 7235, 7236, 7207, 7239, 7240, 7221, 7241, 7242, 7244, 6904, 7245, 7246, 6995, 6062, 6995, 6062, 6904, 6021, 6919, 6021, 7253, 7254, 7257, 6020, 7207, 7260, 7261, 7263, 7264, 7207, 7265, 7266, 7221, 7267, 7268, 7270, 6020, 6020, 6021, 7207, 7275, 7276, 7221, 7277, 7278, 7279, 7280, 7282, 6995, 6062, 6995, 6062, 7291, 7248, 7012, 7013, 7251, 7255, 7287, 7296, 7255, 7298, 7033, 7035, 7301, 7049, 7051, 7053, 7055, 7284, 7287, 7287, 7289, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6896, 7361, 6898, 7365, 6900, 7369, 6907, 7374, 6909, 7378, 6911, 7382, 6914, 7386, 7389, 7392, 7393, 6921, 6923, 6927, 6930, 6932, 6935, 6941, 6944, 6946, 6949, 6951, 6953, 6958, 7426, 6965, 6971, 6981, 7437, 6991, 7443, 7446, 7447, 7431, 7449, 7450, 7453, 5988, 7454, 7456, 7457, 7458, 7459, 7460, 5988, 7461, 7462, 7463, 7464, 7441, 6455, 7408, 7168, 7409, 7467, 7468, 7469, 7473, 7474, 7431, 7476, 7477, 7168, 7403, 7408, 7160, 7409, 7480, 7160, 7403, 7168, 7408, 7409, 7481, 7179, 7414, 7187, 7419, 7422, 7199, 7440, 7482, 7483, 7484, 7431, 7433, 7435, 7486, 7487, 7440, 7489, 7441, 7492, 7493, 7494, 7495, 7452, 7497, 7498, 7499, 7500, 7501, 7502, 7504, 7466, 7506, 7507, 7471, 7472, 7479, 7509, 7510, 7511, 7512, 7491, 7513, 7514, 7515, 7516, 61, 62, 63, 7202, 7428, 7209, 7591, 7110, 7367, 7105, 7363, 7115, 7371, 7595, 7233, 7445, 7233, 7445, 7105, 7363, 7110, 7367, 7115, 7371, 7602, 7123, 7376, 7128, 7380, 7133, 7384, 7139, 7388, 7143, 7391, 7148, 7395, 6924, 6922, 7445, 7607, 7608, 7170, 7609, 7166, 7610, 7611, 7202, 7428, 7202, 7428, 7209, 7617, 7166, 7620, 7162, 7621, 7170, 7622, 7158, 7623, 7624, 7158, 7626, 7162, 7627, 7166, 7628, 7170, 7629, 7630, 7177, 7632, 7181, 7633, 7185, 7634, 7189, 7635, 7193, 7636, 7197, 7637, 7638, 7202, 7428, 7209, 7642, 7643, 7217, 7644, 7223, 7439, 7647, 7649, 7233, 7445, 7233, 7445, 7590, 7593, 7654, 7455, 7598, 7600, 7603, 7605, 7465, 7662, 7614, 7665, 7666, 7616, 7619, 7667, 7641, 7646, 7672, 7651, 7653, 7292, 7306, 7299, 7293, 7294, 7295, 7297, 7306, 7299, 7304, 7303, 7306, 7305, 7308, 7309, 7310, 7311, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7680, 7681, 7682, 7684, 7685, 7686, 7687, 7688, 7689, 7691, 7692, 7693, 7694, 7695, 7696, 7697, 7698, 7699, 7700, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7719, 7721, 7724, 7725, 7726, 7727, 7728, 7730, 7732, 7734, 7736, 7739, 7741, 7743, 7745, 7748, 7750, 7752, 7754, 7756, 7758, 7761, 7762, 7763, 7766, 7768, 7769, 7772, 7773, 7774, 7775, 7776, 7777, 7690, 7779, 7780, 7781, 7701, 7782, 7783, 7784, 7718, 7612, 7786, 7789, 7790, 7625, 7631, 7639, 7792, 7793, 7648, 7651, 7795, 7796, 7290, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7505, 7804, 7805, 7300, 7508, 7302, 7806, 7807, 7808, 7809, 7307, 7810, 7811, 7812, 7813, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7903, 7873, 7767, 7765, 7683, 7880, 7894, 7876, 7900, 7878, 7939, 7882, 7884, 7886, 7900, 7888, 7894, 7890, 7943, 7900, 7892, 7894, 7898, 7896, 7902, 7900, 7898, 7947, 7753, 7722, 7720, 7757, 7740, 7742, 7948, 7909, 7911, 7767, 7765, 7729, 7733, 7737, 7757, 7731, 7735, 7753, 7952, 7742, 7746, 7757, 7740, 7753, 7744, 7953, 7751, 7753, 7749, 7759, 7757, 7755, 7954, 7928, 7767, 7765, 7764, 7932, 
7957, 7958, 7934, 7936, 7961, 7496, 7963, 7503, 7969, 7970, 7972, 7973, 7974, 7975, 7977, 7979, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8008, 8009, 8011, 8012, 8013, 8014, 8015, 8016, 8017, 8019, 8020, 8021, 8022, 8023, 8024, 8025, 8026, 7905, 8028, 8029, 8030, 8031, 8032, 8033, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 8045, 8047, 8048, 8049, 8050, 8051, 8052, 8054, 8055, 8056, 8057, 8058, 8059, 8061, 8062, 8063, 8064, 8065, 8068, 8069, 8071, 8073, 63, 8129, 8132, 8134, 8139, 8141, 8144, 8146, 8149, 8152, 8153, 8155, 8157, 8161, 8164, 8166, 8168, 8170, 8172, 8174, 8176, 8178, 8180, 8183, 8066, 7937, 7941, 7942, 8066, 7949, 8066, 7950, 8066, 7955, 8067, 7959, 7960, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8192, 8193, 8195, 8197, 8199, 8201, 8204, 8205, 8208, 8211, 8214, 8215, 8216, 8217, 8218, 8027, 8219, 8220, 8221, 8222, 8223, 8224, 8225, 8226, 8227, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8257, 8258, 8259, 8261, 8263, 8264, 8265, 7938, 7945, 7945, 8271, 7951, 7951, 7956, 7965, 7966, 7982, 7968, 7962, 7983, 8077, 7967, 7980, 7981, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8327, 8328, 7944, 8010, 8329, 7944, 8018, 8060, 8034, 8331, 8332, 8060, 8046, 8060, 8053, 8333, 8334, 8335, 8336, 8337, 8338, 8074, 8339, 8340, 8341, 8342, 8343, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8267, 8386, 8387, 8389, 8390, 8391, 8392, 8272, 8274, 8395, 8396, 8397, 8398, 8276, 8405, 8401, 8408, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8448, 8385, 8388, 8453, 8455, 8456, 8457, 8459, 8461, 8462, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8513, 8514, 8075, 8070, 8079, 8081, 8076, 8078, 8080, 8072, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8578, 8189, 8579, 8580, 8581, 8582, 8583, 8584, 8190, 8585, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8641, 8648, 8400, 8642, 8404, 8407, 8646, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8403, 8705, 8706, 8708, 8709, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 
63, 8768, 8710, 8771, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8770, 8833, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8896, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8960, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
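// h_C: right-hand operand index table for the generated arithmetic circuit
// (the matching left-hand operand table, presumably h_B, ends just above).
// Entries index into the flattened shared scratchpad R used by ac() below.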
int h_C[] = {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 
1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 961, 962, 963, 964, 967, 968, 969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 1163, 1164, 3575, 3577, 3579, 3581, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3591, 1265, 1266, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 1312, 1315, 1321, 1324, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 1550, 1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 1770, 1771, 3721, 3723, 3725, 3727, 3729, 3731, 1805, 1806, 1909, 1910, 3737, 3739, 3741, 1925, 1926, 1930, 1931, 3747, 3749, 3751, 3753, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3764, 3766, 2047, 3769, 2071, 3772, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3840, 4048, 4048, 3841, 3843, 3842, 3844, 4048, 4048, 4048, 3846, 3845, 3848, 3847, 3849, 3852, 3851, 4057, 4059, 3854, 3853, 4061, 4063, 3856, 3855, 3858, 3857, 3860, 3859, 3862, 3861, 3863, 3866, 3865, 3868, 3867, 4023, 3869, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3870, 3871, 3981, 3980, 3983, 3982, 3985, 3984, 3872, 4082, 624, 4084, 3993, 3995, 3996, 3998, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 
3982, 3985, 3984, 3986, 624, 4086, 639, 3993, 3995, 3996, 3998, 3874, 3873, 183, 4048, 3876, 192, 3879, 3878, 3881, 3880, 3883, 3882, 183, 4048, 3885, 192, 3888, 3887, 3890, 3889, 3907, 3909, 624, 3892, 3895, 3894, 4104, 3908, 4017, 3934, 3898, 3897, 3900, 3899, 639, 3909, 3902, 3980, 3903, 3906, 3905, 3908, 3907, 3909, 3911, 3910, 3912, 3913, 3915, 3914, 624, 639, 3918, 3920, 3922, 3921, 3923, 3925, 3924, 4008, 3926, 4008, 3927, 718, 3929, 3931, 3933, 4015, 4016, 3934, 3980, 3935, 3937, 3936, 4110, 404, 4112, 405, 3939, 3938, 3941, 3940, 624, 3991, 3991, 639, 3945, 3944, 4114, 4116, 3947, 3946, 3948, 3951, 3950, 3952, 3954, 3951, 3950, 3952, 3954, 4119, 3955, 3957, 3956, 3958, 4041, 3960, 4041, 3961, 3963, 3962, 3964, 3967, 3966, 3969, 3968, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 3988, 3987, 624, 3991, 3990, 639, 3993, 3995, 3996, 3998, 4000, 3999, 4008, 4001, 4008, 4002, 718, 690, 4006, 4005, 4008, 4007, 4008, 4008, 718, 723, 4011, 4014, 4013, 4017, 4015, 4017, 4016, 4018, 4020, 4019, 4023, 4021, 4149, 4023, 4022, 4025, 4024, 4027, 4026, 4029, 4028, 4152, 4031, 4030, 4033, 4032, 4035, 4034, 4154, 4037, 4036, 4038, 4041, 4040, 4043, 4042, 4045, 4044, 4046, 4048, 4047, 4049, 4051, 4050, 4052, 4054, 4053, 4055, 4173, 4176, 1491, 1491, 1491, 4178, 4180, 4182, 4184, 4186, 4191, 4073, 4072, 4166, 4074, 4076, 4075, 4078, 4077, 4079, 4080, 4092, 4091, 4094, 4093, 4203, 4096, 4095, 4132, 4133, 4134, 4098, 4097, 4100, 4099, 4211, 1491, 1491, 4102, 4101, 4213, 1341, 1341, 4218, 1507, 4123, 4106, 4220, 1220, 1220, 4226, 4120, 4117, 4228, 4120, 4120, 4121, 4133, 4134, 4230, 4232, 4123, 4122, 4125, 4124, 4127, 4126, 4132, 4133, 4134, 1341, 1341, 1341, 4137, 4136, 1507, 4166, 4139, 1522, 4155, 4157, 1491, 1491, 1491, 4243, 4163, 4162, 1507, 4166, 4165, 1522, 4169, 4168, 4171, 4170, 4174, 4174, 4174, 4174, 4174, 4174, 4207, 4224, 4189, 4189, 4189, 4189, 4189, 4189, 4194, 4224, 4195, 4197, 4196, 4199, 4198, 4200, 4224, 4201, 4248, 4251, 4224, 4204, 4224, 4205, 4207, 4206, 4209, 4208, 4248, 4247, 4221, 4223, 4222, 4252, 4215, 4214, 4216, 4248, 4247, 4221, 4223, 4222, 4252, 4224, 4255, 4256, 4244, 4244, 4235, 4234, 4264, 4237, 4266, 4239, 4268, 4271, 4244, 4244, 4246, 4246, 4248, 4247, 4249, 4251, 4250, 4252, 4253, 4254, 4255, 4256, 4261, 4261, 4257, 4257, 4258, 4258, 4261, 4261, 4261, 4261, 4261, 4261, 4262, 4262, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 
1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4306, 4305, 4306, 4305, 4310, 4309, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4174, 2188, 2189, 4174, 2191, 2192, 4174, 2194, 2195, 2198, 2199, 4189, 2265, 2266, 4589, 4588, 4189, 2270, 2271, 4607, 4591, 4189, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4617, 4617, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4737, 4735, 2612, 2613, 4738, 4735, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4733, 4733, 4738, 4737, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4801, 4803, 4805, 4807, 4809, 4811, 4813, 4816, 4818, 4820, 4822, 4824, 4826, 4829, 4831, 4833, 4835, 4837, 4839, 4842, 4845, 4847, 4849, 4857, 4859, 4861, 4864, 4867, 4869, 4871, 4880, 4883, 4886, 4888, 4890, 4893, 4896, 4898, 4904, 4909, 4911, 4915, 4918, 4920, 4923, 4927, 4933, 4936, 4938, 4940, 4949, 4951, 4955, 4957, 4960, 4963, 4965, 4968, 4972, 4977, 4980, 4982, 4984, 4987, 4989, 4991, 4993, 4995, 4998, 5001, 5003, 5005, 5008, 5011, 5018, 5020, 5022, 5026, 5028, 5030, 5035, 5037, 5039, 5042, 5044, 5046, 5048, 5050, 5052, 5054, 5056, 5058, 5060, 5063, 5065, 5067, 5070, 5073, 5076, 5015, 4814, 4067, 1582, 1583, 1588, 1589, 5015, 4827, 5032, 1608, 1609, 5015, 4827, 4067, 4069, 5015, 4827, 5032, 5088, 5090, 5092, 5094, 4345, 4343, 4854, 4852, 4368, 4368, 4877, 4875, 5098, 5100, 5102, 5107, 5109, 5113, 5015, 4942, 4946, 4945, 4899, 4900, 4916, 4930, 4942, 5015, 4906, 4906, 4905, 4907, 4916, 5015, 4942, 4913, 5015, 4916, 4942, 4921, 4930, 4942, 4946, 4945, 4944, 4947, 5118, 5015, 5013, 5024, 5032, 5024, 5032, 5015, 5013, 5032, 4463, 4462, 4463, 4463, 4475, 4475, 5122, 4475, 4475, 5124, 5129, 5131, 5133, 5141, 5144, 5015, 5013, 5032, 5152, 5155, 5158, 5160, 4607, 4582, 2187, 2190, 2193, 5080, 5079, 5078, 5146, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5187, 5189, 5191, 5194, 4607, 4607, 4607, 4607, 5198, 5200, 5202, 5105, 5104, 5103, 5204, 5111, 5110, 2324, 5111, 5110, 2330, 5146, 5146, 5115, 5138, 5114, 4642, 4625, 5120, 5119, 5208, 5211, 5214, 5115, 5138, 5114, 4642, 4625, 5120, 5119, 5217, 5220, 5223, 5127, 5126, 5125, 4642, 4641, 5228, 5136, 5135, 5134, 5139, 5138, 5137, 5146, 5150, 5149, 
5148, 5232, 5234, 5236, 5239, 4261, 5196, 5195, 2597, 2598, 4257, 5171, 5170, 2617, 2618, 5196, 5195, 5243, 5192, 4258, 4261, 5196, 5195, 4261, 4261, 4262, 4733, 2815, 2816, 2826, 2827, 4735, 5243, 5242, 5241, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5313, 5312, 5314, 5316, 5315, 5317, 5379, 5378, 5353, 5380, 5381, 5352, 5385, 5318, 1572, 1573, 5388, 5387, 5319, 4066, 5391, 5391, 5390, 1581, 5415, 5397, 5396, 5395, 4068, 5417, 5379, 5378, 5362, 5380, 5381, 5352, 5385, 5324, 1598, 1599, 5388, 5387, 5325, 5024, 5391, 5391, 5390, 1607, 5422, 5327, 5397, 5395, 5408, 5321, 5410, 5409, 5379, 5378, 5377, 5380, 5381, 5352, 5385, 5324, 1625, 1626, 5388, 5387, 5322, 4066, 5391, 5391, 5390, 1634, 5394, 5327, 5397, 5367, 5385, 5324, 5388, 5387, 5325, 4068, 5391, 5391, 5390, 1648, 5394, 5327, 5397, 5367, 5381, 5352, 5379, 5378, 5377, 5380, 5385, 5324, 1661, 1662, 5391, 5391, 5390, 1666, 5388, 5387, 5325, 5024, 5393, 5364, 5327, 5397, 5367, 5408, 5407, 5330, 5329, 5328, 5331, 5332, 5334, 1739, 1740, 1741, 1742, 5337, 5336, 5335, 5338, 5339, 5341, 1749, 1750, 1751, 1752, 5343, 5342, 5345, 5344, 5347, 5346, 5349, 5348, 5379, 5377, 5356, 5380, 5381, 5352, 5385, 5357, 5358, 1820, 5361, 5360, 5359, 1824, 1825, 1826, 1827, 1828, 5379, 5377, 5351, 5380, 5381, 5352, 5385, 5357, 1837, 1838, 5361, 5360, 5354, 1842, 5394, 5393, 5392, 5350, 5379, 5377, 5351, 5358, 1851, 5361, 5360, 5359, 1855, 1856, 1857, 1858, 5379, 5377, 5351, 5380, 5381, 5352, 5385, 5357, 1867, 1868, 5361, 5360, 5354, 1872, 5394, 5393, 5355, 1876, 5379, 5378, 5353, 5385, 5357, 1882, 1883, 5361, 5360, 5354, 1887, 5394, 5393, 5355, 1891, 5379, 5356, 5362, 5380, 5383, 5385, 5357, 5358, 1900, 5361, 5360, 5359, 1904, 1905, 1906, 1907, 1908, 5375, 5372, 5379, 5378, 5362, 5380, 5381, 5383, 5366, 5365, 1960, 1961, 5387, 5363, 1964, 1965, 5394, 5393, 5364, 5397, 5367, 5396, 5387, 5386, 1974, 5391, 5390, 1977, 5394, 5393, 5364, 5379, 5378, 5377, 5380, 5381, 5383, 5366, 5365, 1989, 1990, 5388, 5024, 5391, 1994, 5394, 5393, 5392, 5397, 5367, 5396, 5373, 5376, 2003, 2004, 2005, 2006, 5368, 5369, 2009, 2010, 5370, 2016, 2017, 5371, 5373, 5372, 5374, 5376, 5375, 5379, 5378, 5377, 5380, 5381, 5383, 5385, 5384, 2086, 2087, 5388, 5387, 5386, 5024, 5391, 5391, 5390, 2095, 5394, 5393, 5392, 5397, 5396, 5395, 5398, 5400, 5401, 5403, 5404, 5405, 5408, 5407, 5410, 5409, 2185, 2186, 5162, 5165, 5168, 5476, 2200, 2201, 2202, 2226, 5173, 5519, 5178, 5522, 5183, 5431, 5430, 5432, 5433, 5506, 5507, 5442, 2303, 2304, 2305, 2306, 5444, 2316, 2317, 2318, 5507, 2322, 2323, 5500, 5445, 5476, 2328, 2329, 5500, 5499, 5476, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5476, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5476, 2415, 2416, 4632, 4632, 4632, 4635, 2474, 2475, 2476, 2477, 2478, 5496, 5497, 5498, 5506, 5507, 2494, 2495, 2496, 2500, 2501, 2502, 5500, 5499, 5506, 5507, 2526, 2527, 2528, 2529, 5505, 5504, 5506, 5507, 2564, 5538, 2582, 2583, 5533, 5526, 5525, 5524, 5586, 5585, 5591, 5586, 5585, 2611, 2614, 2615, 5596, 2648, 2649, 5533, 5526, 5525, 5524, 2656, 2657, 2667, 5533, 5534, 5527, 5555, 5554, 5556, 2695, 5533, 5532, 2703, 2704, 5534, 5533, 5532, 2722, 5538, 5555, 5554, 5556, 5565, 5564, 5566, 5555, 5554, 5556, 5565, 5564, 5566, 2802, 5572, 2809, 2814, 5586, 5585, 2828, 5612, 5586, 5585, 2838, 2839, 2840, 57, 58, 59, 60, 61, 62, 63, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5647, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5671, 1600, 1601, 
1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5697, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5733, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 5756, 5758, 1743, 1744, 1745, 1746, 1747, 1748, 5766, 5768, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 5792, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 5804, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 5822, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 5834, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 5849, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 5872, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 5886, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 5915, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 5929, 5931, 2007, 2008, 5935, 2015, 5938, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 5954, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 5980, 5163, 5166, 5169, 2196, 5986, 5174, 5179, 5184, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 6002, 6004, 2307, 6007, 2319, 6011, 2325, 2326, 2327, 6016, 2331, 2332, 2333, 6023, 6026, 2397, 6029, 6031, 6034, 2414, 6037, 2437, 2458, 2471, 2473, 6043, 6046, 2479, 2480, 2482, 2489, 2490, 6053, 6056, 2503, 2504, 2505, 2506, 6064, 2530, 2531, 2532, 2535, 5245, 2567, 6073, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5249, 6085, 6088, 2650, 2651, 2652, 2653, 6094, 5253, 2670, 2671, 2672, 2681, 2682, 2683, 5255, 2698, 2699, 6106, 2705, 2706, 2707, 5257, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5259, 2805, 5261, 5609, 2824, 2825, 6130, 2836, 2837, 6135, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6145, 6148, 6151, 6157, 6160, 6164, 6167, 6171, 6177, 6180, 6184, 6187, 6190, 6192, 6194, 6200, 6203, 6207, 6211, 6214, 6216, 6220, 6224, 6229, 6233, 6236, 6239, 6243, 6245, 6248, 6250, 6258, 6266, 6268, 6270, 6272, 6274, 6280, 5786, 6283, 5793, 6287, 6293, 6296, 6299, 6303, 5817, 6307, 5823, 6311, 6317, 6320, 6323, 6326, 6329, 6332, 6335, 6338, 6343, 5866, 6346, 5873, 6352, 6358, 6361, 6363, 6366, 6369, 6371, 6373, 6376, 6382, 6388, 6391, 6404, 6407, 6409, 6415, 6418, 6422, 6425, 6428, 6437, 6439, 5987, 6197, 6154, 6174, 6432, 6430, 6434, 6154, 6379, 6174, 6197, 6226, 6434, 6450, 6253, 6255, 6261, 6263, 6008, 5541, 6463, 5544, 6467, 6277, 6314, 6379, 6290, 6430, 6277, 6290, 6379, 6314, 6379, 6379, 6432, 6024, 6032, 6412, 6432, 6430, 6397, 6405, 6398, 6355, 6379, 6386, 6384, 6432, 6430, 6405, 6402, 6398, 6400, 6412, 6432, 6430, 6398, 6400, 6044, 6054, 6057, 6491, 6412, 6432, 6430, 6434, 6065, 6496, 6460, 6485, 6484, 5246, 6442, 6441, 6460, 6452, 6451, 6503, 6505, 6493, 6492, 6507, 6444, 6444, 6487, 6486, 6509, 5250, 6447, 6446, 6460, 6452, 6451, 6514, 6516, 6454, 6453, 6460, 6458, 6485, 5254, 6520, 6464, 6523, 6468, 6460, 6485, 6484, 5256, 
6527, 6458, 6485, 6483, 6530, 6460, 6485, 6484, 5258, 6464, 6535, 6468, 6538, 6471, 6541, 6475, 6544, 6485, 6484, 6483, 5260, 6487, 6486, 5262, 5610, 6493, 6492, 6551, 6498, 6497, 6554, 6136, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6152, 6161, 6165, 6168, 6172, 6181, 6185, 6188, 6195, 6204, 6208, 6212, 6217, 6221, 6225, 6230, 6237, 6240, 6246, 6251, 6259, 6275, 6284, 6288, 6297, 6300, 6304, 6308, 6312, 6321, 6324, 6327, 6333, 6336, 6339, 6347, 6353, 6364, 6367, 6374, 6377, 6389, 6392, 6410, 6419, 6423, 6426, 6429, 2203, 6607, 2209, 6595, 2215, 6600, 2221, 2222, 2223, 6593, 6592, 2227, 6595, 2233, 6611, 2239, 6600, 6604, 2246, 6607, 6611, 2257, 6616, 2262, 6621, 2291, 2293, 2294, 2296, 6626, 6624, 2334, 6629, 6632, 2339, 6642, 2344, 6650, 6653, 2349, 6634, 2354, 6397, 6405, 6674, 2359, 6629, 6632, 2364, 6634, 2369, 6646, 6640, 2374, 6642, 2379, 6646, 2384, 6650, 6653, 2389, 6674, 2426, 6669, 2432, 2433, 2434, 2435, 2436, 2438, 6655, 6660, 6656, 6660, 6659, 2447, 6663, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6669, 2466, 2467, 6397, 6405, 2470, 2472, 6405, 6402, 2516, 6669, 2522, 2523, 2524, 6674, 6735, 2561, 2562, 2563, 6443, 2577, 2578, 2579, 2580, 2581, 6754, 6676, 2593, 2594, 6711, 2600, 2601, 6742, 2607, 2608, 6676, 6448, 2643, 2644, 2645, 2646, 2647, 6770, 2654, 2655, 6735, 2664, 2665, 2666, 6521, 6711, 2680, 6712, 2685, 6735, 2692, 2693, 2694, 2700, 2701, 2702, 6531, 6694, 2719, 2720, 2721, 6695, 2737, 6697, 2742, 6711, 2771, 6712, 2776, 6734, 2799, 2800, 2801, 6742, 2807, 2808, 6735, 6736, 2822, 2823, 6742, 2834, 2835, 6747, 6757, 6762, 6812, 6812, 6812, 6779, 6784, 6793, 6795, 6797, 6799, 6801, 6805, 6812, 6812, 6812, 6815, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6856, 2205, 6858, 6857, 6209, 6848, 2211, 6850, 6849, 5656, 6852, 2217, 6854, 6853, 5680, 6903, 2224, 2225, 6848, 2229, 6850, 6849, 5656, 6879, 2235, 6861, 6860, 5661, 6852, 2241, 6854, 6853, 5680, 2245, 6856, 2248, 6858, 6857, 6209, 2252, 6861, 6860, 6222, 6863, 2258, 6865, 6864, 6619, 2263, 6867, 6868, 2297, 2298, 6869, 2336, 6870, 2338, 6876, 2341, 6877, 6878, 6882, 2346, 6883, 2348, 6871, 2351, 6872, 6873, 2355, 2356, 2357, 6869, 2361, 6870, 2363, 6871, 2366, 6872, 6873, 6874, 2371, 6875, 2373, 6876, 2376, 6877, 6878, 6879, 2381, 6880, 6881, 6882, 2386, 6883, 2388, 2390, 6891, 2428, 6893, 6892, 6894, 6961, 6963, 6884, 2440, 2441, 2442, 6885, 2444, 2445, 6887, 6888, 2449, 6974, 6889, 6976, 6978, 6891, 2462, 6893, 6892, 6894, 6984, 2468, 2469, 2492, 2493, 6891, 2518, 6893, 6892, 6894, 6994, 2525, 6980, 6964, 6980, 6979, 2560, 6999, 2576, 7005, 2592, 7010, 2599, 2606, 7016, 2616, 2642, 7022, 7026, 2663, 7029, 2679, 2684, 6980, 6964, 2691, 7038, 7041, 6980, 6964, 6980, 6979, 2718, 7046, 2736, 2741, 2770, 2775, 6980, 6964, 6980, 6979, 6988, 6987, 2798, 7058, 2806, 7062, 2813, 2821, 7066, 2833, 7069, 2848, 6501, 2860, 2866, 2868, 2870, 6512, 2890, 6776, 2903, 2908, 6528, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 2962, 2966, 2970, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 7121, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2292, 2295, 7156, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 
7174, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 2429, 2430, 2431, 2439, 7211, 2443, 7214, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7229, 7231, 2517, 2519, 2520, 2521, 7206, 2553, 2554, 7220, 2558, 2559, 7000, 7119, 7002, 7006, 7237, 7238, 7237, 7238, 7119, 7137, 7227, 7152, 7019, 7023, 7030, 7175, 7206, 2689, 2690, 7039, 7042, 7206, 2711, 2712, 7220, 2716, 2717, 7047, 7175, 7175, 7200, 7206, 2783, 2784, 7220, 2791, 2792, 2796, 2797, 7059, 7237, 7238, 7237, 7238, 2856, 7247, 7249, 7274, 7250, 7285, 7252, 2888, 7286, 2894, 7258, 7259, 2910, 7271, 7272, 7273, 7274, 7283, 7285, 7286, 7288, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7360, 7362, 7364, 7366, 7368, 7370, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7390, 6917, 7394, 7396, 7397, 7399, 7401, 7404, 7406, 7410, 7412, 7415, 7417, 7420, 7423, 7425, 7427, 7429, 7434, 7436, 7438, 7442, 7444, 2552, 7448, 7430, 2557, 7451, 2574, 7372, 7003, 2590, 2591, 2604, 2605, 2625, 7372, 2633, 2640, 2641, 7020, 7237, 7398, 7407, 7405, 6937, 2678, 2688, 7470, 2710, 7475, 7430, 2715, 7478, 7405, 7402, 7407, 7400, 6937, 2735, 7400, 7402, 7405, 7407, 6937, 2755, 7411, 7413, 7416, 7418, 7421, 7424, 6956, 2769, 2782, 7485, 7430, 7432, 7218, 2790, 7488, 7227, 7490, 7237, 2819, 2820, 2831, 2832, 7243, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7256, 2902, 2904, 7262, 7286, 7269, 2926, 2928, 2944, 2946, 7281, 2958, 2961, 2965, 2969, 61, 62, 63, 7581, 7582, 7583, 2556, 7554, 7555, 7552, 7553, 7556, 7557, 2575, 7587, 7588, 7587, 7588, 7552, 7553, 7554, 7555, 7556, 7557, 2626, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7579, 7566, 7567, 7568, 7570, 7569, 7588, 2661, 2662, 7574, 2674, 7573, 2676, 2677, 7581, 7582, 7581, 7582, 7583, 2714, 7573, 2727, 7572, 2729, 7574, 2731, 7571, 2733, 2734, 7571, 2747, 7572, 2749, 7573, 2751, 7574, 2753, 2754, 7575, 2757, 7576, 2759, 7577, 2761, 7578, 2763, 7579, 2765, 7580, 2767, 2768, 7581, 7582, 7583, 2786, 2787, 7584, 2789, 7585, 7586, 2795, 2812, 7587, 7588, 7587, 7588, 7589, 7592, 2847, 7596, 7597, 7599, 7604, 7604, 7606, 2893, 7613, 2907, 2909, 7615, 7618, 2917, 7640, 7645, 2956, 7650, 7652, 7655, 7657, 7656, 7658, 7659, 7660, 7661, 7664, 7663, 7669, 7668, 7671, 7670, 7673, 7674, 7675, 7676, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7594, 2855, 2858, 2864, 7601, 2882, 2886, 2887, 7717, 7723, 2906, 2912, 2916, 7738, 7747, 7760, 2949, 2953, 7770, 7771, 2964, 2968, 7778, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 7785, 2996, 2997, 7787, 7788, 7791, 3009, 3010, 3013, 3014, 7794, 3019, 3021, 3023, 3025, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7904, 7872, 7930, 7930, 7874, 7879, 7893, 7875, 7899, 7877, 2854, 7881, 7883, 7885, 7899, 7887, 7893, 7889, 2876, 7899, 7891, 7893, 7897, 7895, 7901, 7899, 7897, 2892, 7923, 7907, 7906, 7925, 7917, 7918, 2901, 7908, 7910, 7930, 7930, 
7912, 7914, 7916, 7925, 7913, 7915, 7923, 2925, 7918, 7920, 7925, 7917, 7923, 7919, 2936, 7922, 7923, 7921, 7926, 7925, 7924, 2943, 7927, 7930, 7930, 7929, 7931, 2955, 2960, 7933, 7935, 2974, 7940, 7964, 7946, 2993, 7971, 3001, 3002, 3006, 7976, 7978, 3018, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2841, 2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 8000, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 63, 8130, 8133, 8135, 8140, 8142, 8145, 8147, 8150, 2891, 8154, 8156, 8158, 8162, 8165, 8167, 8169, 8171, 8173, 8175, 8177, 8179, 8181, 8184, 8186, 8128, 8137, 8138, 8186, 8159, 8186, 8160, 8186, 8182, 8187, 8187, 8188, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8131, 8194, 8196, 8198, 8151, 8202, 8163, 8206, 8209, 8212, 8185, 2971, 2973, 2979, 2983, 8200, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8136, 8143, 8148, 8203, 8207, 8210, 8213, 8256, 8260, 8260, 2992, 8262, 8262, 8266, 8270, 8278, 8279, 8279, 8269, 8280, 8278, 8279, 8279, 8278, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2972, 2975, 8322, 8320, 2987, 8322, 8321, 8326, 8323, 2999, 3004, 8326, 8324, 8326, 8325, 3016, 3026, 3028, 3029, 3030, 3034, 8330, 3037, 3038, 3040, 3041, 3045, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8384, 2976, 2977, 2988, 2989, 2994, 2995, 8393, 8394, 3007, 3008, 3011, 3012, 8399, 3036, 8402, 8409, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8268, 8449, 8451, 8454, 8273, 8275, 8458, 8460, 8277, 8406, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8450, 8452, 8515, 8512, 8518, 8520, 8516, 8517, 8519, 8515, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3027, 8576, 3032, 3033, 3035, 3039, 3042, 3043, 8577, 3046, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3031, 3044, 8640, 8643, 8644, 8645, 8647, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8704, 8410, 8463, 8521, 8464, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8707, 8769, 8772, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8832, 8649, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8834, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8897, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
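// h_Op: per-gate opcode table — a 1 makes the gate multiply its two operands,
// a 0 makes it add them (see the ternary selects in ac() below).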
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
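// Launch geometry and table sizes for the generated circuit:
// one block of 64 lanes; SIZE_OF_IN = 48 inputs x 64 lanes = 3072 and
// SIZE_OF_AC = 94 gates x 64 lanes = 6016, and 48 + 94 = 142 matches the
// per-lane slot count of the shared scratchpad R in ac() below.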
#define THREADS_PER_BLOCK 64
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3072
#define SIZE_OF_AC 6016
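/*
 * ac evaluates the generated arithmetic circuit. Each of the 64 threads owns
 * one lane: it stages its 48 inputs from A into the shared scratchpad R and
 * then computes the gate outputs into slots 48 and up, multiplying or adding
 * the operands selected through the B/C index tables according to Op.
 *
 * A minimal, hypothetical driver — the wrapper kernel and the d_* device
 * buffers are illustrative assumptions, not part of this file:
 *
 *   __global__ void ac_kernel(float *A, const int *B, const int *C,
 *                             const bool *Op, int n_iter) {
 *     ac(A, B, C, Op, n_iter);
 *   }
 *   // host: copy the inputs plus h_B/h_C/h_Op into d_A/d_B/d_C/d_Op, then
 *   // hipLaunchKernelGGL(ac_kernel, dim3(BLOCKS_PER_GRID),
 *   //                    dim3(THREADS_PER_BLOCK), 0, 0,
 *   //                    d_A, d_B, d_C, d_Op, 1);
 */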
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i = blockDim.x * blockIdx.x + threadIdx.x; // global thread index: one circuit lane per thread
__shared__ float R[142*THREADS_PER_BLOCK];     // per-lane scratchpad: 48 input slots + 94 gate-output slots
const int t = THREADS_PER_BLOCK;               // stride between consecutive slots of one lane
__shared__ float final;                        // shared result cell for the generated code
final = 0; // every thread stores the same 0, so the unsynchronized initialization is benign
// Stage this lane's 48 circuit inputs from global into shared memory
// (behavior-preserving rewrite of the generated fully unrolled loads).
for (int k = 0; k < 48; k++)
    R[i + k*t] = A[i + k*t];
__syncthreads(); // all inputs must be visible before any gate reads its operands
for (int iter = 0; iter < n_iter; iter++) { // repeat the evaluation n_iter times (presumably for timing)
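// Gate k reads its two operands through the index tables B and C and writes
// its result to slot 48+k of this lane. The __syncthreads() calls between
// the groups below separate the dependency levels of the circuit, so a gate
// only ever reads values produced in an earlier level.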
R[i + 48*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 49*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 50*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 51*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 52*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 53*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 54*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 55*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 56*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 57*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 58*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 59*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
__syncthreads();
R[i + 60*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 61*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 62*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 63*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 64*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 65*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 66*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
__syncthreads();
R[i + 67*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 68*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 69*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 70*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 71*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 72*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 73*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 74*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
__syncthreads();
R[i + 75*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 76*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 77*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 78*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 79*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 80*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 81*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 82*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
__syncthreads();
R[i + 83*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 84*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 85*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 86*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 87*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
__syncthreads();
R[i + 88*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 89*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 90*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 91*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 92*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 93*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 94*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 95*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
__syncthreads();
R[i + 96*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 97*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 98*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 99*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 100*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 101*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 102*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
__syncthreads();
R[i + 103*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 104*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 105*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 106*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
__syncthreads();
R[i + 107*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 108*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 109*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 110*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
__syncthreads();
R[i + 111*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 112*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 113*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 114*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
__syncthreads();
R[i + 115*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 116*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 117*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
__syncthreads();
R[i + 118*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 119*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
__syncthreads();
R[i + 120*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 121*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
R[i + 122*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
__syncthreads();
R[i + 123*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 124*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
__syncthreads();
R[i + 125*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 126*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
R[i + 127*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
__syncthreads();
R[i + 128*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
__syncthreads();
R[i + 129*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
__syncthreads();
R[i + 130*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
__syncthreads();
R[i + 131*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
__syncthreads();
R[i + 132*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
__syncthreads();
R[i + 133*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
__syncthreads();
R[i + 134*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
__syncthreads();
R[i + 135*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
__syncthreads();
R[i + 136*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
__syncthreads();
R[i + 137*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
__syncthreads();
R[i + 138*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
__syncthreads();
R[i + 139*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
__syncthreads();
R[i + 140*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
__syncthreads();
R[i + 141*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
if (i==0) { final += R[141*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
6f0d2086720e8c97d0acc67b26e26c691f16e1c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include "cuenergy.h"
//#if UNROLLX != 8
//# error "UNROLLX must be 8"
//#endif
#if BLOCKSIZEX != 16
# error "BLOCKSIZEX must be 16"
#endif
// Max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about.
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
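// (64 KB / 16 B per float4 = 4096 slots; capping at 4070 leaves 26 entries,
// about 416 bytes, of headroom for that runtime/compiler usage.)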
__constant__ float4 atominfo[MAXATOMS];
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
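// For a grid point (x, y) on the z = zplane slice, the potential is
// V = sum_j q_j / sqrt(dx_j^2 + dy_j^2 + dz_j^2), where dz_j^2 arrives
// precomputed in atominfo[j].z (see copyatomstoconstbuf) and q_j in atominfo[j].w.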
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float gridspacing_u = gridspacing * BLOCKSIZEX;
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2));
energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2));
}
energygrid[outaddr] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
}
// This function copies atoms from the CPU to the GPU and
// precalculates (z^2) for each atom.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
hipMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
| 6f0d2086720e8c97d0acc67b26e26c691f16e1c0.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include "cuenergy.h"
//#if UNROLLX != 8
//# error "UNROLLX must be 8"
//#endif
#if BLOCKSIZEX != 16
# error "BLOCKSIZEX must be 16"
#endif
// Max constant buffer size is 64KB, minus whatever
// the CUDA runtime and compiler are using that we don't know about.
// At 16 bytes for atom, for this program 4070 atoms is about the max
// we can store in the constant buffer.
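// (64 KB / 16 B per float4 = 4096 slots; capping at 4070 leaves 26 entries,
// about 416 bytes, of headroom for that runtime/compiler usage.)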
__constant__ float4 atominfo[MAXATOMS];
// This kernel calculates coulombic potential at each grid point and
// stores the results in the output array.
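// For a grid point (x, y) on the z = zplane slice, the potential is
// V = sum_j q_j / sqrt(dx_j^2 + dy_j^2 + dz_j^2), where dz_j^2 arrives
// precomputed in atominfo[j].z (see copyatomstoconstbuf) and q_j in atominfo[j].w.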
__global__ void cenergy(int numatoms, float gridspacing, float * energygrid) {
unsigned int xindex = __umul24(blockIdx.x, blockDim.x) * UNROLLX
+ threadIdx.x;
unsigned int yindex = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
unsigned int outaddr = (__umul24(gridDim.x, blockDim.x) * UNROLLX) * yindex
+ xindex;
float coory = gridspacing * yindex;
float coorx = gridspacing * xindex;
float energyvalx1=0.0f;
float energyvalx2=0.0f;
float gridspacing_u = gridspacing * BLOCKSIZEX;
int atomid;
for (atomid=0; atomid<numatoms; atomid++) {
float dy = coory - atominfo[atomid].y;
float dyz2 = (dy * dy) + atominfo[atomid].z;
float dx1 = coorx - atominfo[atomid].x;
float dx2 = dx1 + gridspacing_u;
energyvalx1 += atominfo[atomid].w * (1.0f / sqrtf(dx1*dx1 + dyz2));
energyvalx2 += atominfo[atomid].w * (1.0f / sqrtf(dx2*dx2 + dyz2));
}
energygrid[outaddr] += energyvalx1;
energygrid[outaddr+1*BLOCKSIZEX] += energyvalx2;
}
// This function copies atoms from the CPU to the GPU and
// precalculates (z^2) for each atom.
int copyatomstoconstbuf(float *atoms, int count, float zplane) {
if (count > MAXATOMS) {
printf("Atom count exceeds constant buffer storage capacity\n");
return -1;
}
float atompre[4*MAXATOMS];
int i;
for (i=0; i<count*4; i+=4) {
atompre[i ] = atoms[i ];
atompre[i + 1] = atoms[i + 1];
float dz = zplane - atoms[i + 2];
atompre[i + 2] = dz*dz;
atompre[i + 3] = atoms[i + 3];
}
cudaMemcpyToSymbol(atominfo, atompre, count * 4 * sizeof(float), 0);
CUERR // check and clear any existing errors
return 0;
}
|
26acfc075ab00939fc99201a4d6980711dd49af8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file relu_lib.cu
* \brief simple custom relu and noisy relu operator implemented using CUDA function
*/
#include <iostream>
#include "lib_api.h"
#define NumThreadPerBlock 256 // mxnet recommended cuda thread number per block
__global__ void relu_gpu_forward(float *out, float *in, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
out[tid] = in[tid] > 0 ? in[tid] : 0;
}
__global__ void relu_gpu_backward(float *ingrad, float *outgrad, float *indata, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
ingrad[tid] = indata[tid] > 0 ? 1 * outgrad[tid] : 0;
}
MXReturnValue forwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
for (int i=0; i<inputs[0].size(); i++) {
out_data[i] = in_data[i] > 0 ? in_data[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue backwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
for (int i=0; i<inputs[1].size(); i++) {
in_grad[i] = in_data[i] > 0 ? 1 * out_grad[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue forwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int num_block = (N + NumThreadPerBlock - 1) / NumThreadPerBlock;
hipLaunchKernelGGL(( relu_gpu_forward), dim3(num_block),dim3(NumThreadPerBlock),0,cuda_stream, out_data, in_data, N);
return MX_SUCCESS;
}
MXReturnValue backwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int num_block = (N + NumThreadPerBlock - 1) / NumThreadPerBlock;
hipLaunchKernelGGL(( relu_gpu_backward), dim3(num_block),dim3(NumThreadPerBlock),0,cuda_stream, in_grad, out_grad, in_data, N);
return MX_SUCCESS;
}
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs, int* num_in, int* num_out) {
*num_in = 1;
*num_out = 1;
return MX_SUCCESS;
}
MXReturnValue inferType(std::map<std::string, std::string> attrs,
std::vector<int> &intypes,
std::vector<int> &outtypes) {
outtypes[0] = intypes[0];
return MX_SUCCESS;
}
MXReturnValue inferShape(std::map<std::string, std::string> attrs,
std::vector<std::vector<unsigned int>> &inshapes,
std::vector<std::vector<unsigned int>> &outshapes) {
outshapes[0] = inshapes[0];
return MX_SUCCESS;
}
REGISTER_OP(my_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(forwardCPU, "cpu")
.setForward(forwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
class MyStatefulReluCPU : public CustomStatefulOp {
public:
explicit MyStatefulReluCPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardCPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardCPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluCPU() {}
};
class MyStatefulReluGPU : public CustomStatefulOp {
public:
explicit MyStatefulReluGPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardGPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardGPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluGPU() {}
};
MXReturnValue createOpStateCPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluCPU();
return MX_SUCCESS;
}
MXReturnValue createOpStateGPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluGPU();
return MX_SUCCESS;
}
REGISTER_OP(my_state_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setCreateOpState(createOpStateCPU, "cpu")
.setCreateOpState(createOpStateGPU, "gpu");
/*
* Below is noisy ReLU operator example
* noisy ReLU is made from ReLU extended to include Gaussian noise
* forward - add Gaussian noise generated from normal distribution to each unit
* backward - gradient doesn't need to change since noise is constant
*/
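// In formula form: out = max(0, x + eps) with eps ~ N(0, 1) drawn per element;
// because eps is treated as a constant w.r.t. x, the operator reuses the plain
// ReLU backward pass registered below.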
#define NumRandomPerThread 64 // mxnet recommended random numbers generated per thread
__global__ void noisy_relu_gpu_forward(float *out, float *in, int64_t N, mx_gpu_rand_t* states, int step) {
// the launcher logic ensures tid less than NumGPURandomStates
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// each thread generates unique sequence of random numbers
mx_gpu_rand_t thread_state = states[tid];
// each thread works on <step> number of calculation
int start = tid * step;
int end = start + step;
for (int i=start; i<end && i<N; ++i) {
float noise = hiprand_normal(&thread_state);
out[i] = in[i] + noise > 0 ? in[i] + noise : 0;
}
}
MXReturnValue noisyForwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_cpu_rand_t* states = res.get_cpu_rand_states();
std::normal_distribution<float> dist_normal;
for (int i=0; i<inputs[0].size(); ++i) {
float noise = dist_normal(*states);
out_data[i] = in_data[i] + noise > 0 ? in_data[i] + noise : 0;
}
return MX_SUCCESS;
}
MXReturnValue noisyForwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
// below is mxnet recommended workflow to parallel random number generating
int nthread = (N + NumRandomPerThread - 1) / NumRandomPerThread;
// we should not launch more threads than mxnet supported random number GPU states
int num_thread_need = nthread < MX_NUM_GPU_RANDOM_STATES ? nthread : MX_NUM_GPU_RANDOM_STATES;
// each cuda thread processes [step * tid, step * id + step) snippet of input tensor
int step = (N + num_thread_need - 1) / num_thread_need;
// this can ensure number of parallel threads less than mxnet supported random number states
int num_block = (num_thread_need + NumThreadPerBlock - 1) / NumThreadPerBlock;
hipLaunchKernelGGL(( noisy_relu_gpu_forward), dim3(num_block),dim3(NumThreadPerBlock),0,cuda_stream,
out_data, in_data, N, res.get_gpu_rand_states(), step);
return MX_SUCCESS;
}
REGISTER_OP(my_noisy_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(noisyForwardCPU, "cpu")
.setForward(noisyForwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
MXReturnValue initialize(int version) {
if (version >= 10400) {
std::cout << "MXNet version " << version << " supported" << std::endl;
return MX_SUCCESS;
} else {
std::cout << "MXNet version " << version << " not supported" << std::endl;
return MX_FAIL;
}
}
| 26acfc075ab00939fc99201a4d6980711dd49af8.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2020 by Contributors
* \file relu_lib.cu
* \brief simple custom relu and noisy relu operator implemented using CUDA function
*/
#include <iostream>
#include "lib_api.h"
#define NumThreadPerBlock 256 // mxnet recommended cuda thread number per block
__global__ void relu_gpu_forward(float *out, float *in, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
out[tid] = in[tid] > 0 ? in[tid] : 0;
}
__global__ void relu_gpu_backward(float *ingrad, float *outgrad, float *indata, int64_t N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N)
ingrad[tid] = indata[tid] > 0 ? 1 * outgrad[tid] : 0;
}
MXReturnValue forwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
for (int i=0; i<inputs[0].size(); i++) {
out_data[i] = in_data[i] > 0 ? in_data[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue backwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
for (int i=0; i<inputs[1].size(); i++) {
in_grad[i] = in_data[i] > 0 ? 1 * out_grad[i] : 0;
}
return MX_SUCCESS;
}
MXReturnValue forwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int num_block = (N + NumThreadPerBlock - 1) / NumThreadPerBlock;
relu_gpu_forward<<<num_block,NumThreadPerBlock,0,cuda_stream>>>(out_data, in_data, N);
return MX_SUCCESS;
}
MXReturnValue backwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* out_grad = inputs[0].data<float>();
float* in_data = inputs[1].data<float>();
float* in_grad = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
int num_block = (N + NumThreadPerBlock - 1) / NumThreadPerBlock;
relu_gpu_backward<<<num_block,NumThreadPerBlock,0,cuda_stream>>>(in_grad, out_grad, in_data, N);
return MX_SUCCESS;
}
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs, int* num_in, int* num_out) {
*num_in = 1;
*num_out = 1;
return MX_SUCCESS;
}
MXReturnValue inferType(std::map<std::string, std::string> attrs,
std::vector<int> &intypes,
std::vector<int> &outtypes) {
outtypes[0] = intypes[0];
return MX_SUCCESS;
}
MXReturnValue inferShape(std::map<std::string, std::string> attrs,
std::vector<std::vector<unsigned int>> &inshapes,
std::vector<std::vector<unsigned int>> &outshapes) {
outshapes[0] = inshapes[0];
return MX_SUCCESS;
}
REGISTER_OP(my_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(forwardCPU, "cpu")
.setForward(forwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
class MyStatefulReluCPU : public CustomStatefulOp {
public:
explicit MyStatefulReluCPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardCPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardCPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluCPU() {}
};
class MyStatefulReluGPU : public CustomStatefulOp {
public:
explicit MyStatefulReluGPU() {}
MXReturnValue Forward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return forwardGPU(attrs, inputs, outputs, op_res);
}
MXReturnValue Backward(std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource op_res) {
std::map<std::string, std::string> attrs;
return backwardGPU(attrs, inputs, outputs, op_res);
}
~MyStatefulReluGPU() {}
};
MXReturnValue createOpStateCPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluCPU();
return MX_SUCCESS;
}
MXReturnValue createOpStateGPU(std::map<std::string, std::string> attrs,
CustomStatefulOp** op_inst) {
*op_inst = new MyStatefulReluGPU();
return MX_SUCCESS;
}
REGISTER_OP(my_state_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setCreateOpState(createOpStateCPU, "cpu")
.setCreateOpState(createOpStateGPU, "gpu");
/*
* Below is noisy ReLU operator example
* noisy ReLU is made from ReLU extended to include Gaussian noise
* forward - add Gaussian noise generated from normal distribution to each unit
* backward - gradient doesn't need to change since noise is constant
*/
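// In formula form: out = max(0, x + eps) with eps ~ N(0, 1) drawn per element;
// because eps is treated as a constant w.r.t. x, the operator reuses the plain
// ReLU backward pass registered below.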
#define NumRandomPerThread 64 // mxnet recommended random numbers generated per thread
__global__ void noisy_relu_gpu_forward(float *out, float *in, int64_t N, mx_gpu_rand_t* states, int step) {
// the launcher logic ensures tid less than NumGPURandomStates
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// each thread generates unique sequence of random numbers
mx_gpu_rand_t thread_state = states[tid];
// each thread works on <step> number of calculation
int start = tid * step;
int end = start + step;
for (int i=start; i<end && i<N; ++i) {
float noise = curand_normal(&thread_state);
out[i] = in[i] + noise > 0 ? in[i] + noise : 0;
}
}
MXReturnValue noisyForwardCPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_cpu_rand_t* states = res.get_cpu_rand_states();
std::normal_distribution<float> dist_normal;
for (int i=0; i<inputs[0].size(); ++i) {
float noise = dist_normal(*states);
out_data[i] = in_data[i] + noise > 0 ? in_data[i] + noise : 0;
}
return MX_SUCCESS;
}
MXReturnValue noisyForwardGPU(std::map<std::string, std::string> attrs,
std::vector<MXTensor> inputs,
std::vector<MXTensor> outputs,
OpResource res) {
float* in_data = inputs[0].data<float>();
float* out_data = outputs[0].data<float>();
mx_stream_t cuda_stream = res.get_cuda_stream();
int64_t N = inputs[0].size();
// below is mxnet recommended workflow to parallel random number generating
int nthread = (N + NumRandomPerThread - 1) / NumRandomPerThread;
// we should not launch more threads than mxnet supported random number GPU states
int num_thread_need = nthread < MX_NUM_GPU_RANDOM_STATES ? nthread : MX_NUM_GPU_RANDOM_STATES;
// each cuda thread processes [step * tid, step * id + step) snippet of input tensor
int step = (N + num_thread_need - 1) / num_thread_need;
// this can ensure number of parallel threads less than mxnet supported random number states
int num_block = (num_thread_need + NumThreadPerBlock - 1) / NumThreadPerBlock;
noisy_relu_gpu_forward<<<num_block,NumThreadPerBlock,0,cuda_stream>>>(
out_data, in_data, N, res.get_gpu_rand_states(), step);
return MX_SUCCESS;
}
REGISTER_OP(my_noisy_relu)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(noisyForwardCPU, "cpu")
.setForward(noisyForwardGPU, "gpu")
.setBackward(backwardCPU, "cpu")
.setBackward(backwardGPU, "gpu");
MXReturnValue initialize(int version) {
if (version >= 10400) {
std::cout << "MXNet version " << version << " supported" << std::endl;
return MX_SUCCESS;
} else {
std::cout << "MXNet version " << version << " not supported" << std::endl;
return MX_FAIL;
}
}
|
2aded7166a41e432d8b55a34917b03e42882cfa7.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/hip/detail/IndexUtils.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cmath>
#include "ATen/TensorUtils.h"
#include "ATen/AccumulateType.h"
#include <THH/THHGeneral.h>
#include "type_shim.h"
template <typename T, typename GRAD_T>
__global__ void adam_cuda_kernel(
T* __restrict__ p,
T* __restrict__ m,
T* __restrict__ v,
const GRAD_T * __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const size_t tsize,
const float decay_size)
{
//Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
const int threadsPerBlock = blockDim.x * blockDim.y;
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int i = (blockId * threadsPerBlock + threadIdInBlock);
const int totThreads = gridDim.x*gridDim.y*threadsPerBlock;
for (int j = i; j < tsize; j+=totThreads) {
// weight decay
p[j] = p[j] * decay_size;
T scaled_grad = static_cast<T>(g[j]) / grad_scale;
m[j] = b1*m[j] + (1-b1)*scaled_grad;
v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad;
const float update = m[j] / (sqrtf(v[j]) + eps);
p[j] = p[j] - (step_size*update);
}
}
void fused_adam_cuda(
at::Tensor & p,
at::Tensor & m,
at::Tensor & v,
at::Tensor & g,
float lr,
float beta1,
float beta2,
float eps,
float grad_scale,
int step,
int bias_correction,
float decay)
{
//Get tensor size
int tsize = p.numel();
//Determine #threads and #blocks
const int threadsPerBlock = 512;
const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock);
AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32");
//Constants
float step_size = lr;
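    // Fold Adam's bias correction into the step size:
    //   step_size = lr * sqrt(1 - beta2^t) / (1 - beta1^t)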
if (bias_correction == 1) {
const double bias_correction1 = 1.0 - ::pow(static_cast<double>(beta1), step);
const double bias_correction2 = 1.0 - ::pow(static_cast<double>(beta2), step);
step_size = static_cast<float>(lr * std::sqrt(bias_correction2) / bias_correction1);
}
float decay_size = 1.0;
if (decay != 0.0) {
decay_size = 1.0 - step_size * decay;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (g.scalar_type() == at::ScalarType::Half || g.scalar_type() == at::ScalarType::BFloat16) {
AT_ASSERTM(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type");
using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_AND_HALF_AND_BF16(g.scalar_type(), 0, "adam_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( adam_cuda_kernel<accscalar_t, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream,
p.DATA_PTR<accscalar_t>(),
m.DATA_PTR<accscalar_t>(),
v.DATA_PTR<accscalar_t>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
} else {
using namespace at;
DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel",
hipLaunchKernelGGL(( adam_cuda_kernel<scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream,
p.DATA_PTR<scalar_t_0>(),
m.DATA_PTR<scalar_t_0>(),
v.DATA_PTR<scalar_t_0>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
}
THCudaCheck(hipGetLastError());
}
| 2aded7166a41e432d8b55a34917b03e42882cfa7.cu | #include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cmath>
#include "ATen/TensorUtils.h"
#include "ATen/AccumulateType.h"
#include <THC/THCGeneral.h>
#include "type_shim.h"
template <typename T, typename GRAD_T>
__global__ void adam_cuda_kernel(
T* __restrict__ p,
T* __restrict__ m,
T* __restrict__ v,
const GRAD_T * __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const size_t tsize,
const float decay_size)
{
//Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
const int threadsPerBlock = blockDim.x * blockDim.y;
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int i = (blockId * threadsPerBlock + threadIdInBlock);
const int totThreads = gridDim.x*gridDim.y*threadsPerBlock;
for (int j = i; j < tsize; j+=totThreads) {
// weight decay
p[j] = p[j] * decay_size;
T scaled_grad = static_cast<T>(g[j]) / grad_scale;
m[j] = b1*m[j] + (1-b1)*scaled_grad;
v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad;
const float update = m[j] / (sqrtf(v[j]) + eps);
p[j] = p[j] - (step_size*update);
}
}
void fused_adam_cuda(
at::Tensor & p,
at::Tensor & m,
at::Tensor & v,
at::Tensor & g,
float lr,
float beta1,
float beta2,
float eps,
float grad_scale,
int step,
int bias_correction,
float decay)
{
//Get tensor size
int tsize = p.numel();
//Determine #threads and #blocks
const int threadsPerBlock = 512;
const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock);
AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32");
//Constants
float step_size = lr;
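    // Fold Adam's bias correction into the step size:
    //   step_size = lr * sqrt(1 - beta2^t) / (1 - beta1^t)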
if (bias_correction == 1) {
const double bias_correction1 = 1.0 - std::pow(static_cast<double>(beta1), step);
const double bias_correction2 = 1.0 - std::pow(static_cast<double>(beta2), step);
step_size = static_cast<float>(lr * std::sqrt(bias_correction2) / bias_correction1);
}
float decay_size = 1.0;
if (decay != 0.0) {
decay_size = 1.0 - step_size * decay;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (g.scalar_type() == at::ScalarType::Half || g.scalar_type() == at::ScalarType::BFloat16) {
AT_ASSERTM(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type");
using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_AND_HALF_AND_BF16(g.scalar_type(), 0, "adam_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
adam_cuda_kernel<accscalar_t, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.DATA_PTR<accscalar_t>(),
m.DATA_PTR<accscalar_t>(),
v.DATA_PTR<accscalar_t>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
} else {
using namespace at;
DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel",
adam_cuda_kernel<scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.DATA_PTR<scalar_t_0>(),
m.DATA_PTR<scalar_t_0>(),
v.DATA_PTR<scalar_t_0>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
}
THCudaCheck(cudaGetLastError());
}
|
c31080c83389701467a8f594bbec6a2facee1aa1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <gptl.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__host__ int sleep1 (int outerlooplen, int oversub, int cores_per_sm, int cores_per_gpu)
{
int blocksize, gridsize;
int ret;
int n, nn;
int totalwork;
int chunksize;
int nchunks;
  static const char *thisfunc = "sleep1";
chunksize = outerlooplen;
nchunks = (outerlooplen + (chunksize-1)) / chunksize;
printf ("outerlooplen=%d broken into %d kernels of chunksize=%d\n",
outerlooplen, nchunks, chunksize);
printf ("%s: issuing hipMalloc calls to hold results\n", thisfunc);
n = 0;
for (nn = 0; nn < outerlooplen; nn += chunksize) {
printf ("chunk=%d totalwork=%d\n", n, MIN (chunksize, outerlooplen - nn));
++n;
}
printf ("Sleeping 1 second on GPU...\n");
ret = GPTLstart ("sleep1ongpu");
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
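    // ceil-divide: round up so gridsize * blocksize covers all of totalwork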
gridsize = (totalwork-1) / blocksize + 1;
hipLaunchKernelGGL(( sleep) , dim3(gridsize), dim3(blocksize), 0, 0, 1.f, outerlooplen);
hipDeviceSynchronize();
}
ret = GPTLstop ("sleep1ongpu");
return 0;
}
| c31080c83389701467a8f594bbec6a2facee1aa1.cu | #include <stdio.h>
#include <stdlib.h>
#include <gptl.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__host__ int sleep1 (int outerlooplen, int oversub, int cores_per_sm, int cores_per_gpu)
{
int blocksize, gridsize;
int ret;
int n, nn;
int totalwork;
int chunksize;
int nchunks;
  static const char *thisfunc = "sleep1";
chunksize = outerlooplen;
nchunks = (outerlooplen + (chunksize-1)) / chunksize;
printf ("outerlooplen=%d broken into %d kernels of chunksize=%d\n",
outerlooplen, nchunks, chunksize);
printf ("%s: issuing cudaMalloc calls to hold results\n", thisfunc);
n = 0;
for (nn = 0; nn < outerlooplen; nn += chunksize) {
printf ("chunk=%d totalwork=%d\n", n, MIN (chunksize, outerlooplen - nn));
++n;
}
printf ("Sleeping 1 second on GPU...\n");
ret = GPTLstart ("sleep1ongpu");
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
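    // ceil-divide: round up so gridsize * blocksize covers all of totalwork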
gridsize = (totalwork-1) / blocksize + 1;
sleep <<<gridsize, blocksize>>> (1.f, outerlooplen);
cudaDeviceSynchronize();
}
ret = GPTLstop ("sleep1ongpu");
return 0;
}
|
3c587abfc5353f97e2c73af581f6d70a8c7d96f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
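// Evaluates p(x) = sum_{k=0}^{degree} poly[k] * x^k for each element of
// array in place, accumulating the power x^k iteratively (not Horner's rule).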
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
float out = 0.;
float xdegree = 1.;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
float x = array[i];
for (int k=0; k<=degree; ++k) {
out += xdegree*poly[k];
xdegree *= x;
}
array[i] = out;
}
}
int main (int argc, char* argv[]) {
if (argc < 3) {
std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
return -1;
}
int n = atoi(argv[1]);
int degree = atoi(argv[2]);
int nbiter = 1;
float* array = new float[n];
float* poly = new float[degree+1];
float *xPointer, *coeffPointer;
for (int i=0; i<n; ++i)
array[i] = 1.0;
for (int i=0; i<degree+1; ++i)
poly[i] = 1.0;
hipMalloc(&xPointer, n*sizeof(float));
hipMalloc(&coeffPointer, (degree+1)*sizeof(float));
// hipMemcpy(xPointer, array, n*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(coeffPointer, poly, (degree+1)*sizeof(float), hipMemcpyHostToDevice);
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for (int iter = 0; iter<nbiter; ++iter){
hipLaunchKernelGGL(( polynomial_expansion), dim3((n+255)/256), dim3(256), 0, 0, coeffPointer, degree, n, xPointer);
}
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
//hipMemcpy(array, xPointer, n*sizeof(float), hipMemcpyDeviceToHost);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end-begin)/nbiter;
std::cerr<<array[0]<<std::endl;
std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
delete[] array;
delete[] poly;
hipFree(xPointer);
hipFree(coeffPointer);
return 0;
}
| 3c587abfc5353f97e2c73af581f6d70a8c7d96f3.cu | #include <iostream>
#include <chrono>
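// Evaluates p(x) = sum_{k=0}^{degree} poly[k] * x^k for each element of
// array in place, accumulating the power x^k iteratively (not Horner's rule).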
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
float out = 0.;
float xdegree = 1.;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
float x = array[i];
for (int k=0; k<=degree; ++k) {
out += xdegree*poly[k];
xdegree *= x;
}
array[i] = out;
}
}
int main (int argc, char* argv[]) {
if (argc < 3) {
std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
return -1;
}
int n = atoi(argv[1]);
int degree = atoi(argv[2]);
int nbiter = 1;
float* array = new float[n];
float* poly = new float[degree+1];
float *xPointer, *coeffPointer;
for (int i=0; i<n; ++i)
array[i] = 1.0;
for (int i=0; i<degree+1; ++i)
poly[i] = 1.0;
cudaMalloc(&xPointer, n*sizeof(float));
cudaMalloc(&coeffPointer, (degree+1)*sizeof(float));
// cudaMemcpy(xPointer, array, n*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(coeffPointer, poly, (degree+1)*sizeof(float), cudaMemcpyHostToDevice);
std::chrono::time_point<std::chrono::system_clock> begin, end;
begin = std::chrono::system_clock::now();
for (int iter = 0; iter<nbiter; ++iter){
polynomial_expansion<<<(n+255)/256, 256>>>(coeffPointer, degree, n, xPointer);
}
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
//cudaMemcpy(array, xPointer, n*sizeof(float), cudaMemcpyDeviceToHost);
end = std::chrono::system_clock::now();
std::chrono::duration<double> totaltime = (end-begin)/nbiter;
std::cerr<<array[0]<<std::endl;
std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
delete[] array;
delete[] poly;
cudaFree(xPointer);
cudaFree(coeffPointer);
return 0;
}
|
ee97f6d8ade6632f790a1ef65772dda590d238c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
template <int BLOCK_SIZE>
struct TPairBinaryHist {
float* Slice;
__forceinline__ __device__ int HistSize() {
return BLOCK_SIZE * 16;
}
__forceinline__ __device__ int SliceOffset() {
return 512 * (threadIdx.x >> 5);
}
__forceinline__ __device__ TPairBinaryHist(float* buff) {
Slice = buff;
for (int i = threadIdx.x; i < HistSize(); i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
int f = (((threadIdx.x >> 2) + i) & 7) << 2;
const int bin1 = bfe(ci1, 28 - f, 4);
const int bin2 = bfe(ci2, 28 - f, 4);
const int invBin1 = (~bin1) & 15;
const int invBin2 = (~bin2) & 15;
//00 01 10 11
const ui32 bins = (invBin1 & invBin2) | ((invBin1 & bin2) << 8) | ((bin1 & invBin2) << 16) | ((bin1 & bin2) << 24);
#pragma unroll
for (int currentHist = 0; currentHist < 4; ++currentHist) {
const uchar histOffset = (threadIdx.x + currentHist) & 3;
const short bin = (bins >> (histOffset << 3)) & 15;
// 32 * bin + 4 * featureId + histId
//512 floats per warp
syncTile.sync();
Slice[f + (bin << 5) + histOffset] += w;
}
}
}
__forceinline__ __device__ void Reduce() {
__syncthreads();
Slice -= SliceOffset();
float sum = 0.f;
if (threadIdx.x < 512) {
const int warpCount = BLOCK_SIZE / 32;
int binId = threadIdx.x / 32;
const int x = threadIdx.x & 31;
Slice += 32 * binId + x;
{
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId) {
sum += Slice[warpId * 512];
}
}
}
__syncthreads();
if (threadIdx.x < 512) {
Slice[0] = sum;
}
__syncthreads();
}
};
template<int BLOCK_SIZE, int INNER_UNROLL, int OUTER_UNROLL, int BLOCKS_PER_FEATURE>
__forceinline__ __device__ void ComputeSplitPropertiesBinaryPass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
float* __restrict histogram,
float* __restrict smem) {
using THist = TPairBinaryHist<BLOCK_SIZE>;
ComputePairHistogram<BLOCK_SIZE, 1, INNER_UNROLL, OUTER_UNROLL, BLOCKS_PER_FEATURE, THist >(partition->Offset, cindex, partition->Size, pairs, weight, smem);
const int histId = threadIdx.x & 3;
const int fid = (threadIdx.x >> 2);
__syncthreads();
if (fid < fCount) {
float sum = 0;
const int groupId = fid / 4;
const int fixedBitId = 3 - fid % 4;
const int activeMask = (1 << fixedBitId);
//fix i'th bit and iterate through others
for (int i = 0; i < 16; ++i) {
if (i & activeMask) {
sum += smem[32 * i + 4 * groupId + histId];
}
}
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(histogram + feature[fid].FirstFoldIndex * 4 + histId, sum);
} else {
histogram[feature[fid].FirstFoldIndex * 4 + histId] += sum;
}
}
__syncthreads();
}
#define DECLARE_PASS_BINARY(N, OUTER_UNROLL, M) \
ComputeSplitPropertiesBinaryPass<BLOCK_SIZE, N, OUTER_UNROLL, M>(feature, fCount, cindex, pairs, weight, partition, histogram, &localHist[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBinaryPairs(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
{
const int featureOffset = (blockIdx.x / M) * 32;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 32);
}
if (FULL_PASS) {
partition += blockIdx.y;
histogram += blockIdx.y * ((ui64)histLineSize * 4ULL);
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * ((ui64)histLineSize) * 4ULL;
}
__shared__ float localHist[16 * BLOCK_SIZE];
if (partition->Size == 0) {
return;
}
DECLARE_PASS_BINARY(1, 1, M);
}
void ComputePairwiseHistogramBinary(const TCFeature* features,
const ui32 featureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
TCudaStream stream) {
if (featureCount > 0) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.x = (featureCount + 31) / 32;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64);
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \
ComputeSplitPropertiesBinaryPairs < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
#define DISPATCH(BLOCKS_PER_FEATURE) \
if (fullPass) { \
NB_HIST(true, BLOCKS_PER_FEATURE) \
} else { \
NB_HIST(false, BLOCKS_PER_FEATURE)\
}
if (blockPerFeatureMultiplier == 1) {
DISPATCH(1);
} else if (blockPerFeatureMultiplier == 2) {
DISPATCH(2);
} else if (blockPerFeatureMultiplier == 4) {
DISPATCH(4);
} else if (blockPerFeatureMultiplier == 8) {
DISPATCH(8);
} else if (blockPerFeatureMultiplier == 16) {
DISPATCH(16);
} else if (blockPerFeatureMultiplier == 32) {
DISPATCH(32);
} else if (blockPerFeatureMultiplier == 64) {
DISPATCH(64);
} else {
exit(0);
}
#undef NB_HIST
#undef DISPATCH
}
}
}
| ee97f6d8ade6632f790a1ef65772dda590d238c1.cu | #include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel {
template <int BLOCK_SIZE>
struct TPairBinaryHist {
float* Slice;
__forceinline__ __device__ int HistSize() {
return BLOCK_SIZE * 16;
}
__forceinline__ __device__ int SliceOffset() {
return 512 * (threadIdx.x >> 5);
}
__forceinline__ __device__ TPairBinaryHist(float* buff) {
Slice = buff;
for (int i = threadIdx.x; i < HistSize(); i += BLOCK_SIZE) {
Slice[i] = 0;
}
Slice += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1, const ui32 ci2, const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
int f = (((threadIdx.x >> 2) + i) & 7) << 2;
const int bin1 = bfe(ci1, 28 - f, 4);
const int bin2 = bfe(ci2, 28 - f, 4);
const int invBin1 = (~bin1) & 15;
const int invBin2 = (~bin2) & 15;
//00 01 10 11
const ui32 bins = (invBin1 & invBin2) | ((invBin1 & bin2) << 8) | ((bin1 & invBin2) << 16) | ((bin1 & bin2) << 24);
#pragma unroll
for (int currentHist = 0; currentHist < 4; ++currentHist) {
const uchar histOffset = (threadIdx.x + currentHist) & 3;
const short bin = (bins >> (histOffset << 3)) & 15;
// 32 * bin + 4 * featureId + histId
//512 floats per warp
syncTile.sync();
Slice[f + (bin << 5) + histOffset] += w;
}
}
}
__forceinline__ __device__ void Reduce() {
__syncthreads();
Slice -= SliceOffset();
float sum = 0.f;
if (threadIdx.x < 512) {
const int warpCount = BLOCK_SIZE / 32;
int binId = threadIdx.x / 32;
const int x = threadIdx.x & 31;
Slice += 32 * binId + x;
{
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId) {
sum += Slice[warpId * 512];
}
}
}
__syncthreads();
if (threadIdx.x < 512) {
Slice[0] = sum;
}
__syncthreads();
}
};
template<int BLOCK_SIZE, int INNER_UNROLL, int OUTER_UNROLL, int BLOCKS_PER_FEATURE>
__forceinline__ __device__ void ComputeSplitPropertiesBinaryPass(const TCFeature* feature, int fCount,
const ui32* __restrict cindex,
const uint2* __restrict pairs,
const float* __restrict weight,
const TDataPartition* partition,
float* __restrict histogram,
float* __restrict smem) {
using THist = TPairBinaryHist<BLOCK_SIZE>;
ComputePairHistogram<BLOCK_SIZE, 1, INNER_UNROLL, OUTER_UNROLL, BLOCKS_PER_FEATURE, THist >(partition->Offset, cindex, partition->Size, pairs, weight, smem);
const int histId = threadIdx.x & 3;
const int fid = (threadIdx.x >> 2);
__syncthreads();
if (fid < fCount) {
float sum = 0;
const int groupId = fid / 4;
const int fixedBitId = 3 - fid % 4;
const int activeMask = (1 << fixedBitId);
//fix i'th bit and iterate through others
for (int i = 0; i < 16; ++i) {
if (i & activeMask) {
sum += smem[32 * i + 4 * groupId + histId];
}
}
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(histogram + feature[fid].FirstFoldIndex * 4 + histId, sum);
} else {
histogram[feature[fid].FirstFoldIndex * 4 + histId] += sum;
}
}
__syncthreads();
}
#define DECLARE_PASS_BINARY(N, OUTER_UNROLL, M) \
ComputeSplitPropertiesBinaryPass<BLOCK_SIZE, N, OUTER_UNROLL, M>(feature, fCount, cindex, pairs, weight, partition, histogram, &localHist[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBinaryPairs(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
{
const int featureOffset = (blockIdx.x / M) * 32;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 32);
}
if (FULL_PASS) {
partition += blockIdx.y;
histogram += blockIdx.y * ((ui64)histLineSize * 4ULL);
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * ((ui64)histLineSize) * 4ULL;
}
__shared__ float localHist[16 * BLOCK_SIZE];
if (partition->Size == 0) {
return;
}
DECLARE_PASS_BINARY(1, 1, M);
}
void ComputePairwiseHistogramBinary(const TCFeature* features,
const ui32 featureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
TCudaStream stream) {
if (featureCount > 0) {
const int blockSize = 768;
dim3 numBlocks;
numBlocks.x = (featureCount + 31) / 32;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64);
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \
ComputeSplitPropertiesBinaryPairs < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
#define DISPATCH(BLOCKS_PER_FEATURE) \
if (fullPass) { \
NB_HIST(true, BLOCKS_PER_FEATURE) \
} else { \
NB_HIST(false, BLOCKS_PER_FEATURE)\
}
if (blockPerFeatureMultiplier == 1) {
DISPATCH(1);
} else if (blockPerFeatureMultiplier == 2) {
DISPATCH(2);
} else if (blockPerFeatureMultiplier == 4) {
DISPATCH(4);
} else if (blockPerFeatureMultiplier == 8) {
DISPATCH(8);
} else if (blockPerFeatureMultiplier == 16) {
DISPATCH(16);
} else if (blockPerFeatureMultiplier == 32) {
DISPATCH(32);
} else if (blockPerFeatureMultiplier == 64) {
DISPATCH(64);
} else {
exit(0);
}
#undef NB_HIST
#undef DISPATCH
}
}
}
|
0594408c6faa26c9c7efe36ca967532c31d5f713.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "common.h"
template <typename Dtype, typename Acctype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Acctype aveval = Acctype(0);
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if(COUNT_INCLUDE_PAD)
top_data[index] = ScalarConvert<Acctype, Dtype>::to(aveval / pool_size);
else
top_data[index] = ScalarConvert<Acctype, Dtype>::to(aveval / ((hend - hstart) * (wend - wstart)));
}
}
template <typename Dtype, typename Acctype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Acctype gradient = Acctype(0);
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if(COUNT_INCLUDE_PAD)
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
else
gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart));
}
}
bottom_diff[index] = ScalarConvert<Acctype, Dtype>::to(gradient);
}
}
#include "generic/SpatialAveragePooling.cu"
#include "THHGenerateFloatTypes.h"
| 0594408c6faa26c9c7efe36ca967532c31d5f713.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "common.h"
template <typename Dtype, typename Acctype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
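  // One thread per pooled output element; `index` decodes to (n, c, ph, pw) below.
  // With COUNT_INCLUDE_PAD the divisor is pool_size, the window size computed
  // before clamping (padding included); otherwise only in-bounds cells count.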
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Acctype aveval = Acctype(0);
const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
if(COUNT_INCLUDE_PAD)
top_data[index] = ScalarConvert<Acctype, Dtype>::to(aveval / pool_size);
else
top_data[index] = ScalarConvert<Acctype, Dtype>::to(aveval / ((hend - hstart) * (wend - wstart)));
}
}
template <typename Dtype, typename Acctype, bool COUNT_INCLUDE_PAD>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
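    // (phstart..phend) x (pwstart..pwend) enumerate every pooled window whose
    // receptive field covers input cell (h, w); each window hands back its
    // share of the gradient, divided by that window's pool size.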
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Acctype gradient = Acctype(0);
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if(COUNT_INCLUDE_PAD)
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
else
gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart));
}
}
bottom_diff[index] = ScalarConvert<Acctype, Dtype>::to(gradient);
}
}
#include "generic/SpatialAveragePooling.cu"
#include "THCGenerateFloatTypes.h"
|
d4c7a81660056659577b9b5ab106f8623c8dea64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "CImg.h"
#include <iostream>
#include <chrono>
#include <fstream>
using namespace cimg_library;
using namespace std;
int* rgbGPU;
int* rgbBlurGPU;
unsigned char* imageData;
double* mask;
__global__ void startKernel() {
}
__global__ void seperateRGB(unsigned char* image, int* resultArray,int width, int height) {
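	// Unpack interleaved RGBRGB... bytes into three planar int channels,
	// stored back-to-back as [R plane | G plane | B plane] of width*height each.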
int k = threadIdx.x + blockIdx.x * blockDim.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (k < width && row < height) {
int rowOffset = (width * row * 3);
int r = (int)image[rowOffset + k * 3];
int g = (int)image[rowOffset + k * 3 + 1];
int b = (int)image[rowOffset + k * 3 + 2];
resultArray[(row * width) + k] = r;
resultArray[(width * height) + (row * width) + k] = g;
resultArray[((width * height) * 2) + (row * width) + k] = b;
}
}
__global__ void Blur(int* rgbArray, int* blurImage, double* mask, int width, int height, int maskSize) {
int k = threadIdx.x + blockIdx.x * blockDim.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (k < width && row < height) {
int currentHeight = (row * width);
int offset = width * height;
		blurImage[currentHeight + k] = 0; //red
		blurImage[offset + currentHeight + k] = 0; //green
		blurImage[offset * 2 + currentHeight + k] = 0; //blue
double r = 0.;
double g = 0.;
double b = 0.;
for (int i = 0; i < maskSize; i++)
{
			int maskHeight = (i - ((maskSize - 1) / 2) + row) * width;
			maskHeight = max(maskHeight, 0);
			maskHeight = min(maskHeight, (height - 1) * width); // clamp to the last row, not one past the buffer
			for (int j = 0; j < maskSize; j++) {
				int maskWidth = k + j - ((maskSize - 1) / 2);
				maskWidth = max(maskWidth, 0);
				maskWidth = min(maskWidth, width - 1); // clamp to the last column
r = r + rgbArray[maskHeight + maskWidth] * mask[i * maskSize + j];
g = g + rgbArray[offset + maskHeight + maskWidth] * mask[i * maskSize + j];
b = b + rgbArray[offset * 2 + maskHeight + maskWidth] * mask[i * maskSize + j];
}
}
blurImage[currentHeight + k] = r;
blurImage[offset + currentHeight + k] = g;
blurImage[offset * 2 + currentHeight + k] = b;
}
}
int width;
int height;
unsigned char* pixelDataChar;
int dataSize;
float maxValue;
void readPPM(string fileName) {
ifstream file;
string version;
file.open(fileName, ios::in | ios::binary);
if (!file) {
cerr << "file could not be open" << endl;
exit(EXIT_FAILURE);
}
file >> version;
// Check version equals P6
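	// (P6 is the binary PPM flavour: an ASCII header "P6 <width> <height> <maxval>"
	// followed by raw interleaved RGB bytes, which is what read() consumes below.)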
if (version.compare("P6") != 0)
{
cout << "Invalid image format (must be 'P6')";
exit(EXIT_FAILURE);
}
file >> width >> height >> maxValue;
dataSize = height * width * 3;
pixelDataChar = new unsigned char[dataSize];
file.get();
file.read((char*)pixelDataChar, dataSize);
file.close();
}
void test(bool show)
{
//Mask
CImg<double> mask5(5, 5);
mask5(0, 0) = mask5(0, 4) = mask5(4, 0) = mask5(4, 4) = 1.0 / 256.0;
mask5(0, 1) = mask5(0, 3) = mask5(1, 0) = mask5(1, 4) = mask5(3, 0) = mask5(3, 4) = mask5(4, 1) = mask5(4, 3) = 4.0 / 256.0;
mask5(0, 2) = mask5(2, 0) = mask5(2, 4) = mask5(4, 2) = 6.0 / 256.0;
mask5(1, 1) = mask5(1, 3) = mask5(3, 1) = mask5(3, 3) = 16.0 / 256.0;
mask5(1, 2) = mask5(2, 1) = mask5(2, 3) = mask5(3, 2) = 24.0 / 256.0;
mask5(2, 2) = 36.0 / 256.0;
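	// (These weights form the 5x5 binomial approximation to a Gaussian: the
	// outer product of [1 4 6 4 1]/16 with itself, so they sum to exactly 1.)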
double maskArray[5 * 5];
double sum = 0.;
//Make the mask vector an array
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 5; j++) {
maskArray[i * 5 + j] = mask5(i, j);
}
}
//Read data from PPM
readPPM("cake.ppm");
	// The CImg objects load the small cake for the CPU reference blur and display;
	// the raw big-cake pixels from readPPM() above feed the GPU buffers below.
	// CImg<unsigned char> image("cake-small.ppm"), blurimage("cake-small.ppm");
	CImg<unsigned char> image("cake-small.ppm"), blurimage("cake-small.ppm"), blurImageGPU("cake-small.ppm");
int* arrayRGB;
int* arrayBlur;
arrayRGB = new int[dataSize];
arrayBlur = new int[dataSize];
for (int i = 0; i < (height * width) * 3; i++) {
arrayRGB[i] = -1;
arrayBlur[i] = -1;
}
int threads = 4;
dim3 block_dim(threads, threads, 1);
	dim3 grid_dim((width + threads - 1) / threads, (height + threads - 1) / threads, 1);
startKernel << <1, 1 >> > ();
hipDeviceSynchronize();
std::cout << "Start GPU" << std::endl;
auto begin = std::chrono::high_resolution_clock::now();
hipMalloc((void**)&rgbGPU, dataSize * sizeof(int));
hipMalloc((void**)&rgbBlurGPU, dataSize * sizeof(int));
hipMalloc((void**)&imageData, dataSize * sizeof(unsigned char));
hipMalloc((void**)&mask, 5 * 5 * sizeof(double));
hipMemcpy(rgbGPU, arrayRGB, dataSize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(imageData, pixelDataChar, dataSize * sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(mask, maskArray, 5 * 5* sizeof(double), hipMemcpyHostToDevice);
hipError_t err;
seperateRGB << <grid_dim, block_dim >> > (imageData, rgbGPU, width, height);
/* err = hipGetLastError();
if (err != hipSuccess) {
cout << "error: " << hipGetErrorString(err) << " err code: " << err << endl;
err = hipSuccess;
getchar();
}*/
hipDeviceSynchronize();
Blur << <grid_dim, block_dim >> > (rgbGPU, rgbBlurGPU, mask, width, height, 5);
/* err = hipGetLastError();
if (err != hipSuccess) {
cout << "error: " << hipGetErrorString(err) << " err code: " << err << " func 2" << endl;
err = hipSuccess;
}*/
hipDeviceSynchronize();
hipMemcpy(arrayBlur, rgbBlurGPU, dataSize * sizeof(int), hipMemcpyDeviceToHost);
auto end = std::chrono::high_resolution_clock::now();
int offset = width * height;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
blurImageGPU[i * width + j] = arrayBlur[i * width + j];
blurImageGPU[offset + (i * width) + j] = arrayBlur[offset + i * width + j];
blurImageGPU[offset * 2 + i * width + j] = arrayBlur[offset * 2 + i * width + j];
}
}
std::chrono::duration<double> elapsed = end - begin;
std::cout << "Time taken GPU = " << elapsed.count() << " seconds" << endl;
// Convolve and record the time taken to do the operation
auto beginCPU = std::chrono::high_resolution_clock::now();
// Blur the image!
blurimage.convolve(mask5);
auto endCPU = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedCPU = endCPU - beginCPU;
std::cout << "Time taken to convolve = " << elapsedCPU.count() << " seconds" << endl;
// Show the original and the blurred images and compare.
// To display the images as 400 x 300
/* CImgDisplay main_disp(400, 300, "Original image");
CImgDisplay main_disp2(400, 300, "Blurred image");
main_disp.render(image);
main_disp2.render(blurimage);*/
// Display the images in their original size
if (show) {
CImgDisplay main_disp3(blurImageGPU, "Blurred image GPU");
CImgDisplay main_disp(image, "Original image");
CImgDisplay main_disp2(blurimage, "Blurred image CPU");
while (1)
{
main_disp.wait(); main_disp2.wait(); main_disp3.wait();
}
}
if (!show) {
fstream file;
file.open("TestRun.txt", ios::out | ios::app);
file << elapsed.count() << ";" << elapsedCPU.count() << "\n";
file.close();
}
delete[] arrayRGB;
delete[] pixelDataChar;
delete[] arrayBlur;
}
int main() {
int times = 1;
//cout << "How many times do you want to run?" << endl;
//cin >> times;
if (times == 1) {
test(true);
}
else {
for (int i = 0; i < times; i++) {
test(false);
}
}
}
| d4c7a81660056659577b9b5ab106f8623c8dea64.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "CImg.h"
#include <iostream>
#include <chrono>
#include <fstream>
using namespace cimg_library;
using namespace std;
int* rgbGPU;
int* rgbBlurGPU;
unsigned char* imageData;
double* mask;
__global__ void startKernel() {
}
__global__ void seperateRGB(unsigned char* image, int* resultArray,int width, int height) {
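	// Unpack interleaved RGBRGB... bytes into three planar int channels,
	// stored back-to-back as [R plane | G plane | B plane] of width*height each.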
int k = threadIdx.x + blockIdx.x * blockDim.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (k < width && row < height) {
int rowOffset = (width * row * 3);
int r = (int)image[rowOffset + k * 3];
int g = (int)image[rowOffset + k * 3 + 1];
int b = (int)image[rowOffset + k * 3 + 2];
resultArray[(row * width) + k] = r;
resultArray[(width * height) + (row * width) + k] = g;
resultArray[((width * height) * 2) + (row * width) + k] = b;
}
}
__global__ void Blur(int* rgbArray, int* blurImage, double* mask, int width, int height, int maskSize) {
int k = threadIdx.x + blockIdx.x * blockDim.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (k < width && row < height) {
int currentHeight = (row * width);
int offset = width * height;
		blurImage[currentHeight + k] = 0; //red
		blurImage[offset + currentHeight + k] = 0; //green
		blurImage[offset * 2 + currentHeight + k] = 0; //blue
double r = 0.;
double g = 0.;
double b = 0.;
for (int i = 0; i < maskSize; i++)
{
			int maskHeight = (i - ((maskSize - 1) / 2) + row) * width;
			maskHeight = max(maskHeight, 0);
			maskHeight = min(maskHeight, (height - 1) * width); // clamp to the last row, not one past the buffer
			for (int j = 0; j < maskSize; j++) {
				int maskWidth = k + j - ((maskSize - 1) / 2);
				maskWidth = max(maskWidth, 0);
				maskWidth = min(maskWidth, width - 1); // clamp to the last column
r = r + rgbArray[maskHeight + maskWidth] * mask[i * maskSize + j];
g = g + rgbArray[offset + maskHeight + maskWidth] * mask[i * maskSize + j];
b = b + rgbArray[offset * 2 + maskHeight + maskWidth] * mask[i * maskSize + j];
}
}
blurImage[currentHeight + k] = r;
blurImage[offset + currentHeight + k] = g;
blurImage[offset * 2 + currentHeight + k] = b;
}
}
int width;
int height;
unsigned char* pixelDataChar;
int dataSize;
float maxValue;
void readPPM(string fileName) {
ifstream file;
string version;
file.open(fileName, ios::in | ios::binary);
if (!file) {
cerr << "file could not be open" << endl;
exit(EXIT_FAILURE);
}
file >> version;
// Check version equals P6
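	// (P6 is the binary PPM flavour: an ASCII header "P6 <width> <height> <maxval>"
	// followed by raw interleaved RGB bytes, which is what read() consumes below.)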
if (version.compare("P6") != 0)
{
cout << "Invalid image format (must be 'P6')";
exit(EXIT_FAILURE);
}
file >> width >> height >> maxValue;
dataSize = height * width * 3;
pixelDataChar = new unsigned char[dataSize];
file.get();
file.read((char*)pixelDataChar, dataSize);
file.close();
}
void test(bool show)
{
//Mask
CImg<double> mask5(5, 5);
mask5(0, 0) = mask5(0, 4) = mask5(4, 0) = mask5(4, 4) = 1.0 / 256.0;
mask5(0, 1) = mask5(0, 3) = mask5(1, 0) = mask5(1, 4) = mask5(3, 0) = mask5(3, 4) = mask5(4, 1) = mask5(4, 3) = 4.0 / 256.0;
mask5(0, 2) = mask5(2, 0) = mask5(2, 4) = mask5(4, 2) = 6.0 / 256.0;
mask5(1, 1) = mask5(1, 3) = mask5(3, 1) = mask5(3, 3) = 16.0 / 256.0;
mask5(1, 2) = mask5(2, 1) = mask5(2, 3) = mask5(3, 2) = 24.0 / 256.0;
mask5(2, 2) = 36.0 / 256.0;
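	// (These weights form the 5x5 binomial approximation to a Gaussian: the
	// outer product of [1 4 6 4 1]/16 with itself, so they sum to exactly 1.)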
double maskArray[5 * 5];
double sum = 0.;
//Make the mask vector an array
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 5; j++) {
maskArray[i * 5 + j] = mask5(i, j);
}
}
//Read data from PPM
readPPM("cake.ppm");
	// The CImg objects load the small cake for the CPU reference blur and display;
	// the raw big-cake pixels from readPPM() above feed the GPU buffers below.
	// CImg<unsigned char> image("cake-small.ppm"), blurimage("cake-small.ppm");
	CImg<unsigned char> image("cake-small.ppm"), blurimage("cake-small.ppm"), blurImageGPU("cake-small.ppm");
int* arrayRGB;
int* arrayBlur;
arrayRGB = new int[dataSize];
arrayBlur = new int[dataSize];
for (int i = 0; i < (height * width) * 3; i++) {
arrayRGB[i] = -1;
arrayBlur[i] = -1;
}
int threads = 4;
dim3 block_dim(threads, threads, 1);
	dim3 grid_dim((width + threads - 1) / threads, (height + threads - 1) / threads, 1);
startKernel << <1, 1 >> > ();
cudaDeviceSynchronize();
std::cout << "Start GPU" << std::endl;
auto begin = std::chrono::high_resolution_clock::now();
cudaMalloc((void**)&rgbGPU, dataSize * sizeof(int));
cudaMalloc((void**)&rgbBlurGPU, dataSize * sizeof(int));
cudaMalloc((void**)&imageData, dataSize * sizeof(unsigned char));
cudaMalloc((void**)&mask, 5 * 5 * sizeof(double));
cudaMemcpy(rgbGPU, arrayRGB, dataSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(imageData, pixelDataChar, dataSize * sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(mask, maskArray, 5 * 5* sizeof(double), cudaMemcpyHostToDevice);
cudaError_t err;
seperateRGB << <grid_dim, block_dim >> > (imageData, rgbGPU, width, height);
/* err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "error: " << cudaGetErrorString(err) << " err code: " << err << endl;
err = cudaSuccess;
getchar();
}*/
cudaDeviceSynchronize();
Blur << <grid_dim, block_dim >> > (rgbGPU, rgbBlurGPU, mask, width, height, 5);
/* err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "error: " << cudaGetErrorString(err) << " err code: " << err << " func 2" << endl;
err = cudaSuccess;
}*/
cudaThreadSynchronize();
cudaMemcpy(arrayBlur, rgbBlurGPU, dataSize * sizeof(int), cudaMemcpyDeviceToHost);
auto end = std::chrono::high_resolution_clock::now();
int offset = width * height;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
blurImageGPU[i * width + j] = arrayBlur[i * width + j];
blurImageGPU[offset + (i * width) + j] = arrayBlur[offset + i * width + j];
blurImageGPU[offset * 2 + i * width + j] = arrayBlur[offset * 2 + i * width + j];
}
}
std::chrono::duration<double> elapsed = end - begin;
std::cout << "Time taken GPU = " << elapsed.count() << " seconds" << endl;
// Convolve and record the time taken to do the operation
auto beginCPU = std::chrono::high_resolution_clock::now();
// Blur the image!
blurimage.convolve(mask5);
auto endCPU = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsedCPU = endCPU - beginCPU;
std::cout << "Time taken to convolve = " << elapsedCPU.count() << " seconds" << endl;
// Show the original and the blurred images and compare.
// To display the images as 400 x 300
/* CImgDisplay main_disp(400, 300, "Original image");
CImgDisplay main_disp2(400, 300, "Blurred image");
main_disp.render(image);
main_disp2.render(blurimage);*/
// Display the images in their original size
if (show) {
CImgDisplay main_disp3(blurImageGPU, "Blurred image GPU");
CImgDisplay main_disp(image, "Original image");
CImgDisplay main_disp2(blurimage, "Blurred image CPU");
while (1)
{
main_disp.wait(); main_disp2.wait(); main_disp3.wait();
}
}
if (!show) {
fstream file;
file.open("TestRun.txt", ios::out | ios::app);
file << elapsed.count() << ";" << elapsedCPU.count() << "\n";
file.close();
}
delete[] arrayRGB;
delete[] pixelDataChar;
delete[] arrayBlur;
}
int main() {
int times = 1;
//cout << "How many times do you want to run?" << endl;
//cin >> times;
if (times == 1) {
test(true);
}
else {
for (int i = 0; i < times; i++) {
test(false);
}
}
}
|
3509da18d3f3ece512a0f50d33297da43643ec4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepLangevinGPU.cuh"
#include "saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepLangevinGPU.cu
\brief Defines GPU kernel code for Langevin integration on the GPU. Used by TwoStepLangevinGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Shared memory used in reducing sums for bd energy tally
extern __shared__ Scalar bdtally_sdata[];
//! Takes the second half-step forward in the Langevin integration on a group of particles
/*! \param d_pos array of particle positions and types
    \param d_vel array of particle velocities and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param tally Boolean indicating whether energy tally is performed or not
\param d_partial_sum_bdenergy Placeholder for the partial sum
This kernel is implemented in a very similar manner to gpu_nve_step_two_kernel(), see it for design details.
This kernel will tally the energy transfer from the bd thermal reservoir and the particle system
    Random number generation is done per thread with Saru's two-seed constructor; the seeds are
    the particle tag and the time step offset by the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
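// A minimal launch sketch (the authoritative sizing lives in the driver
// gpu_langevin_step_two() below; grid, threads and the argument list are
// whatever the caller prepared):
//   size_t shmem = max(sizeof(Scalar)*n_types, block_size*sizeof(Scalar));
//   hipLaunchKernelGGL(gpu_langevin_step_two_kernel, grid, threads, shmem, 0, /* args... */);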
extern "C" __global__
void gpu_langevin_step_two_kernel(const Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar *d_diameter,
const unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_gamma,
unsigned int n_types,
bool use_lambda,
Scalar lambda,
unsigned int timestep,
unsigned int seed,
Scalar T,
Scalar deltaT,
unsigned int D,
bool tally,
Scalar *d_partial_sum_bdenergy)
{
if (!use_lambda)
{
// read in the gammas (1 dimensional array)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar bd_energy_transfer = 0;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// ******** first, calculate the additional BD force
// read the current particle velocity (MEM TRANSFER: 16 bytes)
Scalar4 vel = d_vel[idx];
// read in the tag of our particle.
// (MEM TRANSFER: 4 bytes)
unsigned int ptag = d_tag[idx];
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
            // read in the diameter of our particle.
// (MEM TRANSFER: 4 bytes)
gamma = lambda*d_diameter[idx];
}
else
{
// read in the type of our particle. A texture read of only the fourth part of the position Scalar4
// (where type is stored) is used.
unsigned int typ = __scalar_as_int(d_pos[idx].w);
gamma = s_gammas[typ];
}
Scalar coeff = sqrtf(Scalar(6.0) * gamma * T / deltaT);
Scalar3 bd_force = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
//Initialize the Random Number Generator and generate the 3 random numbers
SaruGPU s(ptag, timestep + seed); // 2 dimensional seeding
Scalar randomx=s.s<Scalar>(-1.0, 1.0);
Scalar randomy=s.s<Scalar>(-1.0, 1.0);
Scalar randomz=s.s<Scalar>(-1.0, 1.0);
bd_force.x = randomx*coeff - gamma*vel.x;
bd_force.y = randomy*coeff - gamma*vel.y;
if (D > 2)
bd_force.z = randomz*coeff - gamma*vel.z;
// read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x,net_force.y,net_force.z);
// MEM TRANSFER: 4 bytes FLOPS: 3
Scalar mass = vel.w;
Scalar minv = Scalar(1.0) / mass;
accel.x = (accel.x + bd_force.x) * minv;
accel.y = (accel.y + bd_force.y) * minv;
accel.z = (accel.z + bd_force.z) * minv;
// v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT
// update the velocity (FLOPS: 6)
vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;
        // tally the energy transfer from the bd thermal reservoir to the particles (FLOPS: 6)
bd_energy_transfer = bd_force.x *vel.x + bd_force.y * vel.y + bd_force.z * vel.z;
// write out data (MEM TRANSFER: 32 bytes)
d_vel[idx] = vel;
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
if (tally)
{
        // don't overwrite values in the s_gammas array with the bd_energy transfer
__syncthreads();
bdtally_sdata[threadIdx.x] = bd_energy_transfer;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
d_partial_sum_bdenergy[blockIdx.x] = bdtally_sdata[0];
}
}
}
//! Kernel function for reducing a partial sum to a full sum (one value)
/*! \param d_sum Placeholder for the sum
\param d_partial_sum Array containing the parial sum
\param num_blocks Number of blocks to execute
*/
extern "C" __global__
void gpu_bdtally_reduce_partial_sum_kernel(Scalar *d_sum,
Scalar* d_partial_sum,
unsigned int num_blocks)
{
Scalar sum = Scalar(0.0);
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_blocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_blocks)
bdtally_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x];
else
bdtally_sdata[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
        // every thread accumulates the reduced value of this window
sum += bdtally_sdata[0];
}
if (threadIdx.x == 0)
*d_sum = sum;
}
/*! \param d_pos array of particle positions and types
    \param d_vel array of particle velocities and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
    This is just a driver for gpu_langevin_step_two_kernel(), see it for details.
*/
hipError_t gpu_langevin_step_two(const Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar *d_diameter,
const unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const langevin_step_two_args& langevin_args,
Scalar deltaT,
unsigned int D)
{
// setup the grid to run the kernel
dim3 grid(langevin_args.num_blocks, 1, 1);
dim3 grid1(1, 1, 1);
dim3 threads(langevin_args.block_size, 1, 1);
dim3 threads1(256, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_langevin_step_two_kernel), dim3(grid),
dim3(threads),
max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
(unsigned int)(langevin_args.block_size*sizeof(Scalar)))
, 0, d_pos,
d_vel,
d_accel,
d_diameter,
d_tag,
d_group_members,
group_size,
d_net_force,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
deltaT,
D,
langevin_args.tally,
langevin_args.d_partial_sum_bdenergy);
// run the summation kernel
if (langevin_args.tally)
hipLaunchKernelGGL(( gpu_bdtally_reduce_partial_sum_kernel), dim3(grid1),
dim3(threads1),
langevin_args.block_size*sizeof(Scalar)
, 0, &langevin_args.d_sum_bdenergy[0],
langevin_args.d_partial_sum_bdenergy,
langevin_args.num_blocks);
return hipSuccess;
}
| 3509da18d3f3ece512a0f50d33297da43643ec4a.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2016 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
#include "TwoStepLangevinGPU.cuh"
#include "saruprngCUDA.h"
#include <assert.h>
/*! \file TwoStepLangevinGPU.cu
\brief Defines GPU kernel code for Langevin integration on the GPU. Used by TwoStepLangevinGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Shared memory used in reducing sums for bd energy tally
extern __shared__ Scalar bdtally_sdata[];
//! Takes the second half-step forward in the Langevin integration on a group of particles
/*! \param d_pos array of particle positions and types
    \param d_vel array of particle velocities and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param tally Boolean indicating whether energy tally is performed or not
\param d_partial_sum_bdenergy Placeholder for the partial sum
This kernel is implemented in a very similar manner to gpu_nve_step_two_kernel(), see it for design details.
This kernel will tally the energy transfer from the bd thermal reservoir and the particle system
    Random number generation is done per thread with Saru's two-seed constructor; the seeds are
    the particle tag and the time step offset by the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
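// A minimal launch sketch (the authoritative sizing lives in the driver
// gpu_langevin_step_two() below; grid, threads and the argument list are
// whatever the caller prepared):
//   size_t shmem = max(sizeof(Scalar)*n_types, block_size*sizeof(Scalar));
//   gpu_langevin_step_two_kernel<<<grid, threads, shmem>>>(/* args... */);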
extern "C" __global__
void gpu_langevin_step_two_kernel(const Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar *d_diameter,
const unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_gamma,
unsigned int n_types,
bool use_lambda,
Scalar lambda,
unsigned int timestep,
unsigned int seed,
Scalar T,
Scalar deltaT,
unsigned int D,
bool tally,
Scalar *d_partial_sum_bdenergy)
{
if (!use_lambda)
{
// read in the gammas (1 dimensional array)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar bd_energy_transfer = 0;
if (group_idx < group_size)
{
unsigned int idx = d_group_members[group_idx];
// ******** first, calculate the additional BD force
// read the current particle velocity (MEM TRANSFER: 16 bytes)
Scalar4 vel = d_vel[idx];
// read in the tag of our particle.
// (MEM TRANSFER: 4 bytes)
unsigned int ptag = d_tag[idx];
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
            // read in the diameter of our particle.
// (MEM TRANSFER: 4 bytes)
gamma = lambda*d_diameter[idx];
}
else
{
// read in the type of our particle. A texture read of only the fourth part of the position Scalar4
// (where type is stored) is used.
unsigned int typ = __scalar_as_int(d_pos[idx].w);
gamma = s_gammas[typ];
}
Scalar coeff = sqrtf(Scalar(6.0) * gamma * T / deltaT);
Scalar3 bd_force = make_scalar3(Scalar(0.0), Scalar(0.0), Scalar(0.0));
//Initialize the Random Number Generator and generate the 3 random numbers
SaruGPU s(ptag, timestep + seed); // 2 dimensional seeding
Scalar randomx=s.s<Scalar>(-1.0, 1.0);
Scalar randomy=s.s<Scalar>(-1.0, 1.0);
Scalar randomz=s.s<Scalar>(-1.0, 1.0);
bd_force.x = randomx*coeff - gamma*vel.x;
bd_force.y = randomy*coeff - gamma*vel.y;
if (D > 2)
bd_force.z = randomz*coeff - gamma*vel.z;
// read in the net force and calculate the acceleration MEM TRANSFER: 16 bytes
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x,net_force.y,net_force.z);
// MEM TRANSFER: 4 bytes FLOPS: 3
Scalar mass = vel.w;
Scalar minv = Scalar(1.0) / mass;
accel.x = (accel.x + bd_force.x) * minv;
accel.y = (accel.y + bd_force.y) * minv;
accel.z = (accel.z + bd_force.z) * minv;
// v(t+deltaT) = v(t+deltaT/2) + 1/2 * a(t+deltaT)*deltaT
// update the velocity (FLOPS: 6)
vel.x += (Scalar(1.0)/Scalar(2.0)) * accel.x * deltaT;
vel.y += (Scalar(1.0)/Scalar(2.0)) * accel.y * deltaT;
vel.z += (Scalar(1.0)/Scalar(2.0)) * accel.z * deltaT;
        // tally the energy transfer from the bd thermal reservoir to the particles (FLOPS: 6)
bd_energy_transfer = bd_force.x *vel.x + bd_force.y * vel.y + bd_force.z * vel.z;
// write out data (MEM TRANSFER: 32 bytes)
d_vel[idx] = vel;
// since we calculate the acceleration, we need to write it for the next step
d_accel[idx] = accel;
}
if (tally)
{
        // don't overwrite values in the s_gammas array with the bd_energy transfer
__syncthreads();
bdtally_sdata[threadIdx.x] = bd_energy_transfer;
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
// write out our partial sum
if (threadIdx.x == 0)
{
d_partial_sum_bdenergy[blockIdx.x] = bdtally_sdata[0];
}
}
}
//! Kernel function for reducing a partial sum to a full sum (one value)
/*! \param d_sum Placeholder for the sum
    \param d_partial_sum Array containing the partial sum
\param num_blocks Number of blocks to execute
*/
extern "C" __global__
void gpu_bdtally_reduce_partial_sum_kernel(Scalar *d_sum,
Scalar* d_partial_sum,
unsigned int num_blocks)
{
Scalar sum = Scalar(0.0);
// sum up the values in the partial sum via a sliding window
for (int start = 0; start < num_blocks; start += blockDim.x)
{
__syncthreads();
if (start + threadIdx.x < num_blocks)
bdtally_sdata[threadIdx.x] = d_partial_sum[start + threadIdx.x];
else
bdtally_sdata[threadIdx.x] = Scalar(0.0);
__syncthreads();
// reduce the sum in parallel
int offs = blockDim.x >> 1;
while (offs > 0)
{
if (threadIdx.x < offs)
bdtally_sdata[threadIdx.x] += bdtally_sdata[threadIdx.x + offs];
offs >>= 1;
__syncthreads();
}
        // every thread accumulates the reduced value of this window
sum += bdtally_sdata[0];
}
if (threadIdx.x == 0)
*d_sum = sum;
}
/*! \param d_pos array of particle positions and types
    \param d_vel array of particle velocities and masses
\param d_accel array of particle accelerations
\param d_diameter array of particle diameters
\param d_tag array of particle tags
    \param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param langevin_args Collected arguments for gpu_langevin_step_two_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
    This is just a driver for gpu_langevin_step_two_kernel(), see it for details.
*/
cudaError_t gpu_langevin_step_two(const Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
const Scalar *d_diameter,
const unsigned int *d_tag,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const langevin_step_two_args& langevin_args,
Scalar deltaT,
unsigned int D)
{
// setup the grid to run the kernel
dim3 grid(langevin_args.num_blocks, 1, 1);
dim3 grid1(1, 1, 1);
dim3 threads(langevin_args.block_size, 1, 1);
dim3 threads1(256, 1, 1);
// run the kernel
gpu_langevin_step_two_kernel<<< grid,
threads,
max((unsigned int)(sizeof(Scalar)*langevin_args.n_types),
(unsigned int)(langevin_args.block_size*sizeof(Scalar)))
>>>(d_pos,
d_vel,
d_accel,
d_diameter,
d_tag,
d_group_members,
group_size,
d_net_force,
langevin_args.d_gamma,
langevin_args.n_types,
langevin_args.use_lambda,
langevin_args.lambda,
langevin_args.timestep,
langevin_args.seed,
langevin_args.T,
deltaT,
D,
langevin_args.tally,
langevin_args.d_partial_sum_bdenergy);
// run the summation kernel
if (langevin_args.tally)
gpu_bdtally_reduce_partial_sum_kernel<<<grid1,
threads1,
langevin_args.block_size*sizeof(Scalar)
>>>(&langevin_args.d_sum_bdenergy[0],
langevin_args.d_partial_sum_bdenergy,
langevin_args.num_blocks);
return cudaSuccess;
}
|
44bbe0d6d7374bbba4efda038c1a1d571f697d24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _FTMM_KERNEL_H_
#define _FTMM_KERNEL_H_
#include <stdio.h>
#include <assert.h>
#include "trsv.h"
// CSR Format
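// One level of a level-scheduled triangular solve: `topo` holds a row ordering,
// `lv[iter]` the offset of the current level, so each block solves one row whose
// referenced x entries were produced by earlier levels (or are already known).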
__global__ void trsvKernel(float *b, float *x, float *val, int *ind, int *cnt, int *topo, int *lv, int iter)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
float __shared__ dot[size];
int row = topo[lv[iter]+bid];
int nz = cnt[row+1] - cnt[row];
if(nz == 1) // only one element
{
x[row] = b[row]/val[cnt[row]];
return;
}
else
{
float tmpval, tmpx;
if(tid < nz-1)
{
tmpval = val[cnt[row]+tid];
tmpx = x[ind[cnt[row]+tid]];
dot[tid] = tmpval*tmpx;
}
else
dot[tid] = 0.0;
}
__syncthreads();
	// reduce: tree reduction over the shared array (assumes blockDim.x, i.e.
	// `size`, is a power of two; every thread initialized dot[tid] above)
	for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1)
	{
		if (tid < offset)
			dot[tid] += dot[tid + offset];
		__syncthreads();
	}
	if (tid == 0)
	{
		x[row] = (b[row] - dot[0]) / val[cnt[row+1]-1];
	}
}
#endif
| 44bbe0d6d7374bbba4efda038c1a1d571f697d24.cu | #ifndef _FTMM_KERNEL_H_
#define _FTMM_KERNEL_H_
#include <stdio.h>
#include <assert.h>
#include "trsv.h"
// CSR Format
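// One level of a level-scheduled triangular solve: `topo` holds a row ordering,
// `lv[iter]` the offset of the current level, so each block solves one row whose
// referenced x entries were produced by earlier levels (or are already known).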
__global__ void trsvKernel(float *b, float *x, float *val, int *ind, int *cnt, int *topo, int *lv, int iter)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
float __shared__ dot[size];
int row = topo[lv[iter]+bid];
int nz = cnt[row+1] - cnt[row];
if(nz == 1) // only one element
{
x[row] = b[row]/val[cnt[row]];
return;
}
else
{
float tmpval, tmpx;
if(tid < nz-1)
{
tmpval = val[cnt[row]+tid];
tmpx = x[ind[cnt[row]+tid]];
dot[tid] = tmpval*tmpx;
}
else
dot[tid] = 0.0;
}
__syncthreads();
	// reduce: tree reduction over the shared array (assumes blockDim.x, i.e.
	// `size`, is a power of two; every thread initialized dot[tid] above)
	for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1)
	{
		if (tid < offset)
			dot[tid] += dot[tid + offset];
		__syncthreads();
	}
	if (tid == 0)
	{
		x[row] = (b[row] - dot[0]) / val[cnt[row+1]-1];
	}
}
#endif
|
f419cba856c8706845ea2b2f7ac83ba8660a6255.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <math.h>
// This file was copied from libc++'s test suite, then modified to test CUDA.
// For the most part, this consists of adding __device__ attributes and
// deleting long double.
// This test requires C++11 (it's mostly decltype checks).
#if __cplusplus >= 201103L
#include <math.h>
#include <type_traits>
#include <cassert>
#include <stdio.h>
// See PR21083
// Ambiguous is a user-defined type that defines its own overloads of cmath
// functions. When the std overloads are candidates too (by using or adl),
// they should not interfere.
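// For example, abs(Ambiguous()) must pick the ADL overload below and yield
// Ambiguous; if the std:: overloads also competed (via the implicit float and
// double conversions), the call itself would be ambiguous and fail to compile.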
struct Ambiguous : std::true_type { // ADL
__device__ operator float () { return 0.f; }
__device__ operator double () { return 0.; }
};
__device__ Ambiguous abs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous floor(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); }
__device__ Ambiguous log(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log10(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); }
__device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erf(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous logb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous rint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous round(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); }
__device__ void test_abs()
{
static_assert((std::is_same<decltype(abs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(abs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), "");
assert(abs(-1) == 1);
assert(abs(-1.) == 1);
assert(abs(-1.f) == 1);
}
__device__ void test_acos()
{
static_assert((std::is_same<decltype(acos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acosf(0)), float>::value), "");
static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), "");
assert(acos(1) == 0);
assert(acos(1.) == 0);
assert(acos(1.f) == 0);
}
__device__ void test_asin()
{
static_assert((std::is_same<decltype(asin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinf(0)), float>::value), "");
static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), "");
assert(asin(0) == 0);
assert(asin(0.) == 0);
assert(asin(0.f) == 0);
}
__device__ void test_atan()
{
static_assert((std::is_same<decltype(atan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanf(0)), float>::value), "");
static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), "");
assert(atan(0) == 0);
assert(atan(0.) == 0);
assert(atan(0.f) == 0);
}
__device__ void test_atan2()
{
static_assert((std::is_same<decltype(atan2((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2f(0,0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(atan2(0, 1) == 0);
assert(atan2(0, 1.) == 0);
assert(atan2(0, 1.f) == 0);
assert(atan2(0., 1) == 0);
assert(atan2(0., 1.) == 0);
assert(atan2(0., 1.f) == 0);
assert(atan2(0.f, 1) == 0);
assert(atan2(0.f, 1.) == 0);
assert(atan2(0.f, 1.f) == 0);
}
__device__ void test_ceil()
{
static_assert((std::is_same<decltype(ceil((float)0)), float>::value), "");
static_assert((std::is_same<decltype(ceil((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((double)0)), double>::value), "");
static_assert((std::is_same<decltype(ceilf(0)), float>::value), "");
static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), "");
assert(ceil(0) == 0);
assert(ceil(0.) == 0);
assert(ceil(0.f) == 0);
}
__device__ void test_cos()
{
static_assert((std::is_same<decltype(cos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cosf(0)), float>::value), "");
static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), "");
assert(cos(0) == 1);
assert(cos(0.) == 1);
assert(cos(0.f) == 1);
}
__device__ void test_cosh()
{
static_assert((std::is_same<decltype(cosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(coshf(0)), float>::value), "");
static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), "");
assert(cosh(0) == 1);
assert(cosh(0.) == 1);
assert(cosh(0.f) == 1);
}
__device__ void test_exp()
{
static_assert((std::is_same<decltype(exp((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expf(0)), float>::value), "");
static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), "");
assert(exp(0) == 1);
assert(exp(0.) == 1);
assert(exp(0.f) == 1);
}
__device__ void test_fabs()
{
static_assert((std::is_same<decltype(fabs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(fabs((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(fabsf(0.0f)), float>::value), "");
static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), "");
assert(fabs(-1) == 1);
assert(fabs(-1.) == 1);
assert(fabs(-1.f) == 1);
}
__device__ void test_floor()
{
static_assert((std::is_same<decltype(floor((float)0)), float>::value), "");
static_assert((std::is_same<decltype(floor((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((double)0)), double>::value), "");
static_assert((std::is_same<decltype(floorf(0)), float>::value), "");
static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), "");
assert(floor(1) == 1);
assert(floor(1.) == 1);
assert(floor(1.f) == 1);
}
__device__ void test_fmod()
{
static_assert((std::is_same<decltype(fmod((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmodf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(fmod(1.5, 1) == .5);
assert(fmod(1.5, 1.) == .5);
assert(fmod(1.5, 1.f) == .5);
assert(fmod(1.5f, 1) == .5);
assert(fmod(1.5f, 1.) == .5);
assert(fmod(1.5f, 1.f) == .5);
assert(fmod(2, 1) == 0);
assert(fmod(2, 1.) == 0);
assert(fmod(2, 1.f) == 0);
}
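// For the two-argument functions, only a float/float call stays in float;
// any integral or double argument promotes the call to the double overload,
// as the fmod return-type checks above illustrate.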
__device__ void test_frexp()
{
int ip;
static_assert((std::is_same<decltype(frexp((float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp((bool)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned short)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexpf(0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), "");
assert(frexp(0, &ip) == 0);
assert(frexp(0., &ip) == 0);
assert(frexp(0.f, &ip) == 0);
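  // frexp decomposes x as m * 2^e with 0.5 <= |m| < 1 (m == 0 only for
  // x == 0), which is why the zero inputs above yield 0. Added spot check
  // with a fixed decomposition: 8.0 == 0.5 * 2^4.
  assert(frexp(8., &ip) == 0.5 && ip == 4);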
}
__device__ void test_ldexp()
{
int ip = 1;
static_assert((std::is_same<decltype(ldexp((float)0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp((bool)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned short)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((double)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexpf(0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), "");
assert(ldexp(1, ip) == 2);
assert(ldexp(1., ip) == 2);
assert(ldexp(1.f, ip) == 2);
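  // Added spot check with a fixed value: ldexp scales by a power of two,
  // 1.0 * 2^3 == 8.
  assert(ldexp(1., 3) == 8);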
}
__device__ void test_log()
{
static_assert((std::is_same<decltype(log((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logf(0)), float>::value), "");
static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), "");
assert(log(1) == 0);
assert(log(1.) == 0);
assert(log(1.f) == 0);
}
__device__ void test_log10()
{
static_assert((std::is_same<decltype(log10((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log10((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log10f(0)), float>::value), "");
static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), "");
assert(log10(1) == 0);
assert(log10(1.) == 0);
assert(log10(1.f) == 0);
}
__device__ void test_modf()
{
static_assert((std::is_same<decltype(modf((float)0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf((double)0, (double*)0)), double>::value), "");
static_assert((std::is_same<decltype(modff(0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), "");
double i;
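  // With a double out-parameter, all three calls below resolve to the double
  // overload; the float overload would require a float*, which &i cannot
  // match.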
assert(modf(1, &i) == 0);
assert(modf(1., &i) == 0);
assert(modf(1.f, &i) == 0);
}
__device__ void test_pow()
{
static_assert((std::is_same<decltype(pow((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(pow((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(powf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(pow(1, 1) == 1);
assert(pow(1., 1) == 1);
assert(pow(1.f, 1) == 1);
assert(pow(1, 1.) == 1);
assert(pow(1., 1.) == 1);
assert(pow(1.f, 1.) == 1);
assert(pow(1, 1.f) == 1);
assert(pow(1., 1.f) == 1);
assert(pow(1.f, 1.f) == 1);
}
__device__ void test_sin()
{
static_assert((std::is_same<decltype(sin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinf(0)), float>::value), "");
static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), "");
assert(sin(0) == 0);
assert(sin(0.) == 0);
assert(sin(0.f) == 0);
}
__device__ void test_sinh()
{
static_assert((std::is_same<decltype(sinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), "");
assert(sinh(0) == 0);
assert(sinh(0.) == 0);
assert(sinh(0.f) == 0);
}
__device__ void test_sqrt()
{
static_assert((std::is_same<decltype(sqrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), "");
assert(sqrt(4) == 2);
assert(sqrt(4.) == 2);
assert(sqrt(4.f) == 2);
}
__device__ void test_tan()
{
static_assert((std::is_same<decltype(tan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanf(0)), float>::value), "");
static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), "");
assert(tan(0) == 0);
assert(tan(0.) == 0);
assert(tan(0.f) == 0);
}
__device__ void test_tanh()
{
static_assert((std::is_same<decltype(tanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), "");
assert(tanh(0) == 0);
assert(tanh(0.) == 0);
assert(tanh(0.f) == 0);
}
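// The C99 classification and comparison facilities below must be available
// as real overloaded functions in C++, not macros; a leftover macro would
// break overload resolution, so each test first verifies that the name is
// not defined as a macro.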
__device__ void test_signbit()
{
#ifdef signbit
#error signbit defined
#endif
static_assert((std::is_same<decltype(signbit((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), "");
assert(signbit(-1) == true);
assert(signbit(-1.) == true);
assert(signbit(-1.f) == true);
}
__device__ void test_fpclassify()
{
#ifdef fpclassify
#error fpclassify defined
#endif
static_assert((std::is_same<decltype(fpclassify((float)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify((double)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), "");
assert(fpclassify(-1) == FP_NORMAL);
assert(fpclassify(-1.) == FP_NORMAL);
assert(fpclassify(-1.f) == FP_NORMAL);
}
__device__ void test_isfinite()
{
#ifdef isfinite
#error isfinite defined
#endif
static_assert((std::is_same<decltype(isfinite((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), "");
assert(isfinite(-1) == true);
assert(isfinite(-1.) == true);
assert(isfinite(-1.f) == true);
}
__device__ void test_isnormal()
{
#ifdef isnormal
#error isnormal defined
#endif
static_assert((std::is_same<decltype(isnormal((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), "");
assert(std::isnormal(-1) == true);
assert(std::isnormal(-1.) == true);
assert(std::isnormal(-1.f) == true);
}
__device__ void test_isgreater()
{
#ifdef isgreater
#error isgreater defined
#endif
static_assert((std::is_same<decltype(isgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreater(-1, 0) == false);
assert(std::isgreater(-1, 0.) == false);
assert(std::isgreater(-1, 0.f) == false);
assert(std::isgreater(-1., 0) == false);
assert(std::isgreater(-1., 0.) == false);
assert(std::isgreater(-1., 0.f) == false);
assert(std::isgreater(-1.f, 0) == false);
assert(std::isgreater(-1.f, 0.) == false);
assert(std::isgreater(-1.f, 0.f) == false);
}
__device__ void test_isgreaterequal()
{
#ifdef isgreaterequal
#error isgreaterequal defined
#endif
static_assert((std::is_same<decltype(isgreaterequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreaterequal(-1, 0) == false);
assert(std::isgreaterequal(-1, 0.) == false);
assert(std::isgreaterequal(-1, 0.f) == false);
assert(std::isgreaterequal(-1., 0) == false);
assert(std::isgreaterequal(-1., 0.) == false);
assert(std::isgreaterequal(-1., 0.f) == false);
assert(std::isgreaterequal(-1.f, 0) == false);
assert(std::isgreaterequal(-1.f, 0.) == false);
assert(std::isgreaterequal(-1.f, 0.f) == false);
}
__device__ void test_isinf()
{
#ifdef isinf
#error isinf defined
#endif
static_assert((std::is_same<decltype(isinf((float)0)), bool>::value), "");
typedef decltype(isinf((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isinf(0)), bool>::value), "");
assert(std::isinf(-1) == false);
assert(std::isinf(-1.) == false);
assert(std::isinf(-1.f) == false);
}
__device__ void test_isless()
{
#ifdef isless
#error isless defined
#endif
static_assert((std::is_same<decltype(isless((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isless(-1, 0) == true);
assert(std::isless(-1, 0.) == true);
assert(std::isless(-1, 0.f) == true);
assert(std::isless(-1., 0) == true);
assert(std::isless(-1., 0.) == true);
assert(std::isless(-1., 0.f) == true);
assert(std::isless(-1.f, 0) == true);
assert(std::isless(-1.f, 0.) == true);
assert(std::isless(-1.f, 0.f) == true);
}
__device__ void test_islessequal()
{
#ifdef islessequal
#error islessequal defined
#endif
static_assert((std::is_same<decltype(islessequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessequal(-1, 0) == true);
assert(std::islessequal(-1, 0.) == true);
assert(std::islessequal(-1, 0.f) == true);
assert(std::islessequal(-1., 0) == true);
assert(std::islessequal(-1., 0.) == true);
assert(std::islessequal(-1., 0.f) == true);
assert(std::islessequal(-1.f, 0) == true);
assert(std::islessequal(-1.f, 0.) == true);
assert(std::islessequal(-1.f, 0.f) == true);
}
__device__ void test_islessgreater()
{
#ifdef islessgreater
#error islessgreater defined
#endif
static_assert((std::is_same<decltype(islessgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessgreater(-1, 0) == true);
assert(std::islessgreater(-1, 0.) == true);
assert(std::islessgreater(-1, 0.f) == true);
assert(std::islessgreater(-1., 0) == true);
assert(std::islessgreater(-1., 0.) == true);
assert(std::islessgreater(-1., 0.f) == true);
assert(std::islessgreater(-1.f, 0) == true);
assert(std::islessgreater(-1.f, 0.) == true);
assert(std::islessgreater(-1.f, 0.f) == true);
}
__device__ void test_isnan()
{
#ifdef isnan
#error isnan defined
#endif
static_assert((std::is_same<decltype(isnan((float)0)), bool>::value), "");
typedef decltype(isnan((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isnan(0)), bool>::value), "");
assert(std::isnan(-1) == false);
assert(std::isnan(-1.) == false);
assert(std::isnan(-1.f) == false);
}
__device__ void test_isunordered()
{
#ifdef isunordered
#error isunordered defined
#endif
static_assert((std::is_same<decltype(isunordered((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isunordered(-1, 0) == false);
assert(std::isunordered(-1, 0.) == false);
assert(std::isunordered(-1, 0.f) == false);
assert(std::isunordered(-1., 0) == false);
assert(std::isunordered(-1., 0.) == false);
assert(std::isunordered(-1., 0.f) == false);
assert(std::isunordered(-1.f, 0) == false);
assert(std::isunordered(-1.f, 0.) == false);
assert(std::isunordered(-1.f, 0.f) == false);
}
__device__ void test_acosh()
{
static_assert((std::is_same<decltype(acosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acoshf(0)), float>::value), "");
static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), "");
assert(std::acosh(1) == 0);
assert(std::acosh(1.) == 0);
assert(std::acosh(1.f) == 0);
}
__device__ void test_asinh()
{
static_assert((std::is_same<decltype(asinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), "");
assert(asinh(0) == 0);
assert(asinh(0.) == 0);
assert(asinh(0.f) == 0);
}
__device__ void test_atanh()
{
static_assert((std::is_same<decltype(atanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), "");
assert(atanh(0) == 0);
assert(atanh(0.) == 0);
assert(atanh(0.f) == 0);
}
__device__ void test_cbrt()
{
static_assert((std::is_same<decltype(cbrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), "");
assert(cbrt(1) == 1);
assert(cbrt(1.) == 1);
assert(cbrt(1.f) == 1);
}
__device__ void test_copysign()
{
static_assert((std::is_same<decltype(copysign((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (double)0)), double>::value), "");
// CUDA's copysign(float, double) currently returns a float, in violation
// of the spec. We can't easily change this, so accept either one.
static_assert(
(std::is_same<decltype(copysign((float)0, (double)0)), double>::value ||
std::is_same<decltype(copysign((float)0, (double)0)), float>::value),
"");
static_assert((std::is_same<decltype(copysignf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::copysign(1, 1) == 1);
assert(std::copysign(1., 1) == 1);
assert(std::copysign(1.f, 1) == 1);
assert(std::copysign(1, 1.) == 1);
assert(std::copysign(1., 1.) == 1);
assert(std::copysign(1.f, 1.) == 1);
assert(std::copysign(1, 1.f) == 1);
assert(std::copysign(1., 1.f) == 1);
assert(std::copysign(1.f, 1.f) == 1);
}
__device__ void test_erf()
{
static_assert((std::is_same<decltype(erf((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erf((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erff(0)), float>::value), "");
static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
assert(erf(0) == 0);
assert(erf(0.) == 0);
assert(erf(0.f) == 0);
}
__device__ void test_erfc()
{
static_assert((std::is_same<decltype(erfc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erfc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erfcf(0)), float>::value), "");
static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
assert(erfc(0) == 1);
assert(erfc(0.) == 1);
assert(erfc(0.f) == 1);
}
__device__ void test_exp2()
{
static_assert((std::is_same<decltype(exp2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2f(0)), float>::value), "");
static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
assert(exp2(1) == 2);
assert(exp2(1.) == 2);
assert(exp2(1.f) == 2);
}
__device__ void test_expm1()
{
static_assert((std::is_same<decltype(expm1((float)0)), float>::value), "");
static_assert((std::is_same<decltype(expm1((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1f(0)), float>::value), "");
static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
assert(expm1(0) == 0);
assert(expm1(0.) == 0);
assert(expm1(0.f) == 0);
}
__device__ void test_fdim()
{
static_assert((std::is_same<decltype(fdim((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdimf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fdim(1, 0) == 1);
assert(std::fdim(1., 0) == 1);
assert(std::fdim(1.f, 0) == 1);
assert(std::fdim(1, 0.) == 1);
assert(std::fdim(1., 0.) == 1);
assert(std::fdim(1.f, 0.) == 1);
assert(std::fdim(1, 0.f) == 1);
assert(std::fdim(1., 0.f) == 1);
assert(std::fdim(1.f, 0.f) == 1);
}
__device__ void test_fma()
{
static_assert((std::is_same<decltype(fma((bool)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (int)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (long)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fma((bool)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (int)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (long)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaf(0,0,0)), float>::value), "");
static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fma(1, 1, 1) == 2);
assert(std::fma(1., 1, 1) == 2);
assert(std::fma(1.f, 1, 1) == 2);
assert(std::fma(1, 1., 1) == 2);
assert(std::fma(1., 1., 1) == 2);
assert(std::fma(1.f, 1., 1) == 2);
assert(std::fma(1, 1.f, 1) == 2);
assert(std::fma(1., 1.f, 1) == 2);
assert(std::fma(1.f, 1.f, 1) == 2);
assert(std::fma(1, 1, 1.) == 2);
assert(std::fma(1., 1, 1.) == 2);
assert(std::fma(1.f, 1, 1.) == 2);
assert(std::fma(1, 1., 1.) == 2);
assert(std::fma(1., 1., 1.) == 2);
assert(std::fma(1.f, 1., 1.) == 2);
assert(std::fma(1, 1.f, 1.) == 2);
assert(std::fma(1., 1.f, 1.) == 2);
assert(std::fma(1.f, 1.f, 1.) == 2);
assert(std::fma(1, 1, 1.f) == 2);
assert(std::fma(1., 1, 1.f) == 2);
assert(std::fma(1.f, 1, 1.f) == 2);
assert(std::fma(1, 1., 1.f) == 2);
assert(std::fma(1., 1., 1.f) == 2);
assert(std::fma(1.f, 1., 1.f) == 2);
assert(std::fma(1, 1.f, 1.f) == 2);
assert(std::fma(1., 1.f, 1.f) == 2);
assert(std::fma(1.f, 1.f, 1.f) == 2);
}
__device__ void test_fmax()
{
static_assert((std::is_same<decltype(fmax((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaxf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmax(1, 0) == 1);
assert(std::fmax(1., 0) == 1);
assert(std::fmax(1.f, 0) == 1);
assert(std::fmax(1, 0.) == 1);
assert(std::fmax(1., 0.) == 1);
assert(std::fmax(1.f, 0.) == 1);
assert(std::fmax(1, 0.f) == 1);
assert(std::fmax(1., 0.f) == 1);
assert(std::fmax(1.f, 0.f) == 1);
}
__device__ void test_fmin()
{
static_assert((std::is_same<decltype(fmin((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fminf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmin(1, 0) == 0);
assert(std::fmin(1., 0) == 0);
assert(std::fmin(1.f, 0) == 0);
assert(std::fmin(1, 0.) == 0);
assert(std::fmin(1., 0.) == 0);
assert(std::fmin(1.f, 0.) == 0);
assert(std::fmin(1, 0.f) == 0);
assert(std::fmin(1., 0.f) == 0);
assert(std::fmin(1.f, 0.f) == 0);
}
__device__ void test_hypot()
{
static_assert((std::is_same<decltype(hypot((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypotf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(3, 4) == 5);
assert(std::hypot(3, 4.) == 5);
assert(std::hypot(3, 4.f) == 5);
assert(std::hypot(3., 4) == 5);
assert(std::hypot(3., 4.) == 5);
assert(std::hypot(3., 4.f) == 5);
assert(std::hypot(3.f, 4) == 5);
assert(std::hypot(3.f, 4.) == 5);
assert(std::hypot(3.f, 4.f) == 5);
}
__device__ void test_ilogb()
{
static_assert((std::is_same<decltype(ilogb((float)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((bool)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned short)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((double)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogbf(0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
assert(ilogb(1) == 0);
assert(ilogb(1.) == 0);
assert(ilogb(1.f) == 0);
}
__device__ void test_lgamma()
{
static_assert((std::is_same<decltype(lgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(lgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
assert(lgamma(1) == 0);
assert(lgamma(1.) == 0);
assert(lgamma(1.f) == 0);
}
__device__ void test_llrint()
{
static_assert((std::is_same<decltype(llrint((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrintf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
assert(llrint(1) == 1LL);
assert(llrint(1.) == 1LL);
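  // The float overload is gated on the toolkit version. Note: hipify rewrote
  // the original CUDA_VERSION test to TORCH_HIP_VERSION, which (encoded as
  // major * 100 + minor) likely never exceeds 7050, so under HIP this check
  // is effectively compiled out.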
#if TORCH_HIP_VERSION > 7050
assert(llrint(1.f) == 1LL);
#endif
}
__device__ void test_llround()
{
static_assert((std::is_same<decltype(llround((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llroundf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
assert(llround(1) == 1LL);
assert(llround(1.) == 1LL);
assert(llround(1.f) == 1LL);
}
__device__ void test_log1p()
{
static_assert((std::is_same<decltype(log1p((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log1p((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log1pf(0)), float>::value), "");
static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
assert(log1p(0) == 0);
assert(log1p(0.) == 0);
assert(log1p(0.f) == 0);
}
__device__ void test_log2()
{
static_assert((std::is_same<decltype(log2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log2f(0)), float>::value), "");
static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
assert(log2(1) == 0);
assert(log2(1.) == 0);
assert(log2(1.f) == 0);
}
__device__ void test_logb()
{
static_assert((std::is_same<decltype(logb((float)0)), float>::value), "");
static_assert((std::is_same<decltype(logb((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logbf(0)), float>::value), "");
static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
assert(logb(1) == 0);
assert(logb(1.) == 0);
assert(logb(1.f) == 0);
}
__device__ void test_lrint()
{
static_assert((std::is_same<decltype(lrint((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lrintf(0)), long>::value), "");
static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
assert(lrint(1) == 1L);
assert(lrint(1.) == 1L);
#if TORCH_HIP_VERSION > 7050
assert(lrint(1.f) == 1L);
#endif
}
__device__ void test_lround()
{
static_assert((std::is_same<decltype(lround((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lroundf(0)), long>::value), "");
static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
assert(lround(1) == 1L);
assert(lround(1.) == 1L);
assert(lround(1.f) == 1L);
}
__device__ void test_nan()
{
static_assert((std::is_same<decltype(nan("")), double>::value), "");
static_assert((std::is_same<decltype(nanf("")), float>::value), "");
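  // NaN compares unequal to everything, so there is no meaningful == spot
  // check here. Added sanity check (relies only on isnan, which is exercised
  // elsewhere in this file): both variants must actually produce NaNs.
  assert(std::isnan(nan("")));
  assert(std::isnan(nanf("")));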
}
__device__ void test_nearbyint()
{
static_assert((std::is_same<decltype(nearbyint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyintf(0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
assert(nearbyint(1) == 1);
assert(nearbyint(1.) == 1);
assert(nearbyint(1.f) == 1);
}
__device__ void test_nextafter()
{
static_assert((std::is_same<decltype(nextafter((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafterf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
//assert(nextafter(0,1) == hexfloat<double>(0x1, 0, -1074));
// Invoke all our overloads. Even though we don't check the exact result
// (this is pretty annoying to do for this function), we make sure to *use*
// the results so that these function calls can't be DCE'ed.
assert(nextafter(0, 1) != 0);
assert(nextafter(0, 1.) != 0);
assert(nextafter(0, 1.f) != 0);
assert(nextafter(0., 1) != 0);
assert(nextafter(0., 1.) != 0);
assert(nextafter(0., 1.f) != 0);
assert(nextafter(0.f, 1) != 0);
assert(nextafter(0.f, 1.) != 0);
assert(nextafter(0.f, 1.f) != 0);
}
__device__ void test_remainder()
{
static_assert((std::is_same<decltype(remainder((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainderf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
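  // remainder() rounds the quotient to the nearest integer, ties to even:
  // 1.5 / 1 rounds to 2, so the result is 1.5 - 2 == -0.5.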
assert(::remainder(1.5, 1) == -.5);
assert(::remainder(1.5, 1.) == -.5);
assert(::remainder(1.5, 1.f) == -.5);
assert(::remainder(1.5f, 1) == -.5);
assert(::remainder(1.5f, 1.) == -.5);
assert(::remainder(1.5f, 1.f) == -.5);
assert(::remainder(2, 1) == 0);
assert(::remainder(2, 1.) == 0);
assert(::remainder(2, 1.f) == 0);
}
__device__ void test_remquo()
{
int ip;
static_assert((std::is_same<decltype(remquo((float)0, (float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((bool)0, (float)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquof(0,0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::remquo(1, 1, &ip) == 0);
assert(std::remquo(1, 1., &ip) == 0);
assert(std::remquo(1, 1.f, &ip) == 0);
assert(std::remquo(0.5, 1, &ip) == 0.5);
assert(std::remquo(0.5, 1., &ip) == 0.5);
assert(std::remquo(0.5, 1.f, &ip) == 0.5);
assert(std::remquo(0.5f, 1, &ip) == 0.5);
assert(std::remquo(0.5f, 1., &ip) == 0.5);
assert(std::remquo(0.5f, 1.f, &ip) == 0.5);
}
__device__ void test_rint()
{
static_assert((std::is_same<decltype(rint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(rint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(rintf(0)), float>::value), "");
static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");
assert(rint(1) == 1);
assert(rint(1.) == 1);
assert(rint(1.f) == 1);
}
__device__ void test_round()
{
static_assert((std::is_same<decltype(round((float)0)), float>::value), "");
static_assert((std::is_same<decltype(round((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(round((int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((double)0)), double>::value), "");
static_assert((std::is_same<decltype(roundf(0)), float>::value), "");
static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
assert(round(1) == 1);
assert(round(1.) == 1);
assert(round(1.f) == 1);
}
__device__ void test_scalbln()
{
static_assert((std::is_same<decltype(scalbln((float)0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln((bool)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned short)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalblnf(0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbln(1, 1) == 2);
assert(std::scalbln(1, 1.) == 2);
assert(std::scalbln(1, 1.f) == 2);
assert(std::scalbln(1., 1) == 2);
assert(std::scalbln(1., 1.) == 2);
assert(std::scalbln(1., 1.f) == 2);
assert(std::scalbln(1.f, 1) == 2);
assert(std::scalbln(1.f, 1.) == 2);
assert(std::scalbln(1.f, 1.f) == 2);
}
__device__ void test_scalbn()
{
static_assert((std::is_same<decltype(scalbn((float)0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn((bool)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned short)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((double)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbnf(0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbn(1, 1) == 2);
assert(std::scalbn(1, 1.) == 2);
assert(std::scalbn(1, 1.f) == 2);
assert(std::scalbn(1., 1) == 2);
assert(std::scalbn(1., 1.) == 2);
assert(std::scalbn(1., 1.f) == 2);
assert(std::scalbn(1.f, 1) == 2);
assert(std::scalbn(1.f, 1.) == 2);
assert(std::scalbn(1.f, 1.f) == 2);
}
__device__ void test_tgamma()
{
static_assert((std::is_same<decltype(tgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
assert(tgamma(1) == 1);
assert(tgamma(1.) == 1);
assert(tgamma(1.f) == 1);
}
__device__ void test_trunc()
{
static_assert((std::is_same<decltype(trunc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(trunc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(truncf(0)), float>::value), "");
static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
assert(trunc(1) == 1);
assert(trunc(1.) == 1);
assert(trunc(1.f) == 1);
}
__global__ void tests()
{
test_abs();
test_acos();
test_asin();
test_atan();
test_atan2();
test_ceil();
test_cos();
test_cosh();
test_exp();
test_fabs();
test_floor();
test_fmod();
test_frexp();
test_ldexp();
test_log();
test_log10();
test_modf();
test_pow();
test_sin();
test_sinh();
test_sqrt();
test_tan();
test_tanh();
test_signbit();
test_fpclassify();
test_isfinite();
test_isnormal();
test_isgreater();
test_isgreaterequal();
test_isinf();
test_isless();
test_islessequal();
test_islessgreater();
test_isnan();
test_isunordered();
test_acosh();
test_asinh();
test_atanh();
test_cbrt();
test_copysign();
test_erf();
test_erfc();
test_exp2();
test_expm1();
test_fdim();
test_fma();
test_fmax();
test_fmin();
test_hypot();
test_ilogb();
test_lgamma();
test_llrint();
test_llround();
test_log1p();
test_log2();
test_logb();
test_lrint();
test_lround();
test_nan();
test_nearbyint();
test_nextafter();
test_remainder();
test_remquo();
test_rint();
test_round();
test_scalbln();
test_scalbn();
test_tgamma();
test_trunc();
}
int main() {
hipLaunchKernelGGL(tests, dim3(1), dim3(1), 0, 0);
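// Errors from the launch above -- including device-side assert failures --
// are reported by the synchronize call below.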
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess) {
printf("CUDA error %d\n", (int)err);
return 1;
}
printf("Success!\n");
return 0;
}
#else
#include <stdio.h>
// No C++11; test is a nop.
int main() {
printf("Success!\n");
return 0;
}
#endif // __cplusplus < 201103L
| f419cba856c8706845ea2b2f7ac83ba8660a6255.cu |
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// <math.h>
// This file was copied from libc++'s test suite, then modified to test CUDA.
// For the most part, this consists of adding __device__ attributes and
// deleting long double.
// This test requires C++11 (it's mostly decltype checks).
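// A note on the pattern used throughout: each test first static_asserts the
// return type of every overload -- a float argument keeps the float overload,
// while any integral argument selects the double overload, per [c.math] --
// and then evaluates a few calls at runtime so the device code path is
// exercised as well.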
#if __cplusplus >= 201103L
#include <math.h>
#include <type_traits>
#include <cassert>
#include <stdio.h>
// See PR21083
// Ambiguous is a user-defined type that defines its own overloads of cmath
// functions. When the std overloads are candidates too (via using-declarations
// or ADL), they should not interfere.
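// For example, given the overloads declared below, a check such as
//   static_assert(std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value, "");
// must resolve to the user-defined sin(Ambiguous); it would fail if the call
// instead went through Ambiguous's conversion operators and picked one of the
// <math.h> overloads returning float or double.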
struct Ambiguous : std::true_type { // ADL
__device__ operator float () { return 0.f; }
__device__ operator double () { return 0.; }
};
__device__ Ambiguous abs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cos(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous floor(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); }
__device__ Ambiguous log(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log10(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); }
__device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sin(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tan(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erf(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous llround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous log2(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous logb(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous lround(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); }
__device__ Ambiguous rint(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous round(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); }
__device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); }
__device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); }
__device__ void test_abs()
{
static_assert((std::is_same<decltype(abs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(abs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), "");
assert(abs(-1) == 1);
assert(abs(-1.) == 1);
assert(abs(-1.f) == 1);
}
__device__ void test_acos()
{
static_assert((std::is_same<decltype(acos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acosf(0)), float>::value), "");
static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), "");
assert(acos(1) == 0);
assert(acos(1.) == 0);
assert(acos(1.f) == 0);
}
__device__ void test_asin()
{
static_assert((std::is_same<decltype(asin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinf(0)), float>::value), "");
static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), "");
assert(asin(0) == 0);
assert(asin(0.) == 0);
assert(asin(0.f) == 0);
}
__device__ void test_atan()
{
static_assert((std::is_same<decltype(atan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanf(0)), float>::value), "");
static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), "");
assert(atan(0) == 0);
assert(atan(0.) == 0);
assert(atan(0.f) == 0);
}
__device__ void test_atan2()
{
static_assert((std::is_same<decltype(atan2((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2f(0,0)), float>::value), "");
static_assert((std::is_same<decltype(atan2((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(atan2(0, 1) == 0);
assert(atan2(0, 1.) == 0);
assert(atan2(0, 1.f) == 0);
assert(atan2(0., 1) == 0);
assert(atan2(0., 1.) == 0);
assert(atan2(0., 1.f) == 0);
assert(atan2(0.f, 1) == 0);
assert(atan2(0.f, 1.) == 0);
assert(atan2(0.f, 1.f) == 0);
}
__device__ void test_ceil()
{
static_assert((std::is_same<decltype(ceil((float)0)), float>::value), "");
static_assert((std::is_same<decltype(ceil((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(ceil((double)0)), double>::value), "");
static_assert((std::is_same<decltype(ceilf(0)), float>::value), "");
static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), "");
assert(ceil(0) == 0);
assert(ceil(0.) == 0);
assert(ceil(0.f) == 0);
}
__device__ void test_cos()
{
static_assert((std::is_same<decltype(cos((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cos((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cos((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cosf(0)), float>::value), "");
static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), "");
assert(cos(0) == 1);
assert(cos(0.) == 1);
assert(cos(0.f) == 1);
}
__device__ void test_cosh()
{
static_assert((std::is_same<decltype(cosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(coshf(0)), float>::value), "");
static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), "");
assert(cosh(0) == 1);
assert(cosh(0.) == 1);
assert(cosh(0.f) == 1);
}
__device__ void test_exp()
{
static_assert((std::is_same<decltype(exp((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expf(0)), float>::value), "");
static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), "");
assert(exp(0) == 1);
assert(exp(0.) == 1);
assert(exp(0.f) == 1);
}
__device__ void test_fabs()
{
static_assert((std::is_same<decltype(fabs((float)0)), float>::value), "");
static_assert((std::is_same<decltype(fabs((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fabs((double)0)), double>::value), "");
static_assert((std::is_same<decltype(fabsf(0.0f)), float>::value), "");
static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), "");
assert(fabs(-1) == 1);
assert(fabs(-1.) == 1);
assert(fabs(-1.f) == 1);
}
__device__ void test_floor()
{
static_assert((std::is_same<decltype(floor((float)0)), float>::value), "");
static_assert((std::is_same<decltype(floor((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(floor((double)0)), double>::value), "");
static_assert((std::is_same<decltype(floorf(0)), float>::value), "");
static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), "");
assert(floor(1) == 1);
assert(floor(1.) == 1);
assert(floor(1.f) == 1);
}
__device__ void test_fmod()
{
static_assert((std::is_same<decltype(fmod((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmodf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmod((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(fmod(1.5, 1) == .5);
assert(fmod(1.5, 1.) == .5);
assert(fmod(1.5, 1.f) == .5);
assert(fmod(1.5f, 1) == .5);
assert(fmod(1.5f, 1.) == .5);
assert(fmod(1.5f, 1.f) == .5);
assert(fmod(2, 1) == 0);
assert(fmod(2, 1.) == 0);
assert(fmod(2, 1.f) == 0);
}
__device__ void test_frexp()
{
int ip;
static_assert((std::is_same<decltype(frexp((float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp((bool)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned short)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexp((double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(frexpf(0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), "");
assert(frexp(0, &ip) == 0);
assert(frexp(0., &ip) == 0);
assert(frexp(0.f, &ip) == 0);
}
__device__ void test_ldexp()
{
int ip = 1;
static_assert((std::is_same<decltype(ldexp((float)0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp((bool)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned short)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned int)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((unsigned long long)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexp((double)0, ip)), double>::value), "");
static_assert((std::is_same<decltype(ldexpf(0, ip)), float>::value), "");
static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), "");
assert(ldexp(1, ip) == 2);
assert(ldexp(1., ip) == 2);
assert(ldexp(1.f, ip) == 2);
}
__device__ void test_log()
{
static_assert((std::is_same<decltype(log((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logf(0)), float>::value), "");
static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), "");
assert(log(1) == 0);
assert(log(1.) == 0);
assert(log(1.f) == 0);
}
__device__ void test_log10()
{
static_assert((std::is_same<decltype(log10((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log10((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log10((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log10f(0)), float>::value), "");
static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), "");
assert(log10(1) == 0);
assert(log10(1.) == 0);
assert(log10(1.f) == 0);
}
__device__ void test_modf()
{
static_assert((std::is_same<decltype(modf((float)0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf((double)0, (double*)0)), double>::value), "");
static_assert((std::is_same<decltype(modff(0, (float*)0)), float>::value), "");
static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), "");
double i;
assert(modf(1, &i) == 0);
assert(modf(1., &i) == 0);
assert(modf(1.f, &i) == 0);
}
__device__ void test_pow()
{
static_assert((std::is_same<decltype(pow((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(pow((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(pow((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(powf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(pow((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(pow(1, 1) == 1);
assert(pow(1., 1) == 1);
assert(pow(1.f, 1) == 1);
assert(pow(1, 1.) == 1);
assert(pow(1., 1.) == 1);
assert(pow(1.f, 1.) == 1);
assert(pow(1, 1.f) == 1);
assert(pow(1., 1.f) == 1);
assert(pow(1.f, 1.f) == 1);
}
__device__ void test_sin()
{
static_assert((std::is_same<decltype(sin((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sin((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sin((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinf(0)), float>::value), "");
static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), "");
assert(sin(0) == 0);
assert(sin(0.) == 0);
assert(sin(0.f) == 0);
}
__device__ void test_sinh()
{
static_assert((std::is_same<decltype(sinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), "");
assert(sinh(0) == 0);
assert(sinh(0.) == 0);
assert(sinh(0.f) == 0);
}
__device__ void test_sqrt()
{
static_assert((std::is_same<decltype(sqrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(sqrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), "");
assert(sqrt(4) == 2);
assert(sqrt(4.) == 2);
assert(sqrt(4.f) == 2);
}
__device__ void test_tan()
{
static_assert((std::is_same<decltype(tan((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tan((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tan((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanf(0)), float>::value), "");
static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), "");
assert(tan(0) == 0);
assert(tan(0.) == 0);
assert(tan(0.f) == 0);
}
__device__ void test_tanh()
{
static_assert((std::is_same<decltype(tanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), "");
assert(tanh(0) == 0);
assert(tanh(0.) == 0);
assert(tanh(0.f) == 0);
}
__device__ void test_signbit()
{
#ifdef signbit
#error signbit defined
#endif
static_assert((std::is_same<decltype(signbit((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(0)), bool>::value), "");
static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), "");
assert(signbit(-1) == true);
assert(signbit(-1.) == true);
assert(signbit(-1.f) == true);
}
__device__ void test_fpclassify()
{
#ifdef fpclassify
#error fpclassify defined
#endif
static_assert((std::is_same<decltype(fpclassify((float)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify((double)0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(0)), int>::value), "");
static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), "");
assert(fpclassify(-1) == FP_NORMAL);
assert(fpclassify(-1.) == FP_NORMAL);
assert(fpclassify(-1.f) == FP_NORMAL);
}
__device__ void test_isfinite()
{
#ifdef isfinite
#error isfinite defined
#endif
static_assert((std::is_same<decltype(isfinite((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(0)), bool>::value), "");
static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), "");
assert(isfinite(-1) == true);
assert(isfinite(-1.) == true);
assert(isfinite(-1.f) == true);
}
__device__ void test_isnormal()
{
#ifdef isnormal
#error isnormal defined
#endif
static_assert((std::is_same<decltype(isnormal((float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal((double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(0)), bool>::value), "");
static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), "");
assert(std::isnormal(-1) == true);
assert(std::isnormal(-1.) == true);
assert(std::isnormal(-1.f) == true);
}
__device__ void test_isgreater()
{
#ifdef isgreater
#error isgreater defined
#endif
static_assert((std::is_same<decltype(isgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreater(-1, 0) == false);
assert(std::isgreater(-1, 0.) == false);
assert(std::isgreater(-1, 0.f) == false);
assert(std::isgreater(-1., 0) == false);
assert(std::isgreater(-1., 0.) == false);
assert(std::isgreater(-1., 0.f) == false);
assert(std::isgreater(-1.f, 0) == false);
assert(std::isgreater(-1.f, 0.) == false);
assert(std::isgreater(-1.f, 0.f) == false);
}
__device__ void test_isgreaterequal()
{
#ifdef isgreaterequal
#error isgreaterequal defined
#endif
static_assert((std::is_same<decltype(isgreaterequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isgreaterequal(-1, 0) == false);
assert(std::isgreaterequal(-1, 0.) == false);
assert(std::isgreaterequal(-1, 0.f) == false);
assert(std::isgreaterequal(-1., 0) == false);
assert(std::isgreaterequal(-1., 0.) == false);
assert(std::isgreaterequal(-1., 0.f) == false);
assert(std::isgreaterequal(-1.f, 0) == false);
assert(std::isgreaterequal(-1.f, 0.) == false);
assert(std::isgreaterequal(-1.f, 0.f) == false);
}
__device__ void test_isinf()
{
#ifdef isinf
#error isinf defined
#endif
static_assert((std::is_same<decltype(isinf((float)0)), bool>::value), "");
typedef decltype(isinf((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isinf(0)), bool>::value), "");
assert(std::isinf(-1) == false);
assert(std::isinf(-1.) == false);
assert(std::isinf(-1.f) == false);
}
__device__ void test_isless()
{
#ifdef isless
#error isless defined
#endif
static_assert((std::is_same<decltype(isless((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isless(-1, 0) == true);
assert(std::isless(-1, 0.) == true);
assert(std::isless(-1, 0.f) == true);
assert(std::isless(-1., 0) == true);
assert(std::isless(-1., 0.) == true);
assert(std::isless(-1., 0.f) == true);
assert(std::isless(-1.f, 0) == true);
assert(std::isless(-1.f, 0.) == true);
assert(std::isless(-1.f, 0.f) == true);
}
__device__ void test_islessequal()
{
#ifdef islessequal
#error islessequal defined
#endif
static_assert((std::is_same<decltype(islessequal((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessequal(-1, 0) == true);
assert(std::islessequal(-1, 0.) == true);
assert(std::islessequal(-1, 0.f) == true);
assert(std::islessequal(-1., 0) == true);
assert(std::islessequal(-1., 0.) == true);
assert(std::islessequal(-1., 0.f) == true);
assert(std::islessequal(-1.f, 0) == true);
assert(std::islessequal(-1.f, 0.) == true);
assert(std::islessequal(-1.f, 0.f) == true);
}
__device__ void test_islessgreater()
{
#ifdef islessgreater
#error islessgreater defined
#endif
static_assert((std::is_same<decltype(islessgreater((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::islessgreater(-1, 0) == true);
assert(std::islessgreater(-1, 0.) == true);
assert(std::islessgreater(-1, 0.f) == true);
assert(std::islessgreater(-1., 0) == true);
assert(std::islessgreater(-1., 0.) == true);
assert(std::islessgreater(-1., 0.f) == true);
assert(std::islessgreater(-1.f, 0) == true);
assert(std::islessgreater(-1.f, 0.) == true);
assert(std::islessgreater(-1.f, 0.f) == true);
}
__device__ void test_isnan()
{
#ifdef isnan
#error isnan defined
#endif
static_assert((std::is_same<decltype(isnan((float)0)), bool>::value), "");
typedef decltype(isnan((double)0)) DoubleRetType;
#ifndef __linux__
static_assert((std::is_same<DoubleRetType, bool>::value), "");
#else
// GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in
// all C++ dialects. The test should tolerate this.
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439
static_assert((std::is_same<DoubleRetType, bool>::value
|| std::is_same<DoubleRetType, int>::value), "");
#endif
static_assert((std::is_same<decltype(isnan(0)), bool>::value), "");
assert(std::isnan(-1) == false);
assert(std::isnan(-1.) == false);
assert(std::isnan(-1.f) == false);
}
__device__ void test_isunordered()
{
#ifdef isunordered
#error isunordered defined
#endif
static_assert((std::is_same<decltype(isunordered((float)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((float)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (float)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered((double)0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(0, (double)0)), bool>::value), "");
static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::isunordered(-1, 0) == false);
assert(std::isunordered(-1, 0.) == false);
assert(std::isunordered(-1, 0.f) == false);
assert(std::isunordered(-1., 0) == false);
assert(std::isunordered(-1., 0.) == false);
assert(std::isunordered(-1., 0.f) == false);
assert(std::isunordered(-1.f, 0) == false);
assert(std::isunordered(-1.f, 0.) == false);
assert(std::isunordered(-1.f, 0.f) == false);
}
__device__ void test_acosh()
{
static_assert((std::is_same<decltype(acosh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(acosh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(acosh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(acoshf(0)), float>::value), "");
static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), "");
assert(std::acosh(1) == 0);
assert(std::acosh(1.) == 0);
assert(std::acosh(1.f) == 0);
}
__device__ void test_asinh()
{
static_assert((std::is_same<decltype(asinh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(asinh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(asinh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(asinhf(0)), float>::value), "");
static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), "");
assert(asinh(0) == 0);
assert(asinh(0.) == 0);
assert(asinh(0.f) == 0);
}
__device__ void test_atanh()
{
static_assert((std::is_same<decltype(atanh((float)0)), float>::value), "");
static_assert((std::is_same<decltype(atanh((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(atanh((double)0)), double>::value), "");
static_assert((std::is_same<decltype(atanhf(0)), float>::value), "");
static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), "");
assert(atanh(0) == 0);
assert(atanh(0.) == 0);
assert(atanh(0.f) == 0);
}
__device__ void test_cbrt()
{
static_assert((std::is_same<decltype(cbrt((float)0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrt((double)0)), double>::value), "");
static_assert((std::is_same<decltype(cbrtf(0)), float>::value), "");
static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), "");
assert(cbrt(1) == 1);
assert(cbrt(1.) == 1);
assert(cbrt(1.f) == 1);
}
__device__ void test_copysign()
{
static_assert((std::is_same<decltype(copysign((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign((double)0, (double)0)), double>::value), "");
// CUDA's copysign(float, double) currently returns a float, in violation
// of the spec. We can't easily change this, so accept either one.
static_assert(
(std::is_same<decltype(copysign((float)0, (double)0)), double>::value ||
std::is_same<decltype(copysign((float)0, (double)0)), float>::value),
"");
static_assert((std::is_same<decltype(copysignf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(copysign((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::copysign(1, 1) == 1);
assert(std::copysign(1., 1) == 1);
assert(std::copysign(1.f, 1) == 1);
assert(std::copysign(1, 1.) == 1);
assert(std::copysign(1., 1.) == 1);
assert(std::copysign(1.f, 1.) == 1);
assert(std::copysign(1, 1.f) == 1);
assert(std::copysign(1., 1.f) == 1);
assert(std::copysign(1.f, 1.f) == 1);
}
__device__ void test_erf()
{
static_assert((std::is_same<decltype(erf((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erf((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erf((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erff(0)), float>::value), "");
static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
assert(erf(0) == 0);
assert(erf(0.) == 0);
assert(erf(0.f) == 0);
}
__device__ void test_erfc()
{
static_assert((std::is_same<decltype(erfc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(erfc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(erfc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(erfcf(0)), float>::value), "");
static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
assert(erfc(0) == 1);
assert(erfc(0.) == 1);
assert(erfc(0.f) == 1);
}
__device__ void test_exp2()
{
static_assert((std::is_same<decltype(exp2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(exp2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(exp2f(0)), float>::value), "");
static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
assert(exp2(1) == 2);
assert(exp2(1.) == 2);
assert(exp2(1.f) == 2);
}
__device__ void test_expm1()
{
static_assert((std::is_same<decltype(expm1((float)0)), float>::value), "");
static_assert((std::is_same<decltype(expm1((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1((double)0)), double>::value), "");
static_assert((std::is_same<decltype(expm1f(0)), float>::value), "");
static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
assert(expm1(0) == 0);
assert(expm1(0.) == 0);
assert(expm1(0.f) == 0);
}
__device__ void test_fdim()
{
static_assert((std::is_same<decltype(fdim((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fdimf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fdim((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fdim(1, 0) == 1);
assert(std::fdim(1., 0) == 1);
assert(std::fdim(1.f, 0) == 1);
assert(std::fdim(1, 0.) == 1);
assert(std::fdim(1., 0.) == 1);
assert(std::fdim(1.f, 0.) == 1);
assert(std::fdim(1, 0.f) == 1);
assert(std::fdim(1., 0.f) == 1);
assert(std::fdim(1.f, 0.f) == 1);
}
__device__ void test_fma()
{
static_assert((std::is_same<decltype(fma((bool)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (float)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (int)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (long)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((float)0, (float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fma((bool)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((char)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((unsigned)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (int)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (long)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fma((double)0, (double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaf(0,0,0)), float>::value), "");
static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fma(1, 1, 1) == 2);
assert(std::fma(1., 1, 1) == 2);
assert(std::fma(1.f, 1, 1) == 2);
assert(std::fma(1, 1., 1) == 2);
assert(std::fma(1., 1., 1) == 2);
assert(std::fma(1.f, 1., 1) == 2);
assert(std::fma(1, 1.f, 1) == 2);
assert(std::fma(1., 1.f, 1) == 2);
assert(std::fma(1.f, 1.f, 1) == 2);
assert(std::fma(1, 1, 1.) == 2);
assert(std::fma(1., 1, 1.) == 2);
assert(std::fma(1.f, 1, 1.) == 2);
assert(std::fma(1, 1., 1.) == 2);
assert(std::fma(1., 1., 1.) == 2);
assert(std::fma(1.f, 1., 1.) == 2);
assert(std::fma(1, 1.f, 1.) == 2);
assert(std::fma(1., 1.f, 1.) == 2);
assert(std::fma(1.f, 1.f, 1.) == 2);
assert(std::fma(1, 1, 1.f) == 2);
assert(std::fma(1., 1, 1.f) == 2);
assert(std::fma(1.f, 1, 1.f) == 2);
assert(std::fma(1, 1., 1.f) == 2);
assert(std::fma(1., 1., 1.f) == 2);
assert(std::fma(1.f, 1., 1.f) == 2);
assert(std::fma(1, 1.f, 1.f) == 2);
assert(std::fma(1., 1.f, 1.f) == 2);
assert(std::fma(1.f, 1.f, 1.f) == 2);
}
__device__ void test_fmax()
{
static_assert((std::is_same<decltype(fmax((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmaxf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmax((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmax(1, 0) == 1);
assert(std::fmax(1., 0) == 1);
assert(std::fmax(1.f, 0) == 1);
assert(std::fmax(1, 0.) == 1);
assert(std::fmax(1., 0.) == 1);
assert(std::fmax(1.f, 0.) == 1);
assert(std::fmax(1, 0.f) == 1);
assert(std::fmax(1., 0.f) == 1);
assert(std::fmax(1.f, 0.f) == 1);
}
__device__ void test_fmin()
{
static_assert((std::is_same<decltype(fmin((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(fminf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(fmin((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::fmin(1, 0) == 0);
assert(std::fmin(1., 0) == 0);
assert(std::fmin(1.f, 0) == 0);
assert(std::fmin(1, 0.) == 0);
assert(std::fmin(1., 0.) == 0);
assert(std::fmin(1.f, 0.) == 0);
assert(std::fmin(1, 0.f) == 0);
assert(std::fmin(1., 0.f) == 0);
assert(std::fmin(1.f, 0.f) == 0);
}
__device__ void test_hypot()
{
static_assert((std::is_same<decltype(hypot((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(hypotf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(hypot((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::hypot(3, 4) == 5);
assert(std::hypot(3, 4.) == 5);
assert(std::hypot(3, 4.f) == 5);
assert(std::hypot(3., 4) == 5);
assert(std::hypot(3., 4.) == 5);
assert(std::hypot(3., 4.f) == 5);
assert(std::hypot(3.f, 4) == 5);
assert(std::hypot(3.f, 4.) == 5);
assert(std::hypot(3.f, 4.f) == 5);
}
__device__ void test_ilogb()
{
static_assert((std::is_same<decltype(ilogb((float)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((bool)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned short)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned int)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((unsigned long long)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb((double)0)), int>::value), "");
static_assert((std::is_same<decltype(ilogbf(0)), int>::value), "");
static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
assert(ilogb(1) == 0);
assert(ilogb(1.) == 0);
assert(ilogb(1.f) == 0);
}
__device__ void test_lgamma()
{
static_assert((std::is_same<decltype(lgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(lgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(lgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
assert(lgamma(1) == 0);
assert(lgamma(1.) == 0);
assert(lgamma(1.f) == 0);
}
__device__ void test_llrint()
{
static_assert((std::is_same<decltype(llrint((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llrintf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
assert(llrint(1) == 1LL);
assert(llrint(1.) == 1LL);
#if CUDA_VERSION > 7050
assert(llrint(1.f) == 1LL);
#endif
}
__device__ void test_llround()
{
static_assert((std::is_same<decltype(llround((float)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((bool)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned short)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned int)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((unsigned long long)0)), long long>::value), "");
static_assert((std::is_same<decltype(llround((double)0)), long long>::value), "");
static_assert((std::is_same<decltype(llroundf(0)), long long>::value), "");
static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
assert(llround(1) == 1LL);
assert(llround(1.) == 1LL);
assert(llround(1.f) == 1LL);
}
__device__ void test_log1p()
{
static_assert((std::is_same<decltype(log1p((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log1p((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log1p((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log1pf(0)), float>::value), "");
static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
assert(log1p(0) == 0);
assert(log1p(0.) == 0);
assert(log1p(0.f) == 0);
}
__device__ void test_log2()
{
static_assert((std::is_same<decltype(log2((float)0)), float>::value), "");
static_assert((std::is_same<decltype(log2((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(log2((double)0)), double>::value), "");
static_assert((std::is_same<decltype(log2f(0)), float>::value), "");
static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
assert(log2(1) == 0);
assert(log2(1.) == 0);
assert(log2(1.f) == 0);
}
__device__ void test_logb()
{
static_assert((std::is_same<decltype(logb((float)0)), float>::value), "");
static_assert((std::is_same<decltype(logb((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(logb((double)0)), double>::value), "");
static_assert((std::is_same<decltype(logbf(0)), float>::value), "");
static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
assert(logb(1) == 0);
assert(logb(1.) == 0);
assert(logb(1.f) == 0);
}
__device__ void test_lrint()
{
static_assert((std::is_same<decltype(lrint((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lrint((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lrintf(0)), long>::value), "");
static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
assert(lrint(1) == 1L);
assert(lrint(1.) == 1L);
#if CUDA_VERSION > 7050
assert(lrint(1.f) == 1L);
#endif
}
__device__ void test_lround()
{
static_assert((std::is_same<decltype(lround((float)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((bool)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned short)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned int)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((unsigned long long)0)), long>::value), "");
static_assert((std::is_same<decltype(lround((double)0)), long>::value), "");
static_assert((std::is_same<decltype(lroundf(0)), long>::value), "");
static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
assert(lround(1) == 1L);
assert(lround(1.) == 1L);
assert(lround(1.f) == 1L);
}
__device__ void test_nan()
{
static_assert((std::is_same<decltype(nan("")), double>::value), "");
static_assert((std::is_same<decltype(nanf("")), float>::value), "");
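    // Also sanity-check the runtime behavior: nan("") should produce a
    // quiet NaN, which is the only value that is unordered with itself.
    assert(isnan(nan("")));
    assert(isnan(nanf("")));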
}
__device__ void test_nearbyint()
{
static_assert((std::is_same<decltype(nearbyint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(nearbyintf(0)), float>::value), "");
static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
assert(nearbyint(1) == 1);
assert(nearbyint(1.) == 1);
assert(nearbyint(1.f) == 1);
}
__device__ void test_nextafter()
{
static_assert((std::is_same<decltype(nextafter((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafterf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(nextafter((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
//assert(nextafter(0,1) == hexfloat<double>(0x1, 0, -1074));
// Invoke all our overloads. Even though we don't check the exact result
// (this is pretty annoying to do for this function), we make sure to *use*
// the results so that these function calls can't be DCE'ed.
assert(nextafter(0, 1) != 0);
assert(nextafter(0, 1.) != 0);
assert(nextafter(0, 1.f) != 0);
assert(nextafter(0., 1) != 0);
assert(nextafter(0., 1.) != 0);
assert(nextafter(0., 1.f) != 0);
assert(nextafter(0.f, 1) != 0);
assert(nextafter(0.f, 1.) != 0);
assert(nextafter(0.f, 1.f) != 0);
}
__device__ void test_remainder()
{
static_assert((std::is_same<decltype(remainder((float)0, (float)0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((bool)0, (float)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((unsigned short)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((double)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder((float)0, (double)0)), double>::value), "");
static_assert((std::is_same<decltype(remainderf(0,0)), float>::value), "");
static_assert((std::is_same<decltype(remainder((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::remainder(1.5, 1) == -.5);
assert(std::remainder(1.5, 1.) == -.5);
assert(std::remainder(1.5, 1.f) == -.5);
assert(std::remainder(1.5f, 1) == -.5);
assert(std::remainder(1.5f, 1.) == -.5);
assert(std::remainder(1.5f, 1.f) == -.5);
assert(std::remainder(2, 1) == 0);
assert(std::remainder(2, 1.) == 0);
assert(std::remainder(2, 1.f) == 0);
}
__device__ void test_remquo()
{
int ip;
static_assert((std::is_same<decltype(remquo((float)0, (float)0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((bool)0, (float)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((double)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo((float)0, (double)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquof(0,0, &ip)), float>::value), "");
static_assert((std::is_same<decltype(remquo((int)0, (int)0, &ip)), double>::value), "");
static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
assert(std::remquo(1, 1, &ip) == 0);
assert(std::remquo(1, 1., &ip) == 0);
assert(std::remquo(1, 1.f, &ip) == 0);
assert(std::remquo(0.5, 1, &ip) == 0.5);
assert(std::remquo(0.5, 1., &ip) == 0.5);
assert(std::remquo(0.5, 1.f, &ip) == 0.5);
assert(std::remquo(0.5f, 1, &ip) == 0.5);
assert(std::remquo(0.5f, 1., &ip) == 0.5);
assert(std::remquo(0.5f, 1.f, &ip) == 0.5);
}
__device__ void test_rint()
{
static_assert((std::is_same<decltype(rint((float)0)), float>::value), "");
static_assert((std::is_same<decltype(rint((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(rint((double)0)), double>::value), "");
static_assert((std::is_same<decltype(rintf(0)), float>::value), "");
static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");
assert(rint(1) == 1);
assert(rint(1.) == 1);
assert(rint(1.f) == 1);
}
__device__ void test_round()
{
static_assert((std::is_same<decltype(round((float)0)), float>::value), "");
static_assert((std::is_same<decltype(round((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(round((int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(round((double)0)), double>::value), "");
static_assert((std::is_same<decltype(roundf(0)), float>::value), "");
static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
assert(round(1) == 1);
assert(round(1.) == 1);
assert(round(1.f) == 1);
}
__device__ void test_scalbln()
{
static_assert((std::is_same<decltype(scalbln((float)0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln((bool)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned short)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned int)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((unsigned long long)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbln((double)0, (long)0)), double>::value), "");
static_assert((std::is_same<decltype(scalblnf(0, (long)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbln(1, 1) == 2);
assert(std::scalbln(1, 1.) == 2);
assert(std::scalbln(1, 1.f) == 2);
assert(std::scalbln(1., 1) == 2);
assert(std::scalbln(1., 1.) == 2);
assert(std::scalbln(1., 1.f) == 2);
assert(std::scalbln(1.f, 1) == 2);
assert(std::scalbln(1.f, 1.) == 2);
assert(std::scalbln(1.f, 1.f) == 2);
}
__device__ void test_scalbn()
{
static_assert((std::is_same<decltype(scalbn((float)0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn((bool)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned short)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned int)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((unsigned long long)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbn((double)0, (int)0)), double>::value), "");
static_assert((std::is_same<decltype(scalbnf(0, (int)0)), float>::value), "");
static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
assert(std::scalbn(1, 1) == 2);
assert(std::scalbn(1, 1.) == 2);
assert(std::scalbn(1, 1.f) == 2);
assert(std::scalbn(1., 1) == 2);
assert(std::scalbn(1., 1.) == 2);
assert(std::scalbn(1., 1.f) == 2);
assert(std::scalbn(1.f, 1) == 2);
assert(std::scalbn(1.f, 1.) == 2);
assert(std::scalbn(1.f, 1.f) == 2);
}
__device__ void test_tgamma()
{
static_assert((std::is_same<decltype(tgamma((float)0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(tgamma((double)0)), double>::value), "");
static_assert((std::is_same<decltype(tgammaf(0)), float>::value), "");
static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
assert(tgamma(1) == 1);
assert(tgamma(1.) == 1);
assert(tgamma(1.f) == 1);
}
__device__ void test_trunc()
{
static_assert((std::is_same<decltype(trunc((float)0)), float>::value), "");
static_assert((std::is_same<decltype(trunc((bool)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned short)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned int)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((unsigned long long)0)), double>::value), "");
static_assert((std::is_same<decltype(trunc((double)0)), double>::value), "");
static_assert((std::is_same<decltype(truncf(0)), float>::value), "");
static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
assert(trunc(1) == 1);
assert(trunc(1.) == 1);
assert(trunc(1.f) == 1);
}
__global__ void tests()
{
test_abs();
test_acos();
test_asin();
test_atan();
test_atan2();
test_ceil();
test_cos();
test_cosh();
test_exp();
test_fabs();
test_floor();
test_fmod();
test_frexp();
test_ldexp();
test_log();
test_log10();
test_modf();
test_pow();
test_sin();
test_sinh();
test_sqrt();
test_tan();
test_tanh();
test_signbit();
test_fpclassify();
test_isfinite();
test_isnormal();
test_isgreater();
test_isgreaterequal();
test_isinf();
test_isless();
test_islessequal();
test_islessgreater();
test_isnan();
test_isunordered();
test_acosh();
test_asinh();
test_atanh();
test_cbrt();
test_copysign();
test_erf();
test_erfc();
test_exp2();
test_expm1();
test_fdim();
test_fma();
test_fmax();
test_fmin();
test_hypot();
test_ilogb();
test_lgamma();
test_llrint();
test_llround();
test_log1p();
test_log2();
test_logb();
test_lrint();
test_lround();
test_nan();
test_nearbyint();
test_nextafter();
test_remainder();
test_remquo();
test_rint();
test_round();
test_scalbln();
test_scalbn();
test_tgamma();
test_trunc();
}
int main() {
tests<<<1,1>>>();
cudaError_t err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
printf("CUDA error %d\n", (int)err);
return 1;
}
printf("Success!\n");
return 0;
}
#else
#include <stdio.h>
// No C++11; test is a nop.
int main() {
printf("Success!\n");
return 0;
}
#endif // __cplusplus < 201103L
|
d3e0a1a13383ee12a17657d871590023d76bd290.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IcmEncoder.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <hiprand/hiprand_kernel.h>
namespace faiss {
namespace gpu {
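// Dynamically sized shared-memory scratch buffer; each kernel launch below
// sizes it via the third launch parameter, and the block reductions use it.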
extern __shared__ char smem[];
/** encode using iterative conditional mode
*
* For subcode cm of a vector, we fix the other subcodes cj (j != m)
 * and then find the optimal value of cm (cm = 1,...,K) that
 * minimizes the objective function.
*
* @param uterm precomputed unary terms, size (M, n, K)
* @param bterm precomputed binary terms, size (M1, M2, K1, K2)
* @param codes output vector encodings, size (n, M)
* @param M number of codebooks
* @param K number of codewords in a codebook
 * @param m index of the subcode being updated (all others are held fixed)
*/
__global__ void runIcmEncodeStep(
const float* uterm,
const float* bterm,
int32_t* codes,
int M,
int K,
int m) {
using KVPair = Pair<float, int>;
int id = blockIdx.x; // each block takes care of one vector
int code = threadIdx.x; // each thread takes care of one possible code
// compute the objective value by look-up tables
KVPair obj(0.0f, code);
obj.k = uterm[id * K + code];
#pragma unroll
for (int m2 = 0; m2 < M; m2++) {
if (m2 == m) {
continue;
}
int32_t code2 = codes[id * M + m2];
obj.k += bterm[m2 * K * K + code * K + code2];
}
// find the minimum objective value and the corresponding code
__syncthreads();
obj = blockReduceAll<KVPair, Min<KVPair>, false, false>(
obj, Min<KVPair>(), (KVPair*)smem);
if (code == 0) {
codes[id * M + m] = obj.v;
}
}
/** compute reconstruction error for each vector
*
* decoded_x[i] = \sum codebooks[m][codes[i][m]], m = 1,..,M
* obj[i] = ||x[i] - decoded_x[i]||^2
*
* @param x input vectors, size [n, dims]
* @param codebooks codebooks, size [M, K, dims]
* @param codes vector codes, size [n, M]
* @param obj output reconstruction errors, size [n]
* @param n number of input vectors
 * @param M number of codebooks
 * @param K number of codewords in a codebook
 * @param dims dimension of the input vectors
*/
__global__ void runEvaluation(
const float* x,
const float* codebooks,
const int32_t* codes,
float* obj, // output
int n,
int M,
int K,
int dims) {
int id = blockIdx.x; // each block takes care of one vector
int d = threadIdx.x; // each thread takes care of one dimension
float acc = 0.0f;
#pragma unroll
for (int m = 0; m < M; m++) {
int32_t code = codes[id * M + m];
acc += codebooks[m * K * dims + code * dims + d];
}
acc -= x[id * dims + d];
acc = acc * acc;
// sum values of all dimensions together
__syncthreads();
acc = blockReduceAllSum<float, false, false>(acc, (float*)smem);
if (d == 0) {
obj[id] = acc;
}
}
/** perturb vector codes
*
* repeat nperts times:
* codes[i][randint(0, M)] = randint(0, K)
*
* @param seed random seed
* @param codes vector codes, size [n, M]
* @param n number of input vectors
* @param M number of codebooks
* @param K number of codewords in a codebook
 * @param nperts number of subcodes to perturb in each vector
*/
__global__ void runCodesPerturbation(
int seed,
int32_t* codes,
int n,
int M,
int K,
int nperts) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n) {
return;
}
// we have to initialize the state
hiprandState_t state;
hiprand_init(seed, id, 0, &state);
for (int i = 0; i < nperts; i++) {
int pos = int(hiprand_uniform(&state) * M);
int32_t val = int32_t(hiprand_uniform(&state) * K);
codes[id * M + pos] = val;
}
}
/** select the best codes by reconstruction errors
*
* if objs[i] < best_objs[i]:
* best_objs[i] = objs[i]
* best_codes[i] = codes[i]
*
* @param bestCodes the best codes we've encountered, size [n, M]
* @param bestObjs min reconstruction errors we've encountered, size [n]
* @param codes input vector codes, size [n, M]
* @param objs reconstruction errors of input vector codes, size [n]
* @param n number of input vectors
*/
__global__ void runCodesSelection(
int32_t* bestCodes,
float* bestObjs,
const int32_t* codes,
const float* objs,
int n,
int M) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n || objs[id] >= bestObjs[id]) {
return;
}
bestObjs[id] = objs[id];
#pragma unroll
for (int m = 0; m < M; m++) {
bestCodes[id * M + m] = codes[id * M + m];
}
}
/** add L2 norm of codewords in a codebook to the unary terms
*
 * uterm[i][k] += norm[k]
*
* @param uterm unary terms, size [n, K]
* @param norm L2 norm of each codeword in a codebook, size [K]
* @param K number of codewords in a codebook
*/
__global__ void runNormAddition(float* uterm, const float* norm, int K) {
int id = blockIdx.x;
int code = threadIdx.x;
uterm[id * K + code] += norm[code];
}
IcmEncoderImpl::IcmEncoderImpl(
int M,
int K,
int dims,
GpuResourcesProvider* prov,
int device)
: M(M), K(K), dims(dims), prov(prov), device(device) {
res = prov->getResources();
}
void IcmEncoderImpl::computeUnaryTerms(
float* uterm, // output, [M, n, K]
const float* x, // [n, d]
const float* codebooks, // [M, K, d]
int n) const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
DeviceTensor<float, 2, true> vecs(const_cast<float*>(x), {n, dims});
for (int m = 0; m < M; m++) {
auto cPtr = const_cast<float*>(codebooks + m * K * dims);
auto bPtr = uterm + m * n * K;
DeviceTensor<float, 2, true> ci(cPtr, {K, dims});
DeviceTensor<float, 2, true> bi(bPtr, {n, K});
runMatrixMult(
bi, false, vecs, false, ci, true, -2.0f, 0.0f, handle, stream);
}
DeviceTensor<float, 2, true> c(
const_cast<float*>(codebooks), {M * K, dims});
DeviceTensor<float, 1, true> norm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M * K});
runL2Norm(c, true, norm, true, stream);
for (int m = 0; m < M; m++) {
auto uPtr = uterm + m * n * K;
auto nPtr = norm.data() + m * K;
hipLaunchKernelGGL(( runNormAddition), dim3(n), dim3(K), 0, stream, uPtr, nPtr, K);
}
}
void IcmEncoderImpl::computeBinaryTerms(float* bterm, const float* codebooks)
const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
for (int m1 = 0; m1 < M; m1++) {
for (int m2 = 0; m2 < M; m2++) {
auto ptr1 = const_cast<float*>(codebooks + m1 * K * dims);
auto ptr2 = const_cast<float*>(codebooks + m2 * K * dims);
auto ptr3 = bterm + m1 * M * K * K + m2 * K * K;
DeviceTensor<float, 2, true> c1(ptr1, {K, dims});
DeviceTensor<float, 2, true> c2(ptr2, {K, dims});
DeviceTensor<float, 2, true> b(ptr3, {K, K});
runMatrixMult(
b, false, c1, false, c2, true, 2.0f, 0.0f, handle, stream);
}
}
}
void IcmEncoderImpl::setBinaryTerm(const float* codebooksHost) {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
codebooks = toDeviceNonTemporary<float, 3>(
res.get(),
device,
const_cast<float*>(codebooksHost),
stream,
{M, K, dims});
bterm = DeviceTensor<float, 4, true>(
res.get(), makeDevAlloc(AllocType::Other, stream), {M, M, K, K});
computeBinaryTerms(bterm.data(), codebooks.data());
}
void IcmEncoderImpl::encode(
int32_t* codesHost,
const float* xHost,
const float* codebooksHost,
std::mt19937& gen,
int n,
int nperts,
int ilsIters,
int icmIters) const {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
auto codes = toDeviceTemporary<int32_t, 2>(
res.get(), device, const_cast<int32_t*>(codesHost), stream, {n, M});
auto x = toDeviceTemporary<float, 2>(
res.get(), device, const_cast<float*>(xHost), stream, {n, dims});
// compute unary terms
DeviceTensor<float, 3, true> uterm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M, n, K});
computeUnaryTerms(uterm.data(), x.data(), codebooks.data(), n);
DeviceTensor<int32_t, 2, true> bestCodes(
res.get(), makeTempAlloc(AllocType::Other, stream), {n, M});
fromDevice<int32_t, 2>(codes, bestCodes.data(), stream);
DeviceTensor<float, 1, true> bestObjs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
DeviceTensor<float, 1, true> objs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
// compute how much shared memory we need
const int evaluateSmem = sizeof(float) * (dims + kWarpSize - 1) / kWarpSize;
const int encodeSmem =
sizeof(Pair<float, int>) * (K + kWarpSize - 1) / kWarpSize;
// compute the reconstruction error for each vector
hipLaunchKernelGGL(( runEvaluation), dim3(n), dim3(dims), evaluateSmem, stream,
x.data(),
codebooks.data(),
codes.data(),
bestObjs.data(),
n,
M,
K,
dims);
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
for (int i = 0; i < ilsIters; i++) {
hipLaunchKernelGGL(( runCodesPerturbation), dim3(numBlocks), dim3(blockSize), 0, stream,
gen(), codes.data(), n, M, K, nperts);
// perform icm encoding
for (int j = 0; j < icmIters; j++) {
for (int m = 0; m < M; m++) {
hipLaunchKernelGGL(( runIcmEncodeStep), dim3(n), dim3(K), encodeSmem, stream,
uterm[m].data(),
bterm[m].data(),
codes.data(),
M,
K,
m);
}
}
// compute the reconstruction error for each vector given codes
hipLaunchKernelGGL(( runEvaluation), dim3(n), dim3(dims), evaluateSmem, stream,
x.data(),
codebooks.data(),
codes.data(),
objs.data(),
n,
M,
K,
dims);
// if objs[i] < best_objs[i], replace best_codes[i] with codes[i]
hipLaunchKernelGGL(( runCodesSelection), dim3(numBlocks), dim3(blockSize), 0, stream,
bestCodes.data(),
bestObjs.data(),
codes.data(),
objs.data(),
n,
M);
codes.copyFrom(bestCodes, stream);
}
// copy back to host memory
fromDevice<int32_t, 2>(bestCodes, codesHost, stream);
}
} // namespace gpu
} // namespace faiss
| d3e0a1a13383ee12a17657d871590023d76bd290.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IcmEncoder.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <curand_kernel.h>
namespace faiss {
namespace gpu {
extern __shared__ char smem[];
/** encode using iterative conditional mode
*
* For subcode cm of a vector, we fix the other subcodes cj (j != m)
 * and then find the optimal value of cm (cm = 1,...,K) that
 * minimizes the objective function.
*
* @param uterm precomputed unary terms, size (M, n, K)
* @param bterm precomputed binary terms, size (M1, M2, K1, K2)
* @param codes output vector encodings, size (n, M)
* @param M number of codebooks
* @param K number of codewords in a codebook
 * @param m index of the subcode being updated (all others are held fixed)
*/
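// Concretely, thread c of block i evaluates (uterm and bterm are already
// sliced for subcode m by the caller):
//   f(c) = uterm[i*K + c] + sum_{m2 != m} bterm[m2*K*K + c*K + codes[i*M + m2]]
// and the block-wide min reduction below selects argmin_c f(c).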
__global__ void runIcmEncodeStep(
const float* uterm,
const float* bterm,
int32_t* codes,
int M,
int K,
int m) {
using KVPair = Pair<float, int>;
int id = blockIdx.x; // each block takes care of one vector
int code = threadIdx.x; // each thread takes care of one possible code
// compute the objective value by look-up tables
KVPair obj(0.0f, code);
obj.k = uterm[id * K + code];
#pragma unroll
for (int m2 = 0; m2 < M; m2++) {
if (m2 == m) {
continue;
}
int32_t code2 = codes[id * M + m2];
obj.k += bterm[m2 * K * K + code * K + code2];
}
// find the minimum objective value and the corresponding code
__syncthreads();
obj = blockReduceAll<KVPair, Min<KVPair>, false, false>(
obj, Min<KVPair>(), (KVPair*)smem);
if (code == 0) {
codes[id * M + m] = obj.v;
}
}
/** compute reconstruction error for each vector
*
* decoded_x[i] = \sum codebooks[m][codes[i][m]], m = 1,..,M
* obj[i] = ||x[i] - decoded_x[i]||^2
*
* @param x input vectors, size [n, dims]
* @param codebooks codebooks, size [M, K, dims]
* @param codes vector codes, size [n, M]
* @param obj output reconstruction errors, size [n]
* @param n number of input vectors
 * @param M number of codebooks
 * @param K number of codewords in a codebook
 * @param dims dimension of the input vectors
*/
__global__ void runEvaluation(
const float* x,
const float* codebooks,
const int32_t* codes,
float* obj, // output
int n,
int M,
int K,
int dims) {
int id = blockIdx.x; // each block takes care of one vector
int d = threadIdx.x; // each thread takes care of one dimension
float acc = 0.0f;
#pragma unroll
for (int m = 0; m < M; m++) {
int32_t code = codes[id * M + m];
acc += codebooks[m * K * dims + code * dims + d];
}
acc -= x[id * dims + d];
acc = acc * acc;
// sum values of all dimensions together
__syncthreads();
acc = blockReduceAllSum<float, false, false>(acc, (float*)smem);
if (d == 0) {
obj[id] = acc;
}
}
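// Note: this kernel is launched with one thread per dimension, so it assumes
// dims does not exceed the maximum block size (1024 threads).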
/** perturb vector codes
*
* repeat nperts times:
* codes[i][randint(0, M)] = randint(0, K)
*
* @param seed random seed
* @param codes vector codes, size [n, M]
* @param n number of input vectors
* @param M number of codebooks
* @param K number of codewords in a codebook
 * @param nperts number of subcodes to perturb in each vector
*/
__global__ void runCodesPerturbation(
int seed,
int32_t* codes,
int n,
int M,
int K,
int nperts) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n) {
return;
}
// we have to initialize the state
curandState_t state;
curand_init(seed, id, 0, &state);
for (int i = 0; i < nperts; i++) {
int pos = int(curand_uniform(&state) * M);
int32_t val = int32_t(curand_uniform(&state) * K);
codes[id * M + pos] = val;
}
}
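// Note: positions are drawn with replacement, so fewer than nperts distinct
// subcodes may actually change. Also, curand_uniform returns values in
// (0, 1], so the scaled index can reach M (resp. K) in the rare exact-1.0
// case.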
/** select the best codes by reconstruction errors
*
* if objs[i] < best_objs[i]:
* best_objs[i] = objs[i]
* best_codes[i] = codes[i]
*
* @param bestCodes the best codes we've encountered, size [n, M]
* @param bestObjs min reconstruction errors we've encountered, size [n]
* @param codes input vector codes, size [n, M]
* @param objs reconstruction errors of input vector codes, size [n]
* @param n number of input vectors
*/
__global__ void runCodesSelection(
int32_t* bestCodes,
float* bestObjs,
const int32_t* codes,
const float* objs,
int n,
int M) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n || objs[id] >= bestObjs[id]) {
return;
}
bestObjs[id] = objs[id];
#pragma unroll
for (int m = 0; m < M; m++) {
bestCodes[id * M + m] = codes[id * M + m];
}
}
/** add L2 norm of codewords in a codebook to the unary terms
*
 * uterm[i][k] += norm[k]
*
* @param uterm unary terms, size [n, K]
* @param norm L2 norm of each codeword in a codebook, size [K]
* @param K number of codewords in a codebook
*/
__global__ void runNormAddition(float* uterm, const float* norm, int K) {
int id = blockIdx.x;
int code = threadIdx.x;
uterm[id * K + code] += norm[code];
}
IcmEncoderImpl::IcmEncoderImpl(
int M,
int K,
int dims,
GpuResourcesProvider* prov,
int device)
: M(M), K(K), dims(dims), prov(prov), device(device) {
res = prov->getResources();
}
void IcmEncoderImpl::computeUnaryTerms(
float* uterm, // output, [M, n, K]
const float* x, // [n, d]
const float* codebooks, // [M, K, d]
int n) const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
DeviceTensor<float, 2, true> vecs(const_cast<float*>(x), {n, dims});
for (int m = 0; m < M; m++) {
auto cPtr = const_cast<float*>(codebooks + m * K * dims);
auto bPtr = uterm + m * n * K;
DeviceTensor<float, 2, true> ci(cPtr, {K, dims});
DeviceTensor<float, 2, true> bi(bPtr, {n, K});
runMatrixMult(
bi, false, vecs, false, ci, true, -2.0f, 0.0f, handle, stream);
}
DeviceTensor<float, 2, true> c(
const_cast<float*>(codebooks), {M * K, dims});
DeviceTensor<float, 1, true> norm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M * K});
runL2Norm(c, true, norm, true, stream);
for (int m = 0; m < M; m++) {
auto uPtr = uterm + m * n * K;
auto nPtr = norm.data() + m * K;
runNormAddition<<<n, K, 0, stream>>>(uPtr, nPtr, K);
}
}
void IcmEncoderImpl::computeBinaryTerms(float* bterm, const float* codebooks)
const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
for (int m1 = 0; m1 < M; m1++) {
for (int m2 = 0; m2 < M; m2++) {
auto ptr1 = const_cast<float*>(codebooks + m1 * K * dims);
auto ptr2 = const_cast<float*>(codebooks + m2 * K * dims);
auto ptr3 = bterm + m1 * M * K * K + m2 * K * K;
DeviceTensor<float, 2, true> c1(ptr1, {K, dims});
DeviceTensor<float, 2, true> c2(ptr2, {K, dims});
DeviceTensor<float, 2, true> b(ptr3, {K, K});
runMatrixMult(
b, false, c1, false, c2, true, 2.0f, 0.0f, handle, stream);
}
}
}
void IcmEncoderImpl::setBinaryTerm(const float* codebooksHost) {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
codebooks = toDeviceNonTemporary<float, 3>(
res.get(),
device,
const_cast<float*>(codebooksHost),
stream,
{M, K, dims});
bterm = DeviceTensor<float, 4, true>(
res.get(), makeDevAlloc(AllocType::Other, stream), {M, M, K, K});
computeBinaryTerms(bterm.data(), codebooks.data());
}
void IcmEncoderImpl::encode(
int32_t* codesHost,
const float* xHost,
const float* codebooksHost,
std::mt19937& gen,
int n,
int nperts,
int ilsIters,
int icmIters) const {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
auto codes = toDeviceTemporary<int32_t, 2>(
res.get(), device, const_cast<int32_t*>(codesHost), stream, {n, M});
auto x = toDeviceTemporary<float, 2>(
res.get(), device, const_cast<float*>(xHost), stream, {n, dims});
// compute unary terms
DeviceTensor<float, 3, true> uterm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M, n, K});
computeUnaryTerms(uterm.data(), x.data(), codebooks.data(), n);
DeviceTensor<int32_t, 2, true> bestCodes(
res.get(), makeTempAlloc(AllocType::Other, stream), {n, M});
fromDevice<int32_t, 2>(codes, bestCodes.data(), stream);
DeviceTensor<float, 1, true> bestObjs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
DeviceTensor<float, 1, true> objs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
// compute how much shared memory we need
const int evaluateSmem = sizeof(float) * (dims + kWarpSize - 1) / kWarpSize;
const int encodeSmem =
sizeof(Pair<float, int>) * (K + kWarpSize - 1) / kWarpSize;
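    // (blockReduceAll keeps one partial result per warp, hence
    //  ceil(block size / kWarpSize) slots of the reduced type)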
// compute the reconstruction error for each vector
runEvaluation<<<n, dims, evaluateSmem, stream>>>(
x.data(),
codebooks.data(),
codes.data(),
bestObjs.data(),
n,
M,
K,
dims);
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
for (int i = 0; i < ilsIters; i++) {
runCodesPerturbation<<<numBlocks, blockSize, 0, stream>>>(
gen(), codes.data(), n, M, K, nperts);
// perform icm encoding
for (int j = 0; j < icmIters; j++) {
for (int m = 0; m < M; m++) {
runIcmEncodeStep<<<n, K, encodeSmem, stream>>>(
uterm[m].data(),
bterm[m].data(),
codes.data(),
M,
K,
m);
}
}
// compute the reconstruction error for each vector given codes
runEvaluation<<<n, dims, evaluateSmem, stream>>>(
x.data(),
codebooks.data(),
codes.data(),
objs.data(),
n,
M,
K,
dims);
// if objs[i] < best_objs[i], replace best_codes[i] with codes[i]
runCodesSelection<<<numBlocks, blockSize, 0, stream>>>(
bestCodes.data(),
bestObjs.data(),
codes.data(),
objs.data(),
n,
M);
codes.copyFrom(bestCodes, stream);
}
// copy back to host memory
fromDevice<int32_t, 2>(bestCodes, codesHost, stream);
}
} // namespace gpu
} // namespace faiss
|
d2508636dc746608c7a4eabd5f34b0ca777ddc2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Nearest neighbor search
 * Roughly 20% of the city cells are stores; for every cell we want the
 * distance to the nearest store.
 *
 * With a Kd-tree each query costs O(log M), so answering all N cells costs
 * O(N log M). The multi-source BFS used here costs O(N) in total.
 * GPU implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>
#define CITY_SIZE 400
#define NUM_GPU_BLOCKS 4
#define NUM_GPU_THREADS 32
#define NUM_FEATURES 1
struct ZoneType {
int type;
int level;
};
struct ZoningPlan {
ZoneType zones[CITY_SIZE][CITY_SIZE];
};
struct DistanceMap {
int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};
struct Point2D {
int x;
int y;
__host__ __device__
Point2D() : x(0), y(0) {}
__host__ __device__
Point2D(int x, int y) : x(x), y(y) {}
};
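// Minimal linear congruential generator: 1103515245 and 12345 are the
// classic ANSI C rand() constants, and the mask keeps the low 31 bits.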
__host__ __device__
unsigned int rand(unsigned int* randx) {
*randx = *randx * 1103515245 + 12345;
return (*randx)&2147483647;
}
__host__ __device__
float randf(unsigned int* randx) {
return rand(randx) / (float(2147483647) + 1);
}
__host__ __device__
float randf(unsigned int* randx, float a, float b) {
return randf(randx) * (b - a) + a;
}
__host__ __device__
int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
float rnd = randf(randx, 0, cdf[num-1]);
for (int i = 0; i < num; ++i) {
if (rnd <= cdf[i]) return i;
}
return num - 1;
}
__host__ __device__
int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
if (num == 0) return 0;
float cdf[40];
cdf[0] = pdf[0];
for (int i = 1; i < num; ++i) {
if (pdf[i] >= 0) {
cdf[i] = cdf[i - 1] + pdf[i];
} else {
cdf[i] = cdf[i - 1];
}
}
return sampleFromCdf(randx, cdf, num);
}
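// Example: pdf = {0.8, 0.2} gives cdf = {0.8, 1.0}; a uniform draw r in
// [0, cdf[1]) then yields index 0 when r <= 0.8 and index 1 otherwise,
// so indices are sampled in proportion to the (unnormalized) pdf.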
/**
 * Generate a random zoning plan according to zoneTypeDistribution and
 * record the locations of all stores (zone type 1).
 */
__host__
void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution, std::vector<Point2D>& hostStoreLocations) {
std::vector<float> numRemainings(zoneTypeDistribution.size());
for (int i = 0; i < zoneTypeDistribution.size(); ++i) {
numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
}
unsigned int randx = 0;
for (int r = 0; r < CITY_SIZE; ++r) {
for (int c = 0; c < CITY_SIZE; ++c) {
int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
zoningPlan.zones[r][c].type = type;
numRemainings[type] -= 1;
switch (type) {
case 0:
break;
case 1:
hostStoreLocations.push_back(Point2D(c, r));
break;
}
}
}
}
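// Sampling note: numRemainings acts as an unnormalized pdf that is
// decremented as cells are assigned, so the finished plan matches the
// requested distribution exactly (up to rounding), like drawing without
// replacement.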
/**
 * Compute, for every cell, the distance to the nearest store.
 * Each thread seeds a BFS from the stores in its stripe of cells; all
 * threads share the distance map and relax it with atomicMin.
 */
__global__
void computeDistanceToStore(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
	// per-thread BFS queue (fixed capacity; see the caveat after this kernel)
Point2D queue[1000];
int queue_begin = 0;
int queue_end = 0;
int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS);
	// seed the queue with the stores in this thread's stripe of cells
	for (int i = 0; i < stride; ++i) {
		int cell = idx * stride + i;
		if (cell >= CITY_SIZE * CITY_SIZE) break;
		int r = cell / CITY_SIZE;
		int c = cell % CITY_SIZE;
		if (zoningPLan->zones[r][c].type == 1) {
			queue[queue_end++] = Point2D(c, r);
			distanceMap->distances[r][c][0] = 0;
		}
	}
	// BFS: pop a cell and relax its four neighbors via atomicMin on the shared map
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
/**
* Compute the distance to the nearest store (single-threaded version)
*/
__global__
void computeDistanceToStoreBySingleThread(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
Point2D queue[1000];
int queue_begin = 0;
int queue_end = 0;
for (int i = 0; i < CITY_SIZE * CITY_SIZE; ++i) {
int r = i / CITY_SIZE;
int c = i % CITY_SIZE;
if (zoningPLan->zones[r][c].type == 1) {
queue[queue_end++] = Point2D(c, r);
distanceMap->distances[r][c][0] = 0;
}
}
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
int main()
{
time_t start, end;
ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
std::vector<Point2D> hostStoreLocations;
DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));
// Initialize distances to a large value (memset fills every byte with 0x0F)
memset(hostDistanceMap, 9999, sizeof(DistanceMap));
memset(hostDistanceMap2, 9999, sizeof(DistanceMap));
std::vector<float> zoneTypeDistribution(2);
zoneTypeDistribution[0] = 0.8f;
zoneTypeDistribution[1] = 0.2f;
// Generate the initial plan
// At the same time, build the list of store coordinates
start = clock();
generateZoningPlan(*hostZoningPlan, zoneTypeDistribution, hostStoreLocations);
end = clock();
printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostZoningPlan->zones[r][c].type);
}
printf("\n");
}
printf("\n");
*/
// Copy the initial plan to the device buffer
ZoningPlan* devZoningPlan;
if (hipMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)) != hipSuccess) {
printf("memory allocation error!\n");
exit(1);
}
if (hipMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), hipMemcpyHostToDevice) != hipSuccess) {
printf("memory copy error!\n");
exit(1);
}
// Allocate a device buffer for the distance map
DistanceMap* devDistanceMap;
hipMalloc((void**)&devDistanceMap, sizeof(DistanceMap));
///////////////////////////////////////////////////////////////////////
// Compute the distance to the nearest store with a single thread
// Copy the distances to the device buffer
hipMemcpy(devDistanceMap, hostDistanceMap2, sizeof(DistanceMap), hipMemcpyHostToDevice);
// Compute the distance to the nearest store
start = clock();
hipLaunchKernelGGL(( computeDistanceToStoreBySingleThread), dim3(1), dim3(1), 0, 0, devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStoreBySingleThread: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// Copy the distances back to the CPU buffer
hipMemcpy(hostDistanceMap2, devDistanceMap, sizeof(DistanceMap), hipMemcpyDeviceToHost);
///////////////////////////////////////////////////////////////////////
// Compute the distance to the nearest store with multiple threads
// Copy the distances to the device buffer
hipMemcpy(devDistanceMap, hostDistanceMap, sizeof(DistanceMap), hipMemcpyHostToDevice);
// Compute the distance to the nearest store in parallel
start = clock();
hipLaunchKernelGGL(( computeDistanceToStore), dim3(NUM_GPU_BLOCKS), dim3(NUM_GPU_THREADS), 0, 0, devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStore: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// Copy the distances back to the CPU buffer
hipMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), hipMemcpyDeviceToHost);
// Compare the single-threaded and multi-threaded results
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
if (hostDistanceMap->distances[r][c][0] != hostDistanceMap2->distances[r][c][0]) {
printf("ERROR!\n");
}
}
}
printf("\n");
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap2->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
*/
// Free the device buffers
hipFree(devZoningPlan);
hipFree(devDistanceMap);
// Free the CPU buffers
free(hostZoningPlan);
free(hostDistanceMap);
free(hostDistanceMap2);
hipDeviceReset();
}
| d2508636dc746608c7a4eabd5f34b0ca777ddc2c.cu | /**
* Nearest neighbor search
* Given a map in which store zones are placed with 20% probability,
* compute the Manhattan distance from every residential zone to the nearest store zone.
* With an algorithm such as a kd-tree, finding the nearest store for one residential zone costs O(log M),
* so checking every residential zone costs O(N log M).
* This implementation instead propagates distances outward from every store zone to its neighbors, so the whole map costs only O(N).
* Parallelizing on the GPU shortens the computation time even further.
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>
#define CITY_SIZE 400
#define NUM_GPU_BLOCKS 4
#define NUM_GPU_THREADS 32
#define NUM_FEATURES 1
struct ZoneType {
int type;
int level;
};
struct ZoningPlan {
ZoneType zones[CITY_SIZE][CITY_SIZE];
};
struct DistanceMap {
int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};
struct Point2D {
int x;
int y;
__host__ __device__
Point2D() : x(0), y(0) {}
__host__ __device__
Point2D(int x, int y) : x(x), y(y) {}
};
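// Simple linear congruential PRNG (classic ANSI C constants), usable from both host and device code.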
__host__ __device__
unsigned int rand(unsigned int* randx) {
*randx = *randx * 1103515245 + 12345;
return (*randx)&2147483647;
}
__host__ __device__
float randf(unsigned int* randx) {
return rand(randx) / (float(2147483647) + 1);
}
__host__ __device__
float randf(unsigned int* randx, float a, float b) {
return randf(randx) * (b - a) + a;
}
__host__ __device__
int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
float rnd = randf(randx, 0, cdf[num-1]);
for (int i = 0; i < num; ++i) {
if (rnd <= cdf[i]) return i;
}
return num - 1;
}
__host__ __device__
int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
if (num == 0) return 0;
float cdf[40];
cdf[0] = pdf[0];
for (int i = 1; i < num; ++i) {
if (pdf[i] >= 0) {
cdf[i] = cdf[i - 1] + pdf[i];
} else {
cdf[i] = cdf[i - 1];
}
}
return sampleFromCdf(randx, cdf, num);
}
/**
* Generate a zoning plan.
*/
__host__
void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution, std::vector<Point2D>& hostStoreLocations) {
std::vector<float> numRemainings(zoneTypeDistribution.size());
for (int i = 0; i < zoneTypeDistribution.size(); ++i) {
numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
}
unsigned int randx = 0;
for (int r = 0; r < CITY_SIZE; ++r) {
for (int c = 0; c < CITY_SIZE; ++c) {
int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
zoningPlan.zones[r][c].type = type;
numRemainings[type] -= 1;
switch (type) {
case 0:
break;
case 1:
hostStoreLocations.push_back(Point2D(c, r));
break;
}
}
}
}
/**
* Compute the distance to the nearest store (multi-threaded version)
*/
__global__
void computeDistanceToStore(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// Queue
Point2D queue[1000];
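// NOTE: fixed-capacity per-thread queue with no overflow check; a BFS frontier larger than 1000 cells overruns this buffer.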
int queue_begin = 0;
int queue_end = 0;
int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS);
// Search for stores within the partitioned region
for (int i = 0; i < stride; ++i) {
int r = (idx * stride + i) / CITY_SIZE;
int c = (idx * stride + i) % CITY_SIZE;
if (zoningPLan->zones[r][c].type == 1) {
queue[queue_end++] = Point2D(c, r);
distanceMap->distances[r][c][0] = 0;
}
}
// Generate the distance map
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
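// Relax the four von Neumann neighbors: atomicMin returns the previous value,
// so old > d + 1 means this thread improved the cell and must re-expand it.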
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
/**
* Compute the distance to the nearest store (single-threaded version)
*/
__global__
void computeDistanceToStoreBySingleThread(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
Point2D queue[1000];
int queue_begin = 0;
int queue_end = 0;
for (int i = 0; i < CITY_SIZE * CITY_SIZE; ++i) {
int r = i / CITY_SIZE;
int c = i % CITY_SIZE;
if (zoningPLan->zones[r][c].type == 1) {
queue[queue_end++] = Point2D(c, r);
distanceMap->distances[r][c][0] = 0;
}
}
while (queue_begin < queue_end) {
Point2D pt = queue[queue_begin++];
int d = distanceMap->distances[pt.y][pt.x][0];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y-1);
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x, pt.y+1);
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x-1, pt.y);
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][0], d + 1);
if (old > d + 1) {
queue[queue_end++] = Point2D(pt.x+1, pt.y);
}
}
}
}
int main()
{
time_t start, end;
ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
std::vector<Point2D> hostStoreLocations;
DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));
// Initialize distances to a large value (memset fills every byte with 0x0F)
memset(hostDistanceMap, 9999, sizeof(DistanceMap));
memset(hostDistanceMap2, 9999, sizeof(DistanceMap));
std::vector<float> zoneTypeDistribution(2);
zoneTypeDistribution[0] = 0.8f;
zoneTypeDistribution[1] = 0.2f;
// Generate the initial plan
// At the same time, build the list of store coordinates
start = clock();
generateZoningPlan(*hostZoningPlan, zoneTypeDistribution, hostStoreLocations);
end = clock();
printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostZoningPlan->zones[r][c].type);
}
printf("\n");
}
printf("\n");
*/
// Copy the initial plan to the device buffer
ZoningPlan* devZoningPlan;
if (cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)) != cudaSuccess) {
printf("memory allocation error!\n");
exit(1);
}
if (cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice) != cudaSuccess) {
printf("memory copy error!\n");
exit(1);
}
// Allocate a device buffer for the distance map
DistanceMap* devDistanceMap;
cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap));
///////////////////////////////////////////////////////////////////////
// Compute the distance to the nearest store with a single thread
// Copy the distances to the device buffer
cudaMemcpy(devDistanceMap, hostDistanceMap2, sizeof(DistanceMap), cudaMemcpyHostToDevice);
// Compute the distance to the nearest store
start = clock();
computeDistanceToStoreBySingleThread<<<1, 1>>>(devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStoreBySingleThread: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// Copy the distances back to the CPU buffer
cudaMemcpy(hostDistanceMap2, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost);
///////////////////////////////////////////////////////////////////////
// Compute the distance to the nearest store with multiple threads
// Copy the distances to the device buffer
cudaMemcpy(devDistanceMap, hostDistanceMap, sizeof(DistanceMap), cudaMemcpyHostToDevice);
// Compute the distance to the nearest store in parallel
start = clock();
computeDistanceToStore<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap);
end = clock();
printf("computeDistanceToStore: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// Copy the distances back to the CPU buffer
cudaMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost);
// Compare the single-threaded and multi-threaded results
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
if (hostDistanceMap->distances[r][c][0] != hostDistanceMap2->distances[r][c][0]) {
printf("ERROR!\n");
}
}
}
printf("\n");
/*
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap2->distances[r][c][0]);
}
printf("\n");
}
printf("\n");
*/
// Free the device buffers
cudaFree(devZoningPlan);
cudaFree(devDistanceMap);
// Free the CPU buffers
free(hostZoningPlan);
free(hostDistanceMap);
free(hostDistanceMap2);
cudaDeviceReset();
}
|
ff63ac0249cfa1ea81140836c0225b8cc9caf275.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EXAMPLE OF TILED MATRIX-MATRIX MULTIPLICATION CHAPTER 4
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 4
__global__
void matrixMulKernel(float *P, float *M, float *N, int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
if ( Row < Width && Col < Width ) {
float pValue = 0;
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < Width/TILE_WIDTH; ph++) {
// Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = M[Row * Width + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * Width + Col];
__syncthreads();
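// Tiles are fully loaded; accumulate this thread's partial dot product from shared memory.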
for(int k = 0; k < TILE_WIDTH; k++){
pValue += Mds[ty][k]*Nds[k][tx];
}
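// Barrier before the next phase so no thread overwrites a tile another thread is still reading.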
__syncthreads();
}
P[Row*Width+Col] = pValue;
}
}
void matrixMul(float *h_P, float *h_M, float *h_N, int dim) {
int size = (dim*dim)*sizeof(float);
float *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// indices need to be linearized first.
CHECK_ERROR(hipMalloc((void**)&d_M, size));
CHECK_ERROR(hipMalloc((void**)&d_N, size));
CHECK_ERROR(hipMalloc((void**)&d_P, size)); // assume square matrices
// copy h_Pin to device memory
hipMemcpy(d_M, h_M, size, hipMemcpyHostToDevice);
hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
dim3 dimGrid(ceil(dim / (float)TILE_WIDTH), ceil(dim / (float)TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, d_M, d_N, dim);
//3. copy the result matrix d_P back from device memory
hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost);
// Free device vectors
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
}
int main(int argc, char *argv[]) {
float *h_M, *h_N, *h_P;
int dim = 16; // assume square matrices
h_M = (float*)malloc(sizeof(float)*dim*dim);
h_N = (float*)malloc(sizeof(float)*dim*dim);
h_P = (float*)malloc(sizeof(float)*dim*dim);
// fill M and N with random float numbers
srand(time(NULL));
for (int i = 0; i < dim ; i++) {
for (int j = 0; j < dim ; j++) {
h_M[i*dim+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
h_N[i*dim+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
}
}
// perform matrix multiplication
matrixMul(h_P, h_M, h_N, dim);
/*********************************************************************************************************
// verify the result
int valueIsCorrect = 1;
float mult[dim][dim];
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
mult[i][j] = 0.0;
}
}
// Multiply h_M and h_N and store the result in array mult.
for(int i = 0; i < dim; ++i) {
for(int j = 0; j < dim; ++j) {
for(int k = 0; k < dim; ++k) {
mult[i][j] += h_M[i*dim+k] * h_N[k*dim+j];
}
}
}
for (int i = 0; i < dim && valueIsCorrect; i++) {
for (int j = 0; j < dim; j++) {
printf("h_P[%d] != mult[%d][%d] --|-- %f != %f\n", (i*dim+j), i, j, h_P[i*dim+j], mult[i][j]);
if (h_P[i*dim+j] != mult[i][j]) {
valueIsCorrect = 0;
printf("see error above.....\n");
break;
}
}
}
********************************************************************************************************
* IT MAKES NO SENSE TO VERIFY THE CORRECTNESS OF THE RESULT ON THE HOST; SEE SECTIONS 3.2 THROUGH 6.0 AT THE FOLLOWING LINK:
* http://docs.nvidia.com/cuda/floating-point/
********************************************************************************************************/
// Free host memory
free(h_M);
free(h_N);
free(h_P);
printf("ok multiplication completed with success!\n");
/*
if (valueIsCorrect) {
printf("ok multiplication completed with success!\n");
}
else printf("somthing was wrong!\n");
*/
return 0;
}
| ff63ac0249cfa1ea81140836c0225b8cc9caf275.cu | /*
* EXAMPLE OF TILED MATRIX-MATRIX MULTIPLICATION CHAPTER 4
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <math.h>
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 4
__global__
void matrixMulKernel(float *P, float *M, float *N, int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
if ( Row < Width && Col < Width ) {
float pValue = 0;
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < Width/TILE_WIDTH; ph++) {
// Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = M[Row * Width + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * Width + Col];
__syncthreads();
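// Tiles are fully loaded; accumulate this thread's partial dot product from shared memory.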
for(int k = 0; k < TILE_WIDTH; k++){
pValue += Mds[ty][k]*Nds[k][tx];
}
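// Barrier before the next phase so no thread overwrites a tile another thread is still reading.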
__syncthreads();
}
P[Row*Width+Col] = pValue;
}
}
void matrixMul(float *h_P, float *h_M, float *h_N, int dim) {
int size = (dim*dim)*sizeof(float);
float *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// indices need to be linearized first.
CHECK_ERROR(cudaMalloc((void**)&d_M, size));
CHECK_ERROR(cudaMalloc((void**)&d_N, size));
CHECK_ERROR(cudaMalloc((void**)&d_P, size)); // assume square matrices
// copy h_Pin to device memory
cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
dim3 dimGrid(ceil(dim / (float)TILE_WIDTH), ceil(dim / (float)TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
matrixMulKernel<<<dimGrid, dimBlock>>>(d_P, d_M, d_N, dim);
//3. copy the result matrix d_P back from device memory
cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);
// Free device vectors
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
}
int main(int argc, char *argv[]) {
float *h_M, *h_N, *h_P;
int dim = 16; // assume square matrices
h_M = (float*)malloc(sizeof(float)*dim*dim);
h_N = (float*)malloc(sizeof(float)*dim*dim);
h_P = (float*)malloc(sizeof(float)*dim*dim);
// fill M and N with random float numbers
srand(time(NULL));
for (int i = 0; i < dim ; i++) {
for (int j = 0; j < dim ; j++) {
h_M[i*dim+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
h_N[i*dim+j] = ((((float)rand() / (float)(RAND_MAX)) * 10));
}
}
// perform matrix multiplication
matrixMul(h_P, h_M, h_N, dim);
/*********************************************************************************************************
// verify the result
int valueIsCorrect = 1;
float mult[dim][dim];
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
mult[i][j] = 0.0;
}
}
// Multiply h_M and h_N and store the result in array mult.
for(int i = 0; i < dim; ++i) {
for(int j = 0; j < dim; ++j) {
for(int k = 0; k < dim; ++k) {
mult[i][j] += h_M[i*dim+k] * h_N[k*dim+j];
}
}
}
for (int i = 0; i < dim && valueIsCorrect; i++) {
for (int j = 0; j < dim; j++) {
printf("h_P[%d] != mult[%d][%d] --|-- %f != %f\n", (i*dim+j), i, j, h_P[i*dim+j], mult[i][j]);
if (h_P[i*dim+j] != mult[i][j]) {
valueIsCorrect = 0;
printf("see error above.....\n");
break;
}
}
}
********************************************************************************************************
* IT MAKES NO SENSE TO VERIFY THE CORRECTNESS OF THE RESULT ON THE HOST; SEE SECTIONS 3.2 THROUGH 6.0 AT THE FOLLOWING LINK:
* http://docs.nvidia.com/cuda/floating-point/
********************************************************************************************************/
// Free host memory
free(h_M);
free(h_N);
free(h_P);
printf("ok multiplication completed with success!\n");
/*
if (valueIsCorrect) {
printf("ok multiplication completed with success!\n");
}
else printf("somthing was wrong!\n");
*/
return 0;
}
|
37a455a8d06d66e42954a5208477d71aa704ea85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<fstream>
#include<string>
#include<algorithm>
#include<vector>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<limits>
#include<iomanip>
using namespace std;
#define BLOCK_SIZE 8
__global__ void kernel(int *dim,float *k_k, int *Xlocation, int *Ylocation, int *Zlocation, int *Width, int *Height, int *Depth, float *FixedTemperature, float *OldTemp, float *NewTemp, int *Xaxis, int *Yaxis, int *Zaxis, int *HeatSources){
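// *dim holds the ASCII code of the dimension digit parsed from the input file: 50 ('2') selects the 2-D path, 51 ('3') the 3-D path.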
if (*dim ==50){
__shared__ float temp[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
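// Shared-memory tile with a one-cell halo on every side for the 5-point stencil.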
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
int leni, lenj;
if (i <= *Yaxis && j <= *Xaxis){
int threadj = threadIdx.x + 1;
int threadi = threadIdx.y + 1;
temp[threadi][threadj] = OldTemp[j+i*(*Yaxis)];
if (blockIdx.x==(int)*Xaxis/BLOCK_SIZE){
lenj = *Xaxis%BLOCK_SIZE;
}
else{
lenj = BLOCK_SIZE;
}
if (blockIdx.y ==(int)*Yaxis/BLOCK_SIZE){
leni = *Yaxis%BLOCK_SIZE;
}
else{
leni = BLOCK_SIZE;
}
if (threadIdx.x < 1){
if (j < 1){
temp[threadi][threadj - 1] = *(OldTemp + i*(*Yaxis) + j);
}
else{
temp[threadi][threadj - 1] = *(OldTemp + i*(*Yaxis) + j - 1);
}
if (j >= *Xaxis - lenj){
temp[threadi][threadj + lenj] = *(OldTemp + i*(*Yaxis)+j +lenj-1);
}
else{
temp[threadi][threadj + lenj] = *(OldTemp + i*(*Yaxis)+j +lenj);
}
}
if (threadIdx.y < 1 ){
if(i<1){
temp[threadi - 1][threadj] = *(OldTemp + i*(*Yaxis)+j);
}
else{
temp[threadi - 1][threadj] = *(OldTemp + (i-1)*(*Yaxis)+j);
}
if(i >= (*Yaxis) - leni){
temp[threadi+leni][threadj] = *(OldTemp+(i+leni-1)*(*Yaxis)+j);
}
else{
temp[threadi+leni][threadj] = *(OldTemp+(i+leni)*(*Yaxis)+j);
}
}
__syncthreads();
if (i < *Yaxis && j < *Xaxis){
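// Explicit update of the 2-D heat equation: current value plus k times the 5-point Laplacian.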
NewTemp[j+i*(*Yaxis)] = temp[threadi][threadj] + *k_k*(temp[threadi-1][threadj] + temp[threadi][threadj -1] + temp[threadi+1][threadj] + temp[threadi][threadj+1] - 4*temp[threadi][threadj]);
for (int p = 0; p < *HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)){
NewTemp[j+(i)*(*Yaxis)] = FixedTemperature[p];
}
}
OldTemp[j+i*(*Yaxis)] = NewTemp[j+i*(*Yaxis)];
}
}
}
else if (*dim == 51){
__shared__ float temp[BLOCK_SIZE + 2][BLOCK_SIZE + 2][BLOCK_SIZE + 2];
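// Shared-memory tile with a one-cell halo on every side for the 7-point stencil.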
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int leni, lenj, lenk;
if (i <= *Yaxis && j <= *Xaxis && k <= *Zaxis){
int threadj = threadIdx.x + 1;
int threadi = threadIdx.y + 1;
int threadk = threadIdx.z + 1;
temp[threadi][threadj][threadk] = OldTemp[(j+i*(*Yaxis))*(*Zaxis) + k];
if (blockIdx.x==(int)*Xaxis/BLOCK_SIZE){
lenj = *Xaxis%BLOCK_SIZE;
}
else{
lenj = BLOCK_SIZE;
}
if (blockIdx.y ==(int)*Yaxis/BLOCK_SIZE){
leni = *Yaxis%BLOCK_SIZE;
}
else{
leni = BLOCK_SIZE;
}
if (blockIdx.z == (int)*Zaxis/BLOCK_SIZE){
lenk = *Zaxis%BLOCK_SIZE;
}
else{
lenk = BLOCK_SIZE;
}
if (threadIdx.x < 1){
if (j < 1){
temp[threadi][threadj - 1][threadk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj - 1][threadk] = *(OldTemp + (j-1+i*(*Yaxis))*(*Zaxis) + k);
}
if (j >= *Xaxis - lenj){
temp[threadi][threadj + lenj][threadk] = *(OldTemp + (j+lenj - 1 + i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj + lenj][threadk] = *(OldTemp + (j+lenj + i*(*Yaxis))*(*Zaxis) + k);
}
}
if (threadIdx.y < 1 ){
if(i<1){
temp[threadi - 1][threadj][threadk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi - 1][threadj][threadk] = *(OldTemp + (j+(i-1)*(*Yaxis))*(*Zaxis) + k);
}
if(i >= (*Yaxis) - leni){
temp[threadi+leni][threadj][threadk] = *(OldTemp + (j+(i + leni-1)*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi+leni][threadj][threadk] = *(OldTemp + (j+(i + leni)*(*Yaxis))*(*Zaxis) + k);
}
}
if (threadIdx.z < 1 ){
if(k<1){
temp[threadi][threadj][threadk-1] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj][threadk-1] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k - 1);
}
if(k >= (*Zaxis) - lenk){
temp[threadi][threadj][threadk + lenk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k + lenk - 1);
}
else{
temp[threadi][threadj][threadk + lenk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k + lenk);
}
}
__syncthreads();
if (i < *Yaxis && j < *Xaxis && k<*Zaxis){
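// Explicit update of the 3-D heat equation: current value plus k times the 7-point Laplacian.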
NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = temp[threadi][threadj][threadk] + *k_k*(temp[threadi-1][threadj][threadk] + temp[threadi][threadj -1][threadk] + temp[threadi+1][threadj][threadk] + temp[threadi][threadj+1][threadk]+temp[threadi][threadj][threadk-1]+temp[threadi][threadj][threadk+1] - 6*temp[threadi][threadj][threadk]);
for (int p = 0; p < *HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)&& (k >= Zlocation[p])&& (k < Zlocation[p] + Depth[p])){
NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = FixedTemperature[p];
}
}
OldTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k];
}
}
}
}
int main(int argc, char const *argv[])
{
int StringLength = 0;
ifstream inFile(argv[1]);
string strOneLine;
int ParaNumber= 0;
int dim = 0;
vector <string> parastring;
float k_k, StartTemp, FixedTemperature[26];
int TimeSteps, Xaxis, Yaxis, Zaxis, Xlocation[26], Ylocation[26], Zlocation[26], Width[26], Height[26], Depth[26];
int HeatSources;
string CurrString;
int len;
while(inFile)
{
getline(inFile, strOneLine);
StringLength = strOneLine.length();
if (StringLength == 0 || int(strOneLine[0]) == 13) continue; // skip empty lines and bare carriage returns (ASCII 13)
for(int i = 0; i<StringLength; i ++){
if (strOneLine.at(i) == ' ') continue;
if (strOneLine.at(i) == '#')
{
break;
}
if (strOneLine.at(i) == ',') {
ParaNumber = ParaNumber + 1;
parastring.push_back(CurrString);
CurrString.clear();
continue;
}
if (ParaNumber == 0){
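// The first non-blank character is the dimension digit; its ASCII code is stored ('2' == 50, '3' == 51).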
dim = int(strOneLine.at(i));
ParaNumber=ParaNumber+1;
break;
}
else
{
CurrString.push_back(strOneLine.at(i));
}
}
if(CurrString.length()!= 0){
parastring.push_back(CurrString);
CurrString.clear();
ParaNumber = ParaNumber + 1;
}
}
int *d_Xlocation, *d_Ylocation, *d_Zlocation, *d_Width, *d_Height, *d_Depth, *d_Xaxis, *d_Yaxis, *d_Zaxis, *d_dim, *d_HeatSources;
float *d_OldTemp, *d_NewTemp, *d_k, *d_FixedTemperature;
if (dim==50)
{
parastring[0].insert(0,1,'0');
k_k = atof(parastring[0].c_str());
TimeSteps = atoi(parastring[1].c_str());
Xaxis = atoi(parastring[2].c_str());
Yaxis = atoi(parastring[3].c_str());
StartTemp = atof(parastring[4].c_str());
HeatSources = (ParaNumber-5)/5;
for (int i=5;i<ParaNumber-5;i=i+5){
Xlocation[(i-5)/5] = atoi(parastring[i].c_str());
Ylocation[(i-5)/5] = atoi(parastring[i+1].c_str());
Width[(i-5)/5] = atoi(parastring[i+2].c_str());
Height[(i-5)/5] = atoi(parastring[i+3].c_str());
FixedTemperature[(i-5)/5] = atof(parastring[i+4].c_str());
}
//cout<<"Dimension = "<<dim<<endl;
//cout<<"k = "<<k_k<<endl;
//cout<<"Time Steps = "<<TimeSteps<<endl;
//cout<<"X Axis, Y axis = "<<Xaxis<<", "<<Yaxis<<endl;
//cout<<"Starting Temp = "<<StartTemp<<endl;
//for (int i=0; i<HeatSources; i++)
//{
// cout<<Xlocation[i]<<", "<<Ylocation[i]<<", "<<Width[i]<<", "<<Height[i]<<", "<<FixedTemperature[i]<<endl;
//}
if (Xaxis >= Yaxis){
len = Xaxis*Xaxis;
}
else{
len = Yaxis*Yaxis;
}
int size = (len)*sizeof(float);
float OldTemp[len] = {0};
float NewTemp[len] = {0};
for (int i = 0; i < Yaxis ; i ++){
for (int j = 0; j < Xaxis; j ++){
OldTemp[i*Yaxis + j] = StartTemp;
for (int p = 0; p < HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)){
OldTemp[i*Yaxis + j] = FixedTemperature[p];
}
}
}
}
hipMalloc((void **)&d_OldTemp, size);
hipMalloc((void **)&d_NewTemp, size);
hipMalloc((void **)&d_Xlocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Ylocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Xaxis, sizeof(int));
hipMalloc((void **)&d_Yaxis, sizeof(int));
hipMalloc((void **)&d_k, sizeof(float));
hipMalloc((void **)&d_Width, HeatSources*sizeof(int));
hipMalloc((void **)&d_Height, HeatSources*sizeof(int));
hipMalloc((void **)&d_FixedTemperature, HeatSources*sizeof(float));
hipMalloc((void **)&d_Zlocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Depth, HeatSources*sizeof(int));
hipMalloc((void **)&d_Zaxis, sizeof(int));
hipMalloc((void **)&d_dim, sizeof(int));
hipMalloc((void **)&d_HeatSources, sizeof(int));
hipMemcpy(d_OldTemp, OldTemp, size, hipMemcpyHostToDevice);
hipMemcpy(d_NewTemp, NewTemp, size, hipMemcpyHostToDevice);
hipMemcpy(d_Xlocation, Xlocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Ylocation, Ylocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Xaxis, &Xaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Yaxis, &Yaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_k, &k_k, sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Width, Width, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Height, Height, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_FixedTemperature, FixedTemperature, HeatSources*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Zlocation, Zlocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Zaxis, &Zaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Depth, Depth, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_dim, &dim, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_HeatSources, &HeatSources, sizeof(int), hipMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((int)ceil(((Xaxis + BLOCK_SIZE - 1)/BLOCK_SIZE)),(int)(ceil(((Yaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))));
for (int i = 0; i < TimeSteps; i++){
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block), 0, 0, d_dim, d_k, d_Xlocation, d_Ylocation, d_Zlocation, d_Width, d_Height, d_Depth, d_FixedTemperature, d_OldTemp, d_NewTemp, d_Xaxis, d_Yaxis, d_Zaxis, d_HeatSources);
}
hipMemcpy(NewTemp, d_NewTemp, size, hipMemcpyDeviceToHost);
hipFree(d_OldTemp);
hipFree(d_NewTemp);
hipFree(d_Xlocation);
hipFree(d_Ylocation);
hipFree(d_Xaxis);
hipFree(d_Yaxis);
hipFree(d_k);
hipFree(d_Width);
hipFree(d_Height);
hipFree(d_FixedTemperature);
hipFree(d_Zlocation);
hipFree(d_Zaxis);
hipFree(d_Depth);
hipFree(d_dim);
hipFree(d_HeatSources);
ofstream build ("heatOutput.csv", std::ofstream::out);
for(int i = 0; i <Yaxis; i++){
for(int j = 0; j <Xaxis - 1; j++){
build<<NewTemp[i*Yaxis + j]<<", ";
}
build<<NewTemp[i*Yaxis + Xaxis-1];
if(i != Yaxis -1){
build<<endl;
}
}
build.close();
}
if (dim ==51)
{
parastring[0].insert(0,1,'0');
k_k = atof(parastring[0].c_str());
TimeSteps = atoi(parastring[1].c_str());
Xaxis = atoi(parastring[2].c_str());
Yaxis = atoi(parastring[3].c_str());
Zaxis = atoi(parastring[4].c_str());
StartTemp = atof(parastring[5].c_str());
HeatSources = (ParaNumber-7)/7; // each 3-D heat source contributes seven parameters
for (int i=6;i<ParaNumber-6;i=i+7){
Xlocation[(i-6)/7] = atoi(parastring[i].c_str());
Ylocation[(i-6)/7] = atoi(parastring[i+1].c_str());
Zlocation[(i-6)/7] = atoi(parastring[i+2].c_str());
Width[(i-6)/7] = atoi(parastring[i+3].c_str());
Height[(i-6)/7] = atoi(parastring[i+4].c_str());
Depth[(i-6)/7] = atoi(parastring[i+5].c_str());
FixedTemperature[(i-6)/7] = atof(parastring[i+6].c_str());
}
//cout<<"Dimension = "<<dim<<endl;
//cout<<"k = "<<k_k<<endl;
//cout<<"Time Steps = "<<TimeSteps<<endl;
//cout<<"X Axis, Y axis, Z axis = "<<Xaxis<<", "<<Yaxis<<", "<<Zaxis<<endl;
//cout<<"Starting Temp = "<<StartTemp<<endl;
//cout<<"HeatSources = "<<HeatSources<<endl;
//for (int i=0; i<HeatSources; i++)
//{
// cout<<Xlocation[i]<<", "<<Ylocation[i]<<", "<<Zlocation[i]<<", "<<Width[i]<<", "<<Height[i]<<", "<<Depth[i]<<", "<<FixedTemperature[i]<<endl;
//}
if (Xaxis >= Yaxis && Xaxis >= Zaxis){
len = Xaxis*Xaxis*Xaxis;
}
else if (Yaxis >= Xaxis && Yaxis >= Zaxis){
len = Yaxis*Yaxis*Yaxis;
}
else if (Zaxis >= Xaxis && Zaxis >= Yaxis){
len = Zaxis*Zaxis*Zaxis;
}
int size = (len)*sizeof(float);
float OldTemp[len] = {0};
float NewTemp[len] = {0};
for(int k = 0; k <Zaxis; k++){
for (int i = 0; i < Yaxis ; i ++){
for (int j = 0; j < Xaxis; j ++){
OldTemp[(j+i*(Yaxis))*(Zaxis) + k] = StartTemp;
for (int p = 0; p < HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1) && (k > Zlocation[p] - 1) && (k<= Zlocation[p] + Depth[p] - 1)){
OldTemp[(j+i*(Yaxis))*(Zaxis) + k] = FixedTemperature[p];
}
}
}
}
}
hipMalloc((void **)&d_OldTemp, size);
hipMalloc((void **)&d_NewTemp, size);
hipMalloc((void **)&d_Xlocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Ylocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Xaxis, sizeof(int));
hipMalloc((void **)&d_Yaxis, sizeof(int));
hipMalloc((void **)&d_k, sizeof(float));
hipMalloc((void **)&d_Width, HeatSources*sizeof(int));
hipMalloc((void **)&d_Height, HeatSources*sizeof(int));
hipMalloc((void **)&d_FixedTemperature, HeatSources*sizeof(float));
hipMalloc((void **)&d_Zlocation, HeatSources*sizeof(int));
hipMalloc((void **)&d_Depth, HeatSources*sizeof(int));
hipMalloc((void **)&d_Zaxis, sizeof(int));
hipMalloc((void **)&d_dim, sizeof(int));
hipMalloc((void **)&d_HeatSources, sizeof(int));
hipMemcpy(d_OldTemp, OldTemp, size, hipMemcpyHostToDevice);
hipMemcpy(d_NewTemp, NewTemp, size, hipMemcpyHostToDevice);
hipMemcpy(d_Xlocation, Xlocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Ylocation, Ylocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Xaxis, &Xaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Yaxis, &Yaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_k, &k_k, sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Width, Width, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Height, Height, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_FixedTemperature, FixedTemperature, HeatSources*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Zlocation, Zlocation, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Zaxis, &Zaxis, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_Depth, Depth, HeatSources*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_dim, &dim, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_HeatSources, &HeatSources, sizeof(int), hipMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE,BLOCK_SIZE);
dim3 grid((int)ceil(((Xaxis + BLOCK_SIZE - 1)/BLOCK_SIZE)),(int)(ceil(((Yaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))),(int)(ceil(((Zaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))));
for (int i = 0; i < TimeSteps; i++){
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block), 0, 0, d_dim, d_k, d_Xlocation, d_Ylocation, d_Zlocation, d_Width, d_Height, d_Depth, d_FixedTemperature, d_OldTemp, d_NewTemp, d_Xaxis, d_Yaxis, d_Zaxis, d_HeatSources);
}
hipMemcpy(NewTemp, d_NewTemp, size, hipMemcpyDeviceToHost);
hipFree(d_OldTemp);
hipFree(d_NewTemp);
hipFree(d_Xlocation);
hipFree(d_Ylocation);
hipFree(d_Xaxis);
hipFree(d_Yaxis);
hipFree(d_k);
hipFree(d_Width);
hipFree(d_Height);
hipFree(d_FixedTemperature);
hipFree(d_Zlocation);
hipFree(d_Zaxis);
hipFree(d_Depth);
hipFree(d_dim);
hipFree(d_HeatSources);
ofstream build ("heatOutput.csv", std::ofstream::out);
for (int l = 0; l < Zaxis; l++){
for(int i = 0; i <Yaxis; i++){
for(int j = 0; j <Xaxis - 1; j++){
build<<NewTemp[(j+i*(Yaxis))*(Zaxis) + l]<<", ";
}
build<<NewTemp[(Xaxis - 1 +i*(Yaxis))*(Zaxis) + l];
if(l != Zaxis -1 || i != Yaxis -1){
build<<endl;
}
}
if (l != Zaxis - 1){
build<<endl;
}
}
build.close();
}
return 0;
}
| 37a455a8d06d66e42954a5208477d71aa704ea85.cu | #include<iostream>
#include<fstream>
#include<string>
#include<algorithm>
#include<vector>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<limits>
#include<iomanip>
using namespace std;
#define BLOCK_SIZE 8
__global__ void kernel(int *dim,float *k_k, int *Xlocation, int *Ylocation, int *Zlocation, int *Width, int *Height, int *Depth, float *FixedTemperature, float *OldTemp, float *NewTemp, int *Xaxis, int *Yaxis, int *Zaxis, int *HeatSources){
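// *dim holds the ASCII code of the dimension digit parsed from the input file: 50 ('2') selects the 2-D path, 51 ('3') the 3-D path.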
if (*dim ==50){
__shared__ float temp[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
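// Shared-memory tile with a one-cell halo on every side for the 5-point stencil.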
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
int leni, lenj;
if (i <= *Yaxis && j <= *Xaxis){
int threadj = threadIdx.x + 1;
int threadi = threadIdx.y + 1;
temp[threadi][threadj] = OldTemp[j+i*(*Yaxis)];
if (blockIdx.x==(int)*Xaxis/BLOCK_SIZE){
lenj = *Xaxis%BLOCK_SIZE;
}
else{
lenj = BLOCK_SIZE;
}
if (blockIdx.y ==(int)*Yaxis/BLOCK_SIZE){
leni = *Yaxis%BLOCK_SIZE;
}
else{
leni = BLOCK_SIZE;
}
if (threadIdx.x < 1){
if (j < 1){
temp[threadi][threadj - 1] = *(OldTemp + i*(*Yaxis) + j);
}
else{
temp[threadi][threadj - 1] = *(OldTemp + i*(*Yaxis) + j - 1);
}
if (j >= *Xaxis - lenj){
temp[threadi][threadj + lenj] = *(OldTemp + i*(*Yaxis)+j +lenj-1);
}
else{
temp[threadi][threadj + lenj] = *(OldTemp + i*(*Yaxis)+j +lenj);
}
}
if (threadIdx.y < 1 ){
if(i<1){
temp[threadi - 1][threadj] = *(OldTemp + i*(*Yaxis)+j);
}
else{
temp[threadi - 1][threadj] = *(OldTemp + (i-1)*(*Yaxis)+j);
}
if(i >= (*Yaxis) - leni){
temp[threadi+leni][threadj] = *(OldTemp+(i+leni-1)*(*Yaxis)+j);
}
else{
temp[threadi+leni][threadj] = *(OldTemp+(i+leni)*(*Yaxis)+j);
}
}
__syncthreads();
if (i < *Yaxis && j < *Xaxis){
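// Explicit update of the 2-D heat equation: current value plus k times the 5-point Laplacian.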
NewTemp[j+i*(*Yaxis)] = temp[threadi][threadj] + *k_k*(temp[threadi-1][threadj] + temp[threadi][threadj -1] + temp[threadi+1][threadj] + temp[threadi][threadj+1] - 4*temp[threadi][threadj]);
for (int p = 0; p < *HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)){
NewTemp[j+(i)*(*Yaxis)] = FixedTemperature[p];
}
}
OldTemp[j+i*(*Yaxis)] = NewTemp[j+i*(*Yaxis)];
}
}
}
else if (*dim == 51){
__shared__ float temp[BLOCK_SIZE + 2][BLOCK_SIZE + 2][BLOCK_SIZE + 2];
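// Shared-memory tile with a one-cell halo on every side for the 7-point stencil.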
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int leni, lenj, lenk;
if (i <= *Yaxis && j <= *Xaxis && k <= *Zaxis){
int threadj = threadIdx.x + 1;
int threadi = threadIdx.y + 1;
int threadk = threadIdx.z + 1;
temp[threadi][threadj][threadk] = OldTemp[(j+i*(*Yaxis))*(*Zaxis) + k];
if (blockIdx.x==(int)*Xaxis/BLOCK_SIZE){
lenj = *Xaxis%BLOCK_SIZE;
}
else{
lenj = BLOCK_SIZE;
}
if (blockIdx.y ==(int)*Yaxis/BLOCK_SIZE){
leni = *Yaxis%BLOCK_SIZE;
}
else{
leni = BLOCK_SIZE;
}
if (blockIdx.z == (int)*Zaxis/BLOCK_SIZE){
lenk = *Zaxis%BLOCK_SIZE;
}
else{
lenk = BLOCK_SIZE;
}
if (threadIdx.x < 1){
if (j < 1){
temp[threadi][threadj - 1][threadk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj - 1][threadk] = *(OldTemp + (j-1+i*(*Yaxis))*(*Zaxis) + k);
}
if (j >= *Xaxis - lenj){
temp[threadi][threadj + lenj][threadk] = *(OldTemp + (j+lenj - 1 + i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj + lenj][threadk] = *(OldTemp + (j+lenj + i*(*Yaxis))*(*Zaxis) + k);
}
}
if (threadIdx.y < 1 ){
if(i<1){
temp[threadi - 1][threadj][threadk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi - 1][threadj][threadk] = *(OldTemp + (j+(i-1)*(*Yaxis))*(*Zaxis) + k);
}
if(i >= (*Yaxis) - leni){
temp[threadi+leni][threadj][threadk] = *(OldTemp + (j+(i + leni-1)*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi+leni][threadj][threadk] = *(OldTemp + (j+(i + leni)*(*Yaxis))*(*Zaxis) + k);
}
}
if (threadIdx.z < 1 ){
if(k<1){
temp[threadi][threadj][threadk-1] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k);
}
else{
temp[threadi][threadj][threadk-1] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k - 1);
}
if(k >= (*Zaxis) - lenk){
temp[threadi][threadj][threadk + lenk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k + lenk - 1);
}
else{
temp[threadi][threadj][threadk + lenk] = *(OldTemp + (j+i*(*Yaxis))*(*Zaxis) + k + lenk);
}
}
__syncthreads();
if (i < *Yaxis && j < *Xaxis && k<*Zaxis){
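// Explicit update of the 3-D heat equation: current value plus k times the 7-point Laplacian.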
NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = temp[threadi][threadj][threadk] + *k_k*(temp[threadi-1][threadj][threadk] + temp[threadi][threadj -1][threadk] + temp[threadi+1][threadj][threadk] + temp[threadi][threadj+1][threadk]+temp[threadi][threadj][threadk-1]+temp[threadi][threadj][threadk+1] - 6*temp[threadi][threadj][threadk]);
for (int p = 0; p < *HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)&& (k >= Zlocation[p])&& (k < Zlocation[p] + Depth[p])){
NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = FixedTemperature[p];
}
}
OldTemp[(j+i*(*Yaxis))*(*Zaxis) + k] = NewTemp[(j+i*(*Yaxis))*(*Zaxis) + k];
}
}
}
}
int main(int argc, char const *argv[])
{
int StringLength = 0;
ifstream inFile(argv[1]);
string strOneLine;
int ParaNumber= 0;
int dim = 0;
vector <string> parastring;
float k_k, StartTemp, FixedTemperature[26];
int TimeSteps, Xaxis, Yaxis, Zaxis, Xlocation[26], Ylocation[26], Zlocation[26], Width[26], Height[26], Depth[26];
int HeatSources;
string CurrString;
int len;
while(inFile)
{
getline(inFile, strOneLine);
StringLength = strOneLine.length();
if (StringLength == 0 || int(strOneLine[0]) == 13) continue; // skip empty lines and bare carriage returns (ASCII 13)
for(int i = 0; i<StringLength; i ++){
if (strOneLine.at(i) == ' ') continue;
if (strOneLine.at(i) == '#')
{
break;
}
if (strOneLine.at(i) == ',') {
ParaNumber = ParaNumber + 1;
parastring.push_back(CurrString);
CurrString.clear();
continue;
}
if (ParaNumber == 0){
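// The first non-blank character is the dimension digit; its ASCII code is stored ('2' == 50, '3' == 51).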
dim = int(strOneLine.at(i));
ParaNumber=ParaNumber+1;
break;
}
else
{
CurrString.push_back(strOneLine.at(i));
}
}
if(CurrString.length()!= 0){
parastring.push_back(CurrString);
CurrString.clear();
ParaNumber = ParaNumber + 1;
}
}
int *d_Xlocation, *d_Ylocation, *d_Zlocation, *d_Width, *d_Height, *d_Depth, *d_Xaxis, *d_Yaxis, *d_Zaxis, *d_dim, *d_HeatSources;
float *d_OldTemp, *d_NewTemp, *d_k, *d_FixedTemperature;
if (dim==50)
{
parastring[0].insert(0,1,'0');
k_k = atof(parastring[0].c_str());
TimeSteps = atoi(parastring[1].c_str());
Xaxis = atoi(parastring[2].c_str());
Yaxis = atoi(parastring[3].c_str());
StartTemp = atof(parastring[4].c_str());
HeatSources = (ParaNumber-5)/5;
for (int i=5;i<ParaNumber-5;i=i+5){
Xlocation[(i-5)/5] = atoi(parastring[i].c_str());
Ylocation[(i-5)/5] = atoi(parastring[i+1].c_str());
Width[(i-5)/5] = atoi(parastring[i+2].c_str());
Height[(i-5)/5] = atoi(parastring[i+3].c_str());
FixedTemperature[(i-5)/5] = atof(parastring[i+4].c_str());
}
//cout<<"Dimension = "<<dim<<endl;
//cout<<"k = "<<k_k<<endl;
//cout<<"Time Steps = "<<TimeSteps<<endl;
//cout<<"X Axis, Y axis = "<<Xaxis<<", "<<Yaxis<<endl;
//cout<<"Starting Temp = "<<StartTemp<<endl;
//for (int i=0; i<HeatSources; i++)
//{
// cout<<Xlocation[i]<<", "<<Ylocation[i]<<", "<<Width[i]<<", "<<Height[i]<<", "<<FixedTemperature[i]<<endl;
//}
if (Xaxis >= Yaxis){
len = Xaxis*Xaxis;
}
else{
len = Yaxis*Yaxis;
}
int size = (len)*sizeof(float);
float OldTemp[len] = {0};
float NewTemp[len] = {0};
for (int i = 0; i < Yaxis ; i ++){
for (int j = 0; j < Xaxis; j ++){
OldTemp[i*Yaxis + j] = StartTemp;
for (int p = 0; p < HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1)){
OldTemp[i*Yaxis + j] = FixedTemperature[p];
}
}
}
}
cudaMalloc((void **)&d_OldTemp, size);
cudaMalloc((void **)&d_NewTemp, size);
cudaMalloc((void **)&d_Xlocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Ylocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Xaxis, sizeof(int));
cudaMalloc((void **)&d_Yaxis, sizeof(int));
cudaMalloc((void **)&d_k, sizeof(float));
cudaMalloc((void **)&d_Width, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Height, HeatSources*sizeof(int));
cudaMalloc((void **)&d_FixedTemperature, HeatSources*sizeof(float));
cudaMalloc((void **)&d_Zlocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Depth, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Zaxis, sizeof(int));
cudaMalloc((void **)&d_dim, sizeof(int));
cudaMalloc((void **)&d_HeatSources, sizeof(int));
cudaMemcpy(d_OldTemp, OldTemp, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_NewTemp, NewTemp, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_Xlocation, Xlocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Ylocation, Ylocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Xaxis, &Xaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Yaxis, &Yaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_k, &k_k, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Width, Width, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Height, Height, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_FixedTemperature, FixedTemperature, HeatSources*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Zlocation, Zlocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Zaxis, &Zaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Depth, Depth, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_dim, &dim, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_HeatSources, &HeatSources, sizeof(int), cudaMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((int)ceil(((Xaxis + BLOCK_SIZE - 1)/BLOCK_SIZE)),(int)(ceil(((Yaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))));
for (int i = 0; i < TimeSteps; i++){
kernel<<<grid,block>>>(d_dim, d_k, d_Xlocation, d_Ylocation, d_Zlocation, d_Width, d_Height, d_Depth, d_FixedTemperature, d_OldTemp, d_NewTemp, d_Xaxis, d_Yaxis, d_Zaxis, d_HeatSources);
}
cudaMemcpy(NewTemp, d_NewTemp, size, cudaMemcpyDeviceToHost);
cudaFree(d_OldTemp);
cudaFree(d_NewTemp);
cudaFree(d_Xlocation);
cudaFree(d_Ylocation);
cudaFree(d_Xaxis);
cudaFree(d_Yaxis);
cudaFree(d_k);
cudaFree(d_Width);
cudaFree(d_Height);
cudaFree(d_FixedTemperature);
cudaFree(d_Zlocation);
cudaFree(d_Zaxis);
cudaFree(d_Depth);
cudaFree(d_dim);
cudaFree(d_HeatSources);
ofstream build ("heatOutput.csv", std::ofstream::out);
for(int i = 0; i <Yaxis; i++){
for(int j = 0; j <Xaxis - 1; j++){
build<<NewTemp[i*Yaxis + j]<<", ";
}
build<<NewTemp[i*Yaxis + Xaxis-1];
if(i != Yaxis -1){
build<<endl;
}
}
build.close();
}
if (dim ==51)
{
parastring[0].insert(0,1,'0');
k_k = atof(parastring[0].c_str());
TimeSteps = atoi(parastring[1].c_str());
Xaxis = atoi(parastring[2].c_str());
Yaxis = atoi(parastring[3].c_str());
Zaxis = atoi(parastring[4].c_str());
StartTemp = atof(parastring[5].c_str());
HeatSources = (ParaNumber-7)/7; // each 3-D heat source contributes seven parameters
for (int i=6;i<ParaNumber-6;i=i+7){
Xlocation[(i-6)/7] = atoi(parastring[i].c_str());
Ylocation[(i-6)/7] = atoi(parastring[i+1].c_str());
Zlocation[(i-6)/7] = atoi(parastring[i+2].c_str());
Width[(i-6)/7] = atoi(parastring[i+3].c_str());
Height[(i-6)/7] = atoi(parastring[i+4].c_str());
Depth[(i-6)/7] = atoi(parastring[i+5].c_str());
FixedTemperature[(i-6)/7] = atof(parastring[i+6].c_str());
}
//cout<<"Dimension = "<<dim<<endl;
//cout<<"k = "<<k_k<<endl;
//cout<<"Time Steps = "<<TimeSteps<<endl;
//cout<<"X Axis, Y axis, Z axis = "<<Xaxis<<", "<<Yaxis<<", "<<Zaxis<<endl;
//cout<<"Starting Temp = "<<StartTemp<<endl;
//cout<<"HeatSources = "<<HeatSources<<endl;
//for (int i=0; i<HeatSources; i++)
//{
// cout<<Xlocation[i]<<", "<<Ylocation[i]<<", "<<Zlocation[i]<<", "<<Width[i]<<", "<<Height[i]<<", "<<Depth[i]<<", "<<FixedTemperature[i]<<endl;
//}
if (Xaxis >= Yaxis && Xaxis >= Zaxis){
len = Xaxis*Xaxis*Xaxis;
}
else if (Yaxis >= Xaxis && Yaxis >= Zaxis){
len = Yaxis*Yaxis*Yaxis;
}
else if (Zaxis >= Xaxis && Zaxis >= Yaxis){
len = Zaxis*Zaxis*Zaxis;
}
int size = (len)*sizeof(float);
float OldTemp[len] = {0};
float NewTemp[len] = {0};
for(int k = 0; k <Zaxis; k++){
for (int i = 0; i < Yaxis ; i ++){
for (int j = 0; j < Xaxis; j ++){
OldTemp[(j+i*(Yaxis))*(Zaxis) + k] = StartTemp;
for (int p = 0; p < HeatSources; p++){
if((i > Ylocation[p]-1) && (i <= Ylocation[p] + Height[p]-1) && (j > Xlocation[p]-1) && (j <= Xlocation[p] + Width[p]-1) && (k > Zlocation[p] - 1) && (k<= Zlocation[p] + Depth[p] - 1)){
OldTemp[(j+i*(Yaxis))*(Zaxis) + k] = FixedTemperature[p];
}
}
}
}
}
cudaMalloc((void **)&d_OldTemp, size);
cudaMalloc((void **)&d_NewTemp, size);
cudaMalloc((void **)&d_Xlocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Ylocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Xaxis, sizeof(int));
cudaMalloc((void **)&d_Yaxis, sizeof(int));
cudaMalloc((void **)&d_k, sizeof(float));
cudaMalloc((void **)&d_Width, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Height, HeatSources*sizeof(int));
cudaMalloc((void **)&d_FixedTemperature, HeatSources*sizeof(float));
cudaMalloc((void **)&d_Zlocation, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Depth, HeatSources*sizeof(int));
cudaMalloc((void **)&d_Zaxis, sizeof(int));
cudaMalloc((void **)&d_dim, sizeof(int));
cudaMalloc((void **)&d_HeatSources, sizeof(int));
cudaMemcpy(d_OldTemp, OldTemp, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_NewTemp, NewTemp, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_Xlocation, Xlocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Ylocation, Ylocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Xaxis, &Xaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Yaxis, &Yaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_k, &k_k, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Width, Width, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Height, Height, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_FixedTemperature, FixedTemperature, HeatSources*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Zlocation, Zlocation, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Zaxis, &Zaxis, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_Depth, Depth, HeatSources*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_dim, &dim, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_HeatSources, &HeatSources, sizeof(int), cudaMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE,BLOCK_SIZE);
dim3 grid((int)ceil(((Xaxis + BLOCK_SIZE - 1)/BLOCK_SIZE)),(int)(ceil(((Yaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))),(int)(ceil(((Zaxis + BLOCK_SIZE - 1)/BLOCK_SIZE))));
for (int i = 0; i < TimeSteps; i++){
kernel<<<grid,block>>>(d_dim, d_k, d_Xlocation, d_Ylocation, d_Zlocation, d_Width, d_Height, d_Depth, d_FixedTemperature, d_OldTemp, d_NewTemp, d_Xaxis, d_Yaxis, d_Zaxis, d_HeatSources);
}
cudaMemcpy(NewTemp, d_NewTemp, size, cudaMemcpyDeviceToHost);
cudaFree(d_OldTemp);
cudaFree(d_NewTemp);
cudaFree(d_Xlocation);
cudaFree(d_Ylocation);
cudaFree(d_Xaxis);
cudaFree(d_Yaxis);
cudaFree(d_k);
cudaFree(d_Width);
cudaFree(d_Height);
cudaFree(d_FixedTemperature);
cudaFree(d_Zlocation);
cudaFree(d_Zaxis);
cudaFree(d_Depth);
cudaFree(d_dim);
cudaFree(d_HeatSources);
ofstream build ("heatOutput.csv", std::ofstream::out);
for (int l = 0; l < Zaxis; l++){
for(int i = 0; i <Yaxis; i++){
for(int j = 0; j <Xaxis - 1; j++){
build<<NewTemp[(j+i*(Yaxis))*(Zaxis) + l]<<", ";
}
build<<NewTemp[(Xaxis - 1 +i*(Yaxis))*(Zaxis) + l];
if(l != Zaxis -1 || i != Yaxis -1){
build<<endl;
}
}
if (l != Zaxis - 1){
build<<endl;
}
}
build.close();
}
return 0;
}
|
81c1fa3476e8926d065164627fa8e454e4c60f7a.hip | // !!! This is a file automatically generated by hipify!!!
//nvcc -std=c++11 align.cu -o align -lboost_iostreams -lboost_system -lboost_filesystem -lpthread -Wno-deprecated-gpu-targets `pkg-config --cflags opencv4` `pkg-config --libs opencv4` `pkg-config --libs realsense2` `pkg-config --cflags realsense2`
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <atomic>
#include <chrono>
#include <cmath>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>
using namespace std;
using namespace cv;
/* */
/* Device */
/* */
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
/* Device function equivalent for the RealSense function rs2_project_point_to_pixel */
__device__ void project_point_to_pixel(float pixel[2], float intrin[9], int in_model, float point[3]) // intrin: fx(0), fy(1), ppx(2), ppy(3), coeff(4-8)
{
//assert(intrin->model != 2); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if(in_model == 1)
{
float r2 = x*x + y*y;
float f = 1 + intrin[4]*r2 + intrin[5]*r2*r2 + intrin[8]*r2*r2*r2;
x *= f;
y *= f;
float dx = x + 2*intrin[6]*x*y + intrin[7]*(r2 + 2*x*x);
float dy = y + 2*intrin[7]*x*y + intrin[6]*(r2 + 2*y*y);
x = dx;
y = dy;
}
if (in_model == 3)
{
float r = sqrtf(x*x + y*y);
float rd = (float)(1.0f / intrin[4] * atan(2 * r* tan(intrin[4] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin[0] + intrin[2];
pixel[1] = y * intrin[1] + intrin[3];
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
/* Device function equivalent for the RealSense function rs2_deproject_pixel_to_point */
__device__ void deproject_pixel_to_point(float point[3], float intrin[9], int in_model, float pixel[2], float depth) // intrin: fx(0), fy(1), ppx(2), ppy(3), coeff(4-8)
{
assert(in_model != 1); // Cannot deproject from a forward-distorted image
assert(in_model != 3); // Cannot deproject to an ftheta image
//assert(in_model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
float x = (pixel[0] - intrin[2]) / intrin[0];
float y = (pixel[1] - intrin[3]) / intrin[1];
if(in_model == 2)
{
float r2 = x*x + y*y;
float f = 1 + intrin[4]*r2 + intrin[5]*r2*r2 + intrin[8]*r2*r2*r2;
float ux = x*f + 2*intrin[6]*x*y + intrin[7]*(r2 + 2*x*x);
float uy = y*f + 2*intrin[7]*x*y + intrin[6]*(r2 + 2*y*y);
x = ux;
y = uy;
}
point[0] = depth * x;
point[1] = depth * y;
point[2] = depth;
}
/* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
/* Device function equivalent for the RealSense function rs2_transform_point_to_point */
__device__ void transform_point_to_point(float to_point[3], float extrin[12], float from_point[3]) // extrin: Rotation(0-8), Translation(9-11)
{
to_point[0] = extrin[0] * from_point[0] + extrin[3] * from_point[1] + extrin[6] * from_point[2] + extrin[9];
to_point[1] = extrin[1] * from_point[0] + extrin[4] * from_point[1] + extrin[7] * from_point[2] + extrin[10];
to_point[2] = extrin[2] * from_point[0] + extrin[5] * from_point[1] + extrin[8] * from_point[2] + extrin[11];
}
/* Device function for rounding-off pixel co-ordinates */
__device__ int round_pix_x(float pix_x){
if (pix_x > 639.5 || pix_x < -0.5) {return -1;}
return (int)(pix_x - fmod((pix_x+0.5),1.0) + 0.5);
}
__device__ int round_pix_y(float pix_y){
if (pix_y > 479.5 || pix_y < -0.5) {return -1;}
return (int)(pix_y - fmod((pix_y+0.5),1.0) + 0.5);
}
/* Global function to be called from Host */
__global__ void transform_d_img(float cu_intrin[18], float cu_extrin[12], int cu_in_model[2], unsigned short cu_depth[640*480], unsigned short cu_tr_depth[640*480], double * depthScale){
int tid = threadIdx.x; // 0-479
int bid = blockIdx.x; // 0-639
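// Depth values are unsigned shorts, so the < 0.05 test below simply skips pixels with no depth reading (value 0).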
if (cu_depth[tid*640+bid] < 0.05) return;
float f_point[3], t_point[3], t_pixel[2];
float pixel[2] = {bid, tid};
deproject_pixel_to_point(f_point, (cu_intrin+9), cu_in_model[1], pixel, (*depthScale)*cu_depth[tid*640+bid]);
transform_point_to_point(t_point, cu_extrin, f_point);
project_point_to_pixel(t_pixel, cu_intrin, cu_in_model[0], t_point);
int x = (int) (t_pixel[0]+0.5f);//round_pix_x(t_pixel[0]);
int y = (int) (t_pixel[1]+0.5f);//round_pix_y(t_pixel[1]);
if (x <= -1 || y <= -1 || x >= 639 || y >= 479) return;
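// Scatter write: several depth pixels can map to the same target pixel; the last writer wins (no z-test or atomics).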
cu_tr_depth[y*640+x] = cu_depth[tid*640+bid];
}
/* */
/* Host */
/* */
int main(){
// depth cam config
std::array<int, 2> depthRes = {640, 480};
//std::array<int, 2> IRRes = {640, 480};
std::array<int, 2> colorRes = {640, 480};
int colorFPS = 60;
//int IRFPS = 90;
int depthFPS = 90;
// create rs pipeline
rs2::pipeline pipe;
// create configuration
rs2::config rsCfg;
rsCfg.enable_stream(RS2_STREAM_COLOR, colorRes[0], colorRes[1], RS2_FORMAT_BGR8, colorFPS);
rsCfg.enable_stream(RS2_STREAM_DEPTH, depthRes[0], depthRes[1], RS2_FORMAT_Z16, depthFPS);
// start streaming
rs2::pipeline_profile profile = pipe.start(rsCfg);
// get color and depth streams
auto depth_stream = profile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
auto color_stream = profile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();
// get camera parameters
double depthScale = profile.get_device().first<rs2::depth_sensor>().get_depth_scale();
auto c_int = color_stream.get_intrinsics();
auto d_int = depth_stream.get_intrinsics();
auto d_to_c_ext = depth_stream.get_extrinsics_to(color_stream);
// Create new thread
rs2::frame_queue frameQueue(5);
std::atomic_bool alive {true};
/* This thread used solely to receive frames and check if color and depth frames are valid */
std::thread rxFrame([&]() {
while(alive) {
rs2::frameset frames = pipe.wait_for_frames();
auto cFrame = frames.get_color_frame();
auto dFrame = frames.get_depth_frame();
if (!cFrame || !dFrame) {
continue;
}
frameQueue.enqueue(frames);
}
});
rs2::frameset curFrame;
//auto start = std::chrono::high_resolution_clock::now();
char frameRate[10];
while(alive) {
/* Receive frames from other thread here */
frameQueue.poll_for_frame(&curFrame);
if (curFrame) {
auto colorFrame = curFrame.get_color_frame();
auto depthFrame = curFrame.get_depth_frame();
// Create Mat from frames
int color_width = colorFrame.get_width();
int color_height = colorFrame.get_height();
int depth_width = depthFrame.get_width();
int depth_height = depthFrame.get_height();
Mat color(Size(color_width, color_height), CV_8UC3, (void*)colorFrame.get_data(), Mat::AUTO_STEP);
Mat depth(Size(depth_width, depth_height), CV_16UC1, (void*)depthFrame.get_data(), Mat::AUTO_STEP);
auto start = std::chrono::high_resolution_clock::now();
/* Start kernel for aligning */
// Allocate memory for cu_intrin, cu_extrin, cu_in_model
float temp_intrin[18] = {c_int.fx, c_int.fy, c_int.ppx, c_int.ppy, c_int.coeffs[0], c_int.coeffs[1], c_int.coeffs[2], c_int.coeffs[3], c_int.coeffs[4], d_int.fx, d_int.fy, d_int.ppx, d_int.ppy, d_int.coeffs[0], d_int.coeffs[1], d_int.coeffs[2], d_int.coeffs[3], d_int.coeffs[4]};
float temp_extrin[12] = {d_to_c_ext.rotation[0], d_to_c_ext.rotation[1], d_to_c_ext.rotation[2], d_to_c_ext.rotation[3], d_to_c_ext.rotation[4], d_to_c_ext.rotation[5], d_to_c_ext.rotation[6], d_to_c_ext.rotation[7], d_to_c_ext.rotation[8], d_to_c_ext.translation[0], d_to_c_ext.translation[1], d_to_c_ext.translation[2]};
int temp_in_model[2] = {c_int.model, d_int.model};
Mat tr_depth = Mat::zeros(480, 640, CV_16UC1);
float * cu_intrin, * cu_extrin;
int * cu_in_model;
unsigned short * cu_depth, * cu_tr_depth;
double * cu_depth_scale;
hipMalloc((void **)&cu_intrin, 18*sizeof(float));
hipMalloc((void **)&cu_extrin, 12*sizeof(float));
hipMalloc((void **)&cu_in_model, 2*sizeof(int));
hipMalloc((void **)&cu_depth_scale, sizeof(double));
// Allocate memory for cu_depth, cu_tr_depth in GpuMat
//cv::cuda::GpuMat cu_depth(depth);
//cv::cuda::GpuMat cu_tr_depth(cv::Mat::zeros(depth_width, depth_height, CV_16UC1));
hipMalloc((void **)&cu_depth, 640*480*sizeof(unsigned short));
hipMalloc((void **)&cu_tr_depth, 640*480*sizeof(unsigned short));
// Initialize cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth(= [0])
hipMemcpy(cu_intrin, temp_intrin, 18*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cu_extrin, temp_extrin, 12*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cu_in_model, temp_in_model, 2*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cu_depth_scale, &depthScale, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cu_depth, depth.ptr<unsigned short>(0), 640*480*sizeof(unsigned short), hipMemcpyHostToDevice);
// Call global function
hipLaunchKernelGGL(( transform_d_img), dim3(640), dim3(480), 0, 0, cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth, cu_depth_scale);
// Copy cu_tr_depth to tr_depth
//cu_tr_depth.download(tr_depth);
hipMemcpy(tr_depth.ptr<unsigned short>(0), cu_tr_depth, 640*480*sizeof(unsigned short), hipMemcpyDeviceToHost);
// hipFree() - cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth
hipFree(cu_intrin);
hipFree(cu_extrin);
hipFree(cu_in_model);
hipFree(cu_depth);
hipFree(cu_tr_depth);
hipFree(cu_depth_scale);
/* Parallel code end */
/* Determine latency in milliseconds between frames */
auto elapsed = std::chrono::high_resolution_clock::now() - start;
float milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
printf("TIME: %02f\n", milliseconds);
snprintf(frameRate, sizeof(frameRate), "%02f\n", milliseconds);
putText(color, frameRate, Point(50, 50), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(255), 3, 3);
//start = std::chrono::high_resolution_clock::now();
// display images
double min, max, tr_min, tr_max;
cv::minMaxIdx(depth, &min, &max);
cv::minMaxIdx(tr_depth, &tr_min, &tr_max);
cv::Mat adjMap, tr_adjMap;
cv::convertScaleAbs(depth, adjMap, 255 / max);
cv::convertScaleAbs(tr_depth, tr_adjMap, 255 / tr_max);
imshow ("Colour Image", color);
imshow ("Depth Image", adjMap);
imshow ("Transformed Depth Image", tr_adjMap);
if (waitKey(1) >= 30){
destroyAllWindows();
alive = false;
}
}
}
rxFrame.join();
return 0;
}
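/* Hedged refactoring sketch (illustrative only; not called anywhere): main()
   above allocates and frees every device buffer once per frame inside the
   capture loop. Hoisting the allocations out of the loop, as below, would
   remove that per-frame overhead. The struct and helper names are hypothetical. */
struct AlignBuffers {
    float * cu_intrin; float * cu_extrin; int * cu_in_model;
    unsigned short * cu_depth; unsigned short * cu_tr_depth; double * cu_depth_scale;
};
void alloc_align_buffers(AlignBuffers &b){
    hipMalloc((void **)&b.cu_intrin, 18*sizeof(float));
    hipMalloc((void **)&b.cu_extrin, 12*sizeof(float));
    hipMalloc((void **)&b.cu_in_model, 2*sizeof(int));
    hipMalloc((void **)&b.cu_depth_scale, sizeof(double));
    hipMalloc((void **)&b.cu_depth, 640*480*sizeof(unsigned short));
    hipMalloc((void **)&b.cu_tr_depth, 640*480*sizeof(unsigned short));
}
void free_align_buffers(AlignBuffers &b){
    hipFree(b.cu_intrin); hipFree(b.cu_extrin); hipFree(b.cu_in_model);
    hipFree(b.cu_depth_scale); hipFree(b.cu_depth); hipFree(b.cu_tr_depth);
}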
| 81c1fa3476e8926d065164627fa8e454e4c60f7a.cu | //nvcc -std=c++11 align.cu -o align -lboost_iostreams -lboost_system -lboost_filesystem -lpthread -Wno-deprecated-gpu-targets `pkg-config --cflags opencv4` `pkg-config --libs opencv4` `pkg-config --libs realsense2` `pkg-config --cflags realsense2`
#include <librealsense2/rs.hpp>
#include <librealsense2/rsutil.h>
#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include <array> // for std::array (stream resolutions in main)
#include <atomic>
#include <cstdio> // for printf/snprintf
#include <chrono>
#include <cmath>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>
using namespace std;
using namespace cv;
/* */
/* Device */
/* */
/* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
/* Device function equivalent for the RealSense function rs2_project_point_to_pixel */
__device__ void project_point_to_pixel(float pixel[2], float intrin[9], int in_model, float point[3]) // intrin: fx(0), fy(1), ppx(2), ppy(3), coeff(4-8)
{
//assert(intrin->model != 2); // Cannot project to an inverse-distorted image
float x = point[0] / point[2], y = point[1] / point[2];
if(in_model == 1)
{
float r2 = x*x + y*y;
float f = 1 + intrin[4]*r2 + intrin[5]*r2*r2 + intrin[8]*r2*r2*r2;
x *= f;
y *= f;
float dx = x + 2*intrin[6]*x*y + intrin[7]*(r2 + 2*x*x);
float dy = y + 2*intrin[7]*x*y + intrin[6]*(r2 + 2*y*y);
x = dx;
y = dy;
}
if (in_model == 3)
{
float r = sqrtf(x*x + y*y);
float rd = (float)(1.0f / intrin[4] * atan(2 * r* tan(intrin[4] / 2.0f)));
x *= rd / r;
y *= rd / r;
}
pixel[0] = x * intrin[0] + intrin[2];
pixel[1] = y * intrin[1] + intrin[3];
}
/* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
/* Device function equivalent for the RealSense function rs2_deproject_pixel_to_point */
__device__ void deproject_pixel_to_point(float point[3], float intrin[9], int in_model, float pixel[2], float depth) // intrin: fx(0), fy(1), ppx(2), ppy(3), coeff(4-8)
{
assert(in_model != 1); // Cannot deproject from a forward-distorted image
assert(in_model != 3); // Cannot deproject to an ftheta image
//assert(in_model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
float x = (pixel[0] - intrin[2]) / intrin[0];
float y = (pixel[1] - intrin[3]) / intrin[1];
if(in_model == 2)
{
float r2 = x*x + y*y;
float f = 1 + intrin[4]*r2 + intrin[5]*r2*r2 + intrin[8]*r2*r2*r2;
float ux = x*f + 2*intrin[6]*x*y + intrin[7]*(r2 + 2*x*x);
float uy = y*f + 2*intrin[7]*x*y + intrin[6]*(r2 + 2*y*y);
x = ux;
y = uy;
}
point[0] = depth * x;
point[1] = depth * y;
point[2] = depth;
}
/* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
/* Device function equivalent for the RealSense function rs2_transform_point_to_point */
__device__ void transform_point_to_point(float to_point[3], float extrin[12], float from_point[3]) // extrin: Rotation(0-8), Translation(9-11)
{
to_point[0] = extrin[0] * from_point[0] + extrin[3] * from_point[1] + extrin[6] * from_point[2] + extrin[9];
to_point[1] = extrin[1] * from_point[0] + extrin[4] * from_point[1] + extrin[7] * from_point[2] + extrin[10];
to_point[2] = extrin[2] * from_point[0] + extrin[5] * from_point[1] + extrin[8] * from_point[2] + extrin[11];
}
/* Device function for rounding-off pixel co-ordinates */
__device__ int round_pix_x(float pix_x){
if (pix_x > 639.5 || pix_x < -0.5) {return -1;}
return (int)(pix_x - fmod((pix_x+0.5),1.0) + 0.5);
}
__device__ int round_pix_y(float pix_y){
if (pix_y > 479.5 || pix_y < -0.5) {return -1;}
return (int)(pix_y - fmod((pix_y+0.5),1.0) + 0.5);
}
/* Global function to be called from Host */
__global__ void transform_d_img(float cu_intrin[18], float cu_extrin[12], int cu_in_model[2], unsigned short cu_depth[640*480], unsigned short cu_tr_depth[640*480], double * depthScale){
int tid = threadIdx.x; // 0-479
int bid = blockIdx.x; // 0-639
if (cu_depth[tid*640+bid] == 0) return; // Z16 depth is an unsigned integer, so the old "< 0.05" test only ever matched zero (no reading)
float f_point[3], t_point[3], t_pixel[2];
float pixel[2] = {(float)bid, (float)tid}; // explicit casts: int-to-float braced init is a narrowing conversion
deproject_pixel_to_point(f_point, (cu_intrin+9), cu_in_model[1], pixel, (*depthScale)*cu_depth[tid*640+bid]);
transform_point_to_point(t_point, cu_extrin, f_point);
project_point_to_pixel(t_pixel, cu_intrin, cu_in_model[0], t_point);
int x = (int) (t_pixel[0]+0.5f);//round_pix_x(t_pixel[0]);
int y = (int) (t_pixel[1]+0.5f);//round_pix_y(t_pixel[1]);
if (x <= -1 || y <= -1 || x >= 639 || y >= 479) return; // conservative clip (note: this also discards pixels mapped to the last row/column)
cu_tr_depth[y*640+x] = cu_depth[tid*640+bid];
}
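/* Hedged helper sketch (added for illustration; main() below still launches the
   kernel directly): wraps the launch in a basic error check. The geometry
   mirrors the hard-coded 640x480 layout assumed throughout this file. */
void launch_transform_d_img(float * cu_intrin, float * cu_extrin, int * cu_in_model,
                            unsigned short * cu_depth, unsigned short * cu_tr_depth,
                            double * cu_depth_scale){
    // One block per image column (640), one thread per row (480).
    transform_d_img<<<640, 480>>>(cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth, cu_depth_scale);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) printf("transform_d_img launch failed: %s\n", cudaGetErrorString(err));
}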
/* */
/* Host */
/* */
int main(){
// depth cam config
std::array<int, 2> depthRes = {640, 480};
//std::array<int, 2> IRRes = {640, 480};
std::array<int, 2> colorRes = {640, 480};
int colorFPS = 60;
//int IRFPS = 90;
int depthFPS = 90;
// create rs pipeline
rs2::pipeline pipe;
// create configuration
rs2::config rsCfg;
rsCfg.enable_stream(RS2_STREAM_COLOR, colorRes[0], colorRes[1], RS2_FORMAT_BGR8, colorFPS);
rsCfg.enable_stream(RS2_STREAM_DEPTH, depthRes[0], depthRes[1], RS2_FORMAT_Z16, depthFPS);
// start streaming
rs2::pipeline_profile profile = pipe.start(rsCfg);
// get color and depth streams
auto depth_stream = profile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
auto color_stream = profile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();
// get camera parameters
double depthScale = profile.get_device().first<rs2::depth_sensor>().get_depth_scale();
auto c_int = color_stream.get_intrinsics();
auto d_int = depth_stream.get_intrinsics();
auto d_to_c_ext = depth_stream.get_extrinsics_to(color_stream);
// Create new thread
rs2::frame_queue frameQueue(5);
std::atomic_bool alive {true};
/* This thread used solely to receive frames and check if color and depth frames are valid */
std::thread rxFrame([&]() {
while(alive) {
rs2::frameset frames = pipe.wait_for_frames();
auto cFrame = frames.get_color_frame();
auto dFrame = frames.get_depth_frame();
if (!cFrame || !dFrame) {
continue;
}
frameQueue.enqueue(frames);
}
});
rs2::frameset curFrame;
//auto start = std::chrono::high_resolution_clock::now();
char frameRate[10];
while(alive) {
/* Receive frames from other thread here */
frameQueue.poll_for_frame(&curFrame);
if (curFrame) {
auto colorFrame = curFrame.get_color_frame();
auto depthFrame = curFrame.get_depth_frame();
// Create Mat from frames
int color_width = colorFrame.get_width();
int color_height = colorFrame.get_height();
int depth_width = depthFrame.get_width();
int depth_height = depthFrame.get_height();
Mat color(Size(color_width, color_height), CV_8UC3, (void*)colorFrame.get_data(), Mat::AUTO_STEP);
Mat depth(Size(depth_width, depth_height), CV_16UC1, (void*)depthFrame.get_data(), Mat::AUTO_STEP);
auto start = std::chrono::high_resolution_clock::now();
/* Start kernel for aligning */
// Allocate memory for cu_intrin, cu_extrin, cu_in_model
float temp_intrin[18] = {c_int.fx, c_int.fy, c_int.ppx, c_int.ppy, c_int.coeffs[0], c_int.coeffs[1], c_int.coeffs[2], c_int.coeffs[3], c_int.coeffs[4], d_int.fx, d_int.fy, d_int.ppx, d_int.ppy, d_int.coeffs[0], d_int.coeffs[1], d_int.coeffs[2], d_int.coeffs[3], d_int.coeffs[4]};
float temp_extrin[12] = {d_to_c_ext.rotation[0], d_to_c_ext.rotation[1], d_to_c_ext.rotation[2], d_to_c_ext.rotation[3], d_to_c_ext.rotation[4], d_to_c_ext.rotation[5], d_to_c_ext.rotation[6], d_to_c_ext.rotation[7], d_to_c_ext.rotation[8], d_to_c_ext.translation[0], d_to_c_ext.translation[1], d_to_c_ext.translation[2]};
int temp_in_model[2] = {c_int.model, d_int.model};
Mat tr_depth = Mat::zeros(480, 640, CV_16UC1);
float * cu_intrin, * cu_extrin;
int * cu_in_model;
unsigned short * cu_depth, * cu_tr_depth;
double * cu_depth_scale;
cudaMalloc((void **)&cu_intrin, 18*sizeof(float));
cudaMalloc((void **)&cu_extrin, 12*sizeof(float));
cudaMalloc((void **)&cu_in_model, 2*sizeof(int));
cudaMalloc((void **)&cu_depth_scale, sizeof(double));
// Allocate memory for cu_depth, cu_tr_depth in GpuMat
//cv::cuda::GpuMat cu_depth(depth);
//cv::cuda::GpuMat cu_tr_depth(cv::Mat::zeros(depth_width, depth_height, CV_16UC1));
cudaMalloc((void **)&cu_depth, 640*480*sizeof(unsigned short));
cudaMalloc((void **)&cu_tr_depth, 640*480*sizeof(unsigned short));
// Initialize cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth(= [0])
cudaMemcpy(cu_intrin, temp_intrin, 18*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cu_extrin, temp_extrin, 12*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cu_in_model, temp_in_model, 2*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cu_depth_scale, &depthScale, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cu_depth, depth.ptr<unsigned short>(0), 640*480*sizeof(unsigned short), cudaMemcpyHostToDevice);
// Call global function
transform_d_img<<<640, 480>>>(cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth, cu_depth_scale);
// Copy cu_tr_depth to tr_depth
//cu_tr_depth.download(tr_depth);
cudaMemcpy(tr_depth.ptr<unsigned short>(0), cu_tr_depth, 640*480*sizeof(unsigned short), cudaMemcpyDeviceToHost);
// cudaFree() - cu_intrin, cu_extrin, cu_in_model, cu_depth, cu_tr_depth
cudaFree(cu_intrin);
cudaFree(cu_extrin);
cudaFree(cu_in_model);
cudaFree(cu_depth);
cudaFree(cu_tr_depth);
cudaFree(cu_depth_scale);
/* Parallel code end */
/* Determine latency in milliseconds between frames */
auto elapsed = std::chrono::high_resolution_clock::now() - start;
float milliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
printf("TIME: %02f\n", milliseconds);
snprintf(frameRate, sizeof(frameRate), "%02f\n", milliseconds);
putText(color, frameRate, Point(50, 50), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(255), 3, 3);
//start = std::chrono::high_resolution_clock::now();
// display images
double min, max, tr_min, tr_max;
cv::minMaxIdx(depth, &min, &max);
cv::minMaxIdx(tr_depth, &tr_min, &tr_max);
cv::Mat adjMap, tr_adjMap;
cv::convertScaleAbs(depth, adjMap, 255 / max);
cv::convertScaleAbs(tr_depth, tr_adjMap, 255 / tr_max);
imshow ("Colour Image", color);
imshow ("Depth Image", adjMap);
imshow ("Transformed Depth Image", tr_adjMap);
if (waitKey(1) >= 30){
destroyAllWindows();
alive = false;
}
}
}
rxFrame.join();
return 0;
}
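/* Hedged refactoring sketch (illustrative only; not called anywhere): main()
   above allocates and frees every device buffer once per frame inside the
   capture loop. Hoisting the allocations out of the loop, as below, would
   remove that per-frame overhead. The struct and helper names are hypothetical. */
struct AlignBuffers {
    float * cu_intrin; float * cu_extrin; int * cu_in_model;
    unsigned short * cu_depth; unsigned short * cu_tr_depth; double * cu_depth_scale;
};
void alloc_align_buffers(AlignBuffers &b){
    cudaMalloc((void **)&b.cu_intrin, 18*sizeof(float));
    cudaMalloc((void **)&b.cu_extrin, 12*sizeof(float));
    cudaMalloc((void **)&b.cu_in_model, 2*sizeof(int));
    cudaMalloc((void **)&b.cu_depth_scale, sizeof(double));
    cudaMalloc((void **)&b.cu_depth, 640*480*sizeof(unsigned short));
    cudaMalloc((void **)&b.cu_tr_depth, 640*480*sizeof(unsigned short));
}
void free_align_buffers(AlignBuffers &b){
    cudaFree(b.cu_intrin); cudaFree(b.cu_extrin); cudaFree(b.cu_in_model);
    cudaFree(b.cu_depth_scale); cudaFree(b.cu_depth); cudaFree(b.cu_tr_depth);
}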
|
c868c90e2daf932c7ac2836d381540bd0ea796ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BuildGraph.h"
using namespace cv;
using namespace gpu;
using namespace device;
//template <typename T>
//__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
// const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
//{
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (y < rows && x < cols)
// {
// int x_ = x / cn;
// float w1 = weights1.ptr(y)[x_];
// float w2 = weights2.ptr(y)[x_];
// T p1 = img1.ptr(y)[x];
// T p2 = img2.ptr(y)[x];
// result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
// }
//}
//
//template <typename T>
//void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, hipStream_t stream)
//{
// dim3 threads(16, 16);
// dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
//
// blendLinearKernel<<<grid, threads, 0, stream>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);
// cudaSafeCall( hipGetLastError() );
//
// if (stream == 0)
// cudaSafeCall(hipDeviceSynchronize());
//}
//
//template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, hipStream_t stream);
__global__ void igpuBuildGraphKernel(int safeWidth, int safeHeight, int width, PtrStep<float> R, PtrStep<float> G, PtrStep<float> B, PtrStep<float> D, Edge3D *edges) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard threads from the rounded-up grid: without this, threads with
// x >= cols or y >= rows would read and write out of bounds.
if (x > safeWidth || y > safeHeight) return;
int a = (y * width + x), iter = 4*a;
if (x < safeWidth) {
edges[iter].a = a;
edges[iter].b = a + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (y < safeHeight) {
edges[iter].a = a;
edges[iter].b = a + width;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y+1)[x]);
float rdiff = R.ptr(y)[x] - R.ptr(y+1)[x], gdiff = G.ptr(y)[x] - G.ptr(y+1)[x], bdiff = B.ptr(y)[x] - B.ptr(y+1)[x];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (x < safeWidth && y < safeHeight) {
edges[iter].a = a;
edges[iter].b = a + width + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y+1)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y+1)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y+1)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y+1)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (x < safeWidth && y > 0) {
edges[iter].a = a;
edges[iter].b = a - width + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y-1)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y-1)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y-1)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y-1)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
}
}
void igpuBuildGraph(Mat &R, Mat &G, Mat &B, Mat &D, Edge3D *edges, int numEdges) {
//hipSetDevice(0);
dim3 threads(16,16);
int cols = R.cols, rows = R.rows;
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
int widthMinus = cols - 1, heightMinus = rows - 1;
GpuMat gpuR(R), gpuG(G), gpuB(B), gpuD(D);
Edge3D *gpuEdges;
size_t edge_size = numEdges*sizeof(Edge3D);
hipMalloc(&gpuEdges, edge_size);
hipMemset(gpuEdges,0,edge_size);
hipMemcpy(gpuEdges,edges,edge_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( igpuBuildGraphKernel), dim3(grid), dim3(threads), 0, 0, widthMinus, heightMinus, cols, gpuR, gpuG, gpuB, gpuD, gpuEdges);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
hipMemcpy(edges,gpuEdges,edge_size,hipMemcpyDeviceToHost);
hipFree(gpuEdges);
}
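/* Hedged usage sketch (illustrative; nothing else in this file calls it): the
   kernel writes up to four edges per pixel, so the caller is assumed to supply
   an edge buffer of 4 * width * height entries. */
void example_build_graph(Mat &R, Mat &G, Mat &B, Mat &D){
    int numEdges = 4 * R.cols * R.rows; // up to 4 edges per pixel
    Edge3D *edges = new Edge3D[numEdges](); // zero-initialized host buffer
    igpuBuildGraph(R, G, B, D, edges, numEdges); // fill edge weights on the GPU
    // edges can now be sorted with thrustsort()/thrustsort2() defined below.
    delete [] edges;
}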
void thrustsort(Edge3D *pEdge, Edge3D *edgesEnd) {
	// Construct the device vector from the host range; copying into begin()
	// of an empty vector would write past its end.
	thrust::device_vector<Edge3D> d_vec(pEdge, edgesEnd);
	thrust::sort(d_vec.begin(), d_vec.end()); // assumes Edge3D defines operator<
	thrust::copy(d_vec.begin(), d_vec.end(), pEdge);
}
// A plain __device__ function cannot be passed from host code as a thrust
// comparator, so the comparison is wrapped in a functor instead.
struct LessThan3DGPU {
	__host__ __device__ bool operator()(const Edge3D& a, const Edge3D& b) const {
		return a.w2 < b.w2;
	}
};
void thrustsort2(Edge3D *pEdge, Edge3D *edgesEnd) {
	thrust::device_vector<Edge3D> d_vec(pEdge, edgesEnd);
	thrust::sort(d_vec.begin(), d_vec.end(), LessThan3DGPU());
	thrust::copy(d_vec.begin(), d_vec.end(), pEdge);
} | c868c90e2daf932c7ac2836d381540bd0ea796ce.cu | #include "BuildGraph.h"
// Thrust headers used by thrustsort()/thrustsort2() below
// (these may already be pulled in via BuildGraph.h).
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
using namespace cv;
using namespace gpu;
using namespace device;
//template <typename T>
//__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,
// const PtrStepf weights1, const PtrStepf weights2, PtrStep<T> result)
//{
// int x = blockIdx.x * blockDim.x + threadIdx.x;
// int y = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (y < rows && x < cols)
// {
// int x_ = x / cn;
// float w1 = weights1.ptr(y)[x_];
// float w2 = weights2.ptr(y)[x_];
// T p1 = img1.ptr(y)[x];
// T p2 = img2.ptr(y)[x];
// result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);
// }
//}
//
//template <typename T>
//void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, cudaStream_t stream)
//{
// dim3 threads(16, 16);
// dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));
//
// blendLinearKernel<<<grid, threads, 0, stream>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);
// cudaSafeCall( cudaGetLastError() );
//
// if (stream == 0)
// cudaSafeCall(cudaDeviceSynchronize());
//}
//
//template void blendLinearCaller<float>(int, int, int, PtrStep<float>, PtrStep<float>, PtrStepf, PtrStepf, PtrStep<float>, cudaStream_t stream);
__global__ void igpuBuildGraphKernel(int safeWidth, int safeHeight, int width, PtrStep<float> R, PtrStep<float> G, PtrStep<float> B, PtrStep<float> D, Edge3D *edges) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard threads from the rounded-up grid: without this, threads with
// x >= cols or y >= rows would read and write out of bounds.
if (x > safeWidth || y > safeHeight) return;
int a = (y * width + x), iter = 4*a;
if (x < safeWidth) {
edges[iter].a = a;
edges[iter].b = a + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (y < safeHeight) {
edges[iter].a = a;
edges[iter].b = a + width;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y+1)[x]);
float rdiff = R.ptr(y)[x] - R.ptr(y+1)[x], gdiff = G.ptr(y)[x] - G.ptr(y+1)[x], bdiff = B.ptr(y)[x] - B.ptr(y+1)[x];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (x < safeWidth && y < safeHeight) {
edges[iter].a = a;
edges[iter].b = a + width + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y+1)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y+1)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y+1)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y+1)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
iter++;
}
if (x < safeWidth && y > 0) {
edges[iter].a = a;
edges[iter].b = a - width + 1;
edges[iter].w = fabsf(D.ptr(y)[x] - D.ptr(y-1)[x+1]);
float rdiff = R.ptr(y)[x] - R.ptr(y-1)[x+1], gdiff = G.ptr(y)[x] - G.ptr(y-1)[x+1], bdiff = B.ptr(y)[x] - B.ptr(y-1)[x+1];
edges[iter].w2 = sqrtf(rdiff * rdiff + gdiff * gdiff + bdiff * bdiff);
edges[iter].valid = true;
}
}
void igpuBuildGraph(Mat &R, Mat &G, Mat &B, Mat &D, Edge3D *edges, int numEdges) {
//cudaSetDevice(0);
dim3 threads(16,16);
int cols = R.cols, rows = R.rows;
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
int widthMinus = cols - 1, heightMinus = rows - 1;
GpuMat gpuR(R), gpuG(G), gpuB(B), gpuD(D);
Edge3D *gpuEdges;
size_t edge_size = numEdges*sizeof(Edge3D);
cudaMalloc(&gpuEdges, edge_size);
cudaMemset(gpuEdges,0,edge_size);
cudaMemcpy(gpuEdges,edges,edge_size,cudaMemcpyHostToDevice);
igpuBuildGraphKernel<<<grid, threads>>>(widthMinus, heightMinus, cols, gpuR, gpuG, gpuB, gpuD, gpuEdges);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
cudaMemcpy(edges,gpuEdges,edge_size,cudaMemcpyDeviceToHost);
cudaFree(gpuEdges);
}
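/* Hedged usage sketch (illustrative; nothing else in this file calls it): the
   kernel writes up to four edges per pixel, so the caller is assumed to supply
   an edge buffer of 4 * width * height entries. */
void example_build_graph(Mat &R, Mat &G, Mat &B, Mat &D){
    int numEdges = 4 * R.cols * R.rows; // up to 4 edges per pixel
    Edge3D *edges = new Edge3D[numEdges](); // zero-initialized host buffer
    igpuBuildGraph(R, G, B, D, edges, numEdges); // fill edge weights on the GPU
    // edges can now be sorted with thrustsort()/thrustsort2() defined below.
    delete [] edges;
}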
void thrustsort(Edge3D *pEdge, Edge3D *edgesEnd) {
	// Construct the device vector from the host range; copying into begin()
	// of an empty vector would write past its end.
	thrust::device_vector<Edge3D> d_vec(pEdge, edgesEnd);
	thrust::sort(d_vec.begin(), d_vec.end()); // assumes Edge3D defines operator<
	thrust::copy(d_vec.begin(), d_vec.end(), pEdge);
}
// A plain __device__ function cannot be passed from host code as a thrust
// comparator, so the comparison is wrapped in a functor instead.
struct LessThan3DGPU {
	__host__ __device__ bool operator()(const Edge3D& a, const Edge3D& b) const {
		return a.w2 < b.w2;
	}
};
void thrustsort2(Edge3D *pEdge, Edge3D *edgesEnd) {
	thrust::device_vector<Edge3D> d_vec(pEdge, edgesEnd);
	thrust::sort(d_vec.begin(), d_vec.end(), LessThan3DGPU());
	thrust::copy(d_vec.begin(), d_vec.end(), pEdge);
} |
10fcd996e2c2547f81f1d0c46f254c285aefe3fd.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************
* Filename: blowfish_test.c
* Author: Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Performs known-answer tests on the corresponding Blowfish
implementation. These tests do not encompass the full
range of available test vectors, however, if the tests
pass it is very, very likely that the code is correct
and was compiled properly. This code also serves as
example usage of the functions.
*********************************************************************/
/*************************** HEADER FILES ***************************/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <sys/stat.h>
#include "blowfish.h"
#include <hip/hip_runtime.h>
/*********************** FUNCTION DEFINITIONS ***********************/
/*int blowfish_test()
{
BYTE key1[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
BYTE key2[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE key3[24] = {0xF0,0xE1,0xD2,0xC3,0xB4,0xA5,0x96,0x87,
0x78,0x69,0x5A,0x4B,0x3C,0x2D,0x1E,0x0F,
0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77};
BYTE p1[BLOWFISH_BLOCK_SIZE] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
BYTE p2[BLOWFISH_BLOCK_SIZE] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE p3[BLOWFISH_BLOCK_SIZE] = {0xFE,0xDC,0xBA,0x98,0x76,0x54,0x32,0x10};
BYTE c1[BLOWFISH_BLOCK_SIZE] = {0x4e,0xf9,0x97,0x45,0x61,0x98,0xdd,0x78};
BYTE c2[BLOWFISH_BLOCK_SIZE] = {0x51,0x86,0x6f,0xd5,0xb8,0x5e,0xcb,0x8a};
BYTE c3[BLOWFISH_BLOCK_SIZE] = {0x05,0x04,0x4b,0x62,0xfa,0x52,0xd0,0x80};
BYTE enc_buf[BLOWFISH_BLOCK_SIZE];
BLOWFISH_KEY key;
int pass = 1;
// Test vector 1.
blowfish_key_setup(key1, &key, BLOWFISH_BLOCK_SIZE);
blowfish_encrypt(p1, enc_buf, &key);
pass = pass && !memcmp(c1, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c1, enc_buf, &key);
pass = pass && !memcmp(p1, enc_buf, BLOWFISH_BLOCK_SIZE);
// Test vector 2.
blowfish_key_setup(key2, &key, BLOWFISH_BLOCK_SIZE);
blowfish_encrypt(p2, enc_buf, &key);
pass = pass && !memcmp(c2, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c2, enc_buf, &key);
pass = pass && !memcmp(p2, enc_buf, BLOWFISH_BLOCK_SIZE);
// Test vector 3.
blowfish_key_setup(key3, &key, 24);
blowfish_encrypt(p3, enc_buf, &key);
pass = pass && !memcmp(c3, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c3, enc_buf, &key);
pass = pass && !memcmp(p3, enc_buf, BLOWFISH_BLOCK_SIZE);
return(pass);
}*/
void enc_dec_file(char *filename)
{
/*********************** OPEN AND READ THE INPUT FILE ***********************/
BYTE *data = NULL; // initialized so the NULL check below is well-defined when stat() fails
BYTE *encrypted_data;
BYTE *decrypted_data;
//char *filename = "../../sample_files/king_james_bible.txt";
BYTE key1[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
/*BYTE key2[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE key3[24] = {0xF0,0xE1,0xD2,0xC3,0xB4,0xA5,0x96,0x87,
0x78,0x69,0x5A,0x4B,0x3C,0x2D,0x1E,0x0F,
0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77};*/
BLOWFISH_KEY key;
struct stat st;
size_t data_size_bytes = 0;
if (stat(filename, &st) == 0) { // if stat() succeeds on the input file
data_size_bytes = sizeof(BYTE) * st.st_size;
data = (BYTE *) malloc(data_size_bytes); // reserve the input file's size in memory for the data pointer
};
if (data == NULL) return; // stat() or malloc() failed; nothing to encrypt
FILE *file = fopen(filename, "rb");
if(data != NULL && file){
int current_byte = 0;
while(fread(&data[current_byte], sizeof(BYTE), 1, file) == 1){
current_byte += 1;
};
fclose(file); // done reading the input
};
encrypted_data = (BYTE *) malloc(data_size_bytes); // reserve memory for the encrypted output
decrypted_data = (BYTE *) malloc(data_size_bytes); // likewise for the decrypted output
BYTE *d_data; // device pointer to the input data
BYTE *d_encrypted_data; // device pointer to the encrypted data
BYTE *d_decrypted_data; // device pointer to the decrypted data
// allocate device memory
hipMalloc((void **)&d_data, data_size_bytes);
hipMalloc((void **)&d_encrypted_data, data_size_bytes);
hipMalloc((void **)&d_decrypted_data, data_size_bytes);
hipMemcpy(d_data, data, data_size_bytes, hipMemcpyHostToDevice); // copy the data array to the device
BLOWFISH_KEY *d_key;
hipMalloc((void **)&d_key, sizeof(BLOWFISH_KEY));
blowfish_key_setup(key1, &key, BLOWFISH_BLOCK_SIZE);
hipMemcpy(d_key, &key, sizeof(BLOWFISH_KEY), hipMemcpyHostToDevice);
int threadsPerBlock = 256;
// Each thread is assumed to handle one BLOWFISH_BLOCK_SIZE-byte block, so round
// the number of 8-byte blocks (not bytes) up to whole thread blocks; the previous
// expression truncated and could leave the tail of the input unprocessed.
int blocksPerGrid = (data_size_bytes / BLOWFISH_BLOCK_SIZE + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( blowfish_encrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_data, d_encrypted_data, d_key, data_size_bytes);
hipLaunchKernelGGL(( blowfish_decrypt), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_encrypted_data, d_decrypted_data, d_key, data_size_bytes);
hipMemcpy(encrypted_data, d_encrypted_data, data_size_bytes, hipMemcpyDeviceToHost);
hipMemcpy(decrypted_data, d_decrypted_data, data_size_bytes, hipMemcpyDeviceToHost);
FILE *enc_file = fopen("file.enc", "wb+");
FILE *dec_file = fopen("file.dec", "wb+");
fwrite(encrypted_data, sizeof(BYTE) * st.st_size, 1, enc_file);
fwrite(decrypted_data, sizeof(BYTE) * st.st_size, 1, dec_file);
fclose(enc_file);
fclose(dec_file);
// release host and device buffers
free(data);
free(encrypted_data);
free(decrypted_data);
hipFree(d_data);
hipFree(d_encrypted_data);
hipFree(d_decrypted_data);
hipFree(d_key);
};
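/* Hedged helper sketch (illustrative; not wired into enc_dec_file above):
   Blowfish works on 8-byte blocks, so inputs whose size is not a multiple of
   BLOWFISH_BLOCK_SIZE are assumed to need zero-padding before encryption. */
BYTE *pad_to_block_size(const BYTE *data, size_t size, size_t *padded_size)
{
    *padded_size = ((size + BLOWFISH_BLOCK_SIZE - 1) / BLOWFISH_BLOCK_SIZE) * BLOWFISH_BLOCK_SIZE;
    BYTE *padded = (BYTE *) calloc(*padded_size, 1); // zero-filled buffer
    if (padded != NULL)
        memcpy(padded, data, size); // trailing bytes stay zero as padding
    return padded;
}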
int main(int argc, char *argv[])
{
if (argc == 2) {
enc_dec_file(argv[1]);
}
/*
printf("Blowfish Tests: %s\n", aes_test() ? "SUCCEEDED" : "FAILED");
*/
return(0);
}
| 10fcd996e2c2547f81f1d0c46f254c285aefe3fd.cu | /*********************************************************************
* Filename: blowfish_test.c
* Author: Brad Conte (brad AT bradconte.com)
* Copyright:
* Disclaimer: This code is presented "as is" without any guarantees.
* Details: Performs known-answer tests on the corresponding Blowfish
implementation. These tests do not encompass the full
range of available test vectors, however, if the tests
pass it is very, very likely that the code is correct
and was compiled properly. This code also serves as
example usage of the functions.
*********************************************************************/
/*************************** HEADER FILES ***************************/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <sys/stat.h>
#include "blowfish.h"
#include <cuda_runtime.h>
/*********************** FUNCTION DEFINITIONS ***********************/
/*int blowfish_test()
{
BYTE key1[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
BYTE key2[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE key3[24] = {0xF0,0xE1,0xD2,0xC3,0xB4,0xA5,0x96,0x87,
0x78,0x69,0x5A,0x4B,0x3C,0x2D,0x1E,0x0F,
0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77};
BYTE p1[BLOWFISH_BLOCK_SIZE] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
BYTE p2[BLOWFISH_BLOCK_SIZE] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE p3[BLOWFISH_BLOCK_SIZE] = {0xFE,0xDC,0xBA,0x98,0x76,0x54,0x32,0x10};
BYTE c1[BLOWFISH_BLOCK_SIZE] = {0x4e,0xf9,0x97,0x45,0x61,0x98,0xdd,0x78};
BYTE c2[BLOWFISH_BLOCK_SIZE] = {0x51,0x86,0x6f,0xd5,0xb8,0x5e,0xcb,0x8a};
BYTE c3[BLOWFISH_BLOCK_SIZE] = {0x05,0x04,0x4b,0x62,0xfa,0x52,0xd0,0x80};
BYTE enc_buf[BLOWFISH_BLOCK_SIZE];
BLOWFISH_KEY key;
int pass = 1;
// Test vector 1.
blowfish_key_setup(key1, &key, BLOWFISH_BLOCK_SIZE);
blowfish_encrypt(p1, enc_buf, &key);
pass = pass && !memcmp(c1, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c1, enc_buf, &key);
pass = pass && !memcmp(p1, enc_buf, BLOWFISH_BLOCK_SIZE);
// Test vector 2.
blowfish_key_setup(key2, &key, BLOWFISH_BLOCK_SIZE);
blowfish_encrypt(p2, enc_buf, &key);
pass = pass && !memcmp(c2, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c2, enc_buf, &key);
pass = pass && !memcmp(p2, enc_buf, BLOWFISH_BLOCK_SIZE);
// Test vector 3.
blowfish_key_setup(key3, &key, 24);
blowfish_encrypt(p3, enc_buf, &key);
pass = pass && !memcmp(c3, enc_buf, BLOWFISH_BLOCK_SIZE);
blowfish_decrypt(c3, enc_buf, &key);
pass = pass && !memcmp(p3, enc_buf, BLOWFISH_BLOCK_SIZE);
return(pass);
}*/
void enc_dec_file(char *filename)
{
/*********************** OPEN AND READ THE INPUT FILE ***********************/
BYTE *data = NULL; // initialized so the NULL check below is well-defined when stat() fails
BYTE *encrypted_data;
BYTE *decrypted_data;
//char *filename = "../../sample_files/king_james_bible.txt";
BYTE key1[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
/*BYTE key2[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE key3[24] = {0xF0,0xE1,0xD2,0xC3,0xB4,0xA5,0x96,0x87,
0x78,0x69,0x5A,0x4B,0x3C,0x2D,0x1E,0x0F,
0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77};*/
BLOWFISH_KEY key;
struct stat st;
size_t data_size_bytes = 0;
if (stat(filename, &st) == 0) { // if stat() succeeds on the input file
data_size_bytes = sizeof(BYTE) * st.st_size;
data = (BYTE *) malloc(data_size_bytes); // reserve the input file's size in memory for the data pointer
};
if (data == NULL) return; // stat() or malloc() failed; nothing to encrypt
FILE *file = fopen(filename, "rb");
if(data != NULL && file){
int current_byte = 0;
while(fread(&data[current_byte], sizeof(BYTE), 1, file) == 1){
current_byte += 1;
};
fclose(file); // done reading the input
};
encrypted_data = (BYTE *) malloc(data_size_bytes); // reserve memory for the encrypted output
decrypted_data = (BYTE *) malloc(data_size_bytes); // likewise for the decrypted output
BYTE *d_data; // device pointer to the input data
BYTE *d_encrypted_data; // device pointer to the encrypted data
BYTE *d_decrypted_data; // device pointer to the decrypted data
// allocate device memory
cudaMalloc((void **)&d_data, data_size_bytes);
cudaMalloc((void **)&d_encrypted_data, data_size_bytes);
cudaMalloc((void **)&d_decrypted_data, data_size_bytes);
cudaMemcpy(d_data, data, data_size_bytes, cudaMemcpyHostToDevice); // copy the data array to the device
BLOWFISH_KEY *d_key;
cudaMalloc((void **)&d_key, sizeof(BLOWFISH_KEY));
blowfish_key_setup(key1, &key, BLOWFISH_BLOCK_SIZE);
cudaMemcpy(d_key, &key, sizeof(BLOWFISH_KEY), cudaMemcpyHostToDevice);
int threadsPerBlock = 256;
// Each thread is assumed to handle one BLOWFISH_BLOCK_SIZE-byte block, so round
// the number of 8-byte blocks (not bytes) up to whole thread blocks; the previous
// expression truncated and could leave the tail of the input unprocessed.
int blocksPerGrid = (data_size_bytes / BLOWFISH_BLOCK_SIZE + threadsPerBlock - 1) / threadsPerBlock;
blowfish_encrypt<<<blocksPerGrid, threadsPerBlock>>>(d_data, d_encrypted_data, d_key, data_size_bytes);
blowfish_decrypt<<<blocksPerGrid, threadsPerBlock>>>(d_encrypted_data, d_decrypted_data, d_key, data_size_bytes);
cudaMemcpy(encrypted_data, d_encrypted_data, data_size_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(decrypted_data, d_decrypted_data, data_size_bytes, cudaMemcpyDeviceToHost);
FILE *enc_file = fopen("file.enc", "wb+");
FILE *dec_file = fopen("file.dec", "wb+");
fwrite(encrypted_data, sizeof(BYTE) * st.st_size, 1, enc_file);
fwrite(decrypted_data, sizeof(BYTE) * st.st_size, 1, dec_file);
fclose(enc_file);
fclose(dec_file);
// release host and device buffers
free(data);
free(encrypted_data);
free(decrypted_data);
cudaFree(d_data);
cudaFree(d_encrypted_data);
cudaFree(d_decrypted_data);
cudaFree(d_key);
};
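/* Hedged helper sketch (illustrative; not wired into enc_dec_file above):
   Blowfish works on 8-byte blocks, so inputs whose size is not a multiple of
   BLOWFISH_BLOCK_SIZE are assumed to need zero-padding before encryption. */
BYTE *pad_to_block_size(const BYTE *data, size_t size, size_t *padded_size)
{
    *padded_size = ((size + BLOWFISH_BLOCK_SIZE - 1) / BLOWFISH_BLOCK_SIZE) * BLOWFISH_BLOCK_SIZE;
    BYTE *padded = (BYTE *) calloc(*padded_size, 1); // zero-filled buffer
    if (padded != NULL)
        memcpy(padded, data, size); // trailing bytes stay zero as padding
    return padded;
}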
int main(int argc, char *argv[])
{
if (argc == 2) {
enc_dec_file(argv[1]);
}
/*
printf("Blowfish Tests: %s\n", aes_test() ? "SUCCEEDED" : "FAILED");
*/
return(0);
}
|
bbb18ddb3b9724155261c61ad8a0cdc4ba07f689.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KNNBinningV2.cuh"
#include "../VectorUtils.cuh"
#include "CUDAKernelGlobals.cuh"
using namespace OpenSteer;
// Texture references.
texture< uint, hipTextureType3D, hipReadModeElementType > texCellIndicesNormalized;
texture< float4, hipTextureType1D, hipReadModeElementType > texPosition;
__constant__ float3 constWorldSizeV2;
__constant__ float3 constWorldStepV2;
__constant__ float3 constWorldStepNormalizedV2;
__constant__ uint3 constWorldCellsV2;
// Fetch the cell index from texCellIndicesNormalized at a given world {x,y,z} position.
//#define CELL_INDEX_NORMALIZED( pos ) ( tex3D( texCellIndicesNormalized, pos.x, pos.z, pos.y ) ) // <--- for boids.
#define CELL_INDEX_NORMALIZED( pos ) ( tex3D( texCellIndicesNormalized, pos.x, pos.y, pos.z ) ) // <--- for choke point.
// Fetch the cell index from texCellIndices at a given texel (x,y,z) coordinate.
//#define CELL_INDEX( x, y, z ) ( tex3D( texCellIndices, x, y, z ) )
// Kernel declarations.
extern "C"
{
// Bind the textures to the input hipArray.
__host__ void KNNBinningV2BindTexture( hipArray * pCudaArray );
// Unbind the textures.
__host__ void KNNBinningV2UnbindTexture( void );
__host__ void KNNBinningV2KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const* pdCellNeighbors,
uint const numCells,
uint const numB,
uint const neighborsPerCell );
__host__ void KNNBinningV2KernelUnbindTextures( void );
__host__ void KNNBinningV2ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents );
__host__ void KNNBinningV2ReorderDBUnbindTextures( void );
// Use to precompute the neighbors of each cell once per decomposition.
__global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
);
__global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
);
// Kernel to set initial bin indices of vehicles in the simulation.
__global__ void KNNBinningV2BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
);
// Reorder the positions on pdCellIndices, and compute the cell start and end indices.
__global__ void KNNBinningV2ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
);
__global__ void KNNBinningV2Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const* pdACellIndices, // In: Sorted group A cell indices.
// Cell neighbor info.
uint const neighborsPerCell, // In: Number of neighbors per cell in the pdCellNeighbors array.
uint const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numA, // In: Size of group A.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
);
}
__global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
)
{
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
if( index >= numCells )
return;
__shared__ float3 shPosition[KNN_THREADSPERBLOCK];
extern __shared__ uint shNeighboringCells[];
// Read the position of this thread's cell to shared memory.
shPosition[ threadIdx.x ] = pdCells[index].position;
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Get the first cell index (radius 0).
shNeighboringCells[ threadIdx.x * neighborsPerCell ] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
__syncthreads();
int i = 1;
// Compute the start offset into shNeighboringCells for this radius.
int offset = threadIdx.x * neighborsPerCell;
// For increasing radius...
for( int iCurrentRadius = 1; iCurrentRadius <= radius; iCurrentRadius++ )
{
for( int dy = -iCurrentRadius; dy <= iCurrentRadius; dy++ ) // World height.
{
for( int dz = -iCurrentRadius; dz <= iCurrentRadius; dz++ ) // World depth.
{
for( int dx = -iCurrentRadius; dx <= iCurrentRadius; dx++ ) // World width.
{
// Only do for the outside cells.
if( dz == -iCurrentRadius || dz == iCurrentRadius ||
dx == -iCurrentRadius || dx == iCurrentRadius ||
dy == -iCurrentRadius || dy == iCurrentRadius
)
{
float3 queryPosition = make_float3( POSITION_SH( threadIdx.x ).x + dx * constWorldStepNormalizedV2.x,
POSITION_SH( threadIdx.x ).y + dy * constWorldStepNormalizedV2.y,
POSITION_SH( threadIdx.x ).z + dz * constWorldStepNormalizedV2.z
);
uint cellIndex = CELL_INDEX_NORMALIZED( queryPosition );
// Do not add duplicate cells.
for( int iDup = 0; iDup < i; iDup++ )
{
if( shNeighboringCells[offset+iDup] == cellIndex )
{
cellIndex = UINT_MAX;
break;
}
}
shNeighboringCells[offset + i++] = cellIndex;
}
}
}
}
}
__syncthreads();
for( int i = 0; i < neighborsPerCell; i++ )
{
pdCellNeighbors[ index * neighborsPerCell + i ] = shNeighboringCells[ offset + i ];
}
}
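// Hedged host-side launch sketch (illustrative; the real launches live in host
// code outside this file): the kernel above indexes its extern shared array per
// thread, so the launch must reserve blockDim.x * neighborsPerCell * sizeof(uint)
// bytes of dynamic shared memory.
__host__ void KNNBinningV2ComputeCellNeighbors3DLaunchSketch( bin_cell const* pdCells,
                                                              uint * pdCellNeighbors,
                                                              size_t const neighborsPerCell,
                                                              uint const radius,
                                                              size_t const numCells )
{
    uint const numBlocks = (numCells + KNN_THREADSPERBLOCK - 1) / KNN_THREADSPERBLOCK;
    size_t const shMemSize = KNN_THREADSPERBLOCK * neighborsPerCell * sizeof(uint);
    hipLaunchKernelGGL(( KNNBinningV2ComputeCellNeighbors3D), dim3(numBlocks), dim3(KNN_THREADSPERBLOCK), shMemSize, 0,
        pdCells, pdCellNeighbors, neighborsPerCell, radius, numCells );
}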
__global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
)
{
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
if( index >= numCells )
return;
__shared__ float3 shPosition[KNN_THREADSPERBLOCK];
extern __shared__ uint shNeighboringCells[];
// Read the position of this thread's cell to shared memory.
shPosition[ threadIdx.x ] = pdCells[index].position;
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Get the first cell index (radius 0).
shNeighboringCells[ threadIdx.x * neighborsPerCell ] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
__syncthreads();
int i = 1;
// Compute the start offset into shNeighboringCells for this radius.
int offset = threadIdx.x * neighborsPerCell;
// For increasing radius...
for( int iCurrentRadius = 1; iCurrentRadius <= radius; iCurrentRadius++ )
{
for( int dz = -iCurrentRadius; dz <= iCurrentRadius; dz++ )
{
for( int dx = -iCurrentRadius; dx <= iCurrentRadius; dx++ )
{
// Only do for the outside cells.
if( dz == -iCurrentRadius || dz == iCurrentRadius || dx == -iCurrentRadius || dx == iCurrentRadius )
{
float3 queryPosition = make_float3( POSITION_SH( threadIdx.x ).x + dx * constWorldStepNormalizedV2.x,
POSITION_SH( threadIdx.x ).y,
POSITION_SH( threadIdx.x ).z + dz * constWorldStepNormalizedV2.z
);
uint cellIndex = CELL_INDEX_NORMALIZED( queryPosition );
// Do not add duplicate cells.
for( int iDup = 0; iDup < i; iDup++ )
{
if( shNeighboringCells[offset+iDup] == cellIndex )
{
cellIndex = UINT_MAX;
break;
}
}
shNeighboringCells[offset + i++] = cellIndex;
}
}
}
}
__syncthreads();
for( int i = 0; i < neighborsPerCell; i++ )
{
pdCellNeighbors[ index * neighborsPerCell + i ] = shNeighboringCells[ offset + i ];
}
}
__host__ void KNNBinningV2BindTexture( hipArray * pdCudaArray )
{
static hipChannelFormatDesc const channelDesc = hipCreateChannelDesc< uint >();
texCellIndicesNormalized.normalized = true;
texCellIndicesNormalized.filterMode = hipFilterModePoint;
// Clamp out of bounds coordinates to the edge of the texture.
texCellIndicesNormalized.addressMode[0] = hipAddressModeClamp;
texCellIndicesNormalized.addressMode[1] = hipAddressModeClamp;
texCellIndicesNormalized.addressMode[2] = hipAddressModeClamp;
CUDA_SAFE_CALL( hipBindTextureToArray( texCellIndicesNormalized, pdCudaArray, channelDesc ) );
}
__host__ void KNNBinningV2UnbindTexture( void )
{
CUDA_SAFE_CALL( hipUnbindTexture( texCellIndicesNormalized ) );
}
__global__ void KNNBinningV2BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
)
{
// Offset of this agent in the global array.
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( index >= numAgents )
return;
// Copy the positions to shared memory.
__shared__ float3 shPosition[THREADSPERBLOCK];
POSITION_SH( threadIdx.x ) = POSITION_F3( index );
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Write the agent's cell index out to global memory.
pdCellIndices[index] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
// Write the agent's index out to global memory.
pdAgentIndices[index] = index;
}
__host__ void KNNBinningV2ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents
)
{
static hipChannelFormatDesc const float4ChannelDesc = hipCreateChannelDesc< float4 >();
CUDA_SAFE_CALL( hipBindTexture( NULL, texPosition, pdPosition, float4ChannelDesc, numAgents * sizeof(float4) ) );
}
__host__ void KNNBinningV2ReorderDBUnbindTextures( void )
{
CUDA_SAFE_CALL( hipUnbindTexture( texPosition ) );
}
__global__ void KNNBinningV2ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
)
{
// Offset of this agent.
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( index >= numAgents )
return;
__shared__ uint shCellIndices[THREADSPERBLOCK+1];
// Shared memory so we can coalesce the writes of sorted data to global memory.
__shared__ float4 shPositionSorted[THREADSPERBLOCK];
// Read the cell index of this agent.
uint iCellIndex = pdCellIndices[ index ];
__syncthreads();
// Store cell index data in shared memory so that we can look
// at the neighboring agent's value without two reads per thread.
shCellIndices[ threadIdx.x + 1 ] = iCellIndex;
if( index > 0 && threadIdx.x == 0 )
{
// First thread in block must load neighbor agent cell index.
shCellIndices[0] = pdCellIndices[ index - 1 ];
}
__syncthreads();
// If this agent has a different cell index to the previous
// agent then it must be the first in the cell,
// so store the index of this agent in the cell.
// As it isn't the first agent, it must also be the cell end of
// the previous particle's cell
if( index == 0 || iCellIndex != shCellIndices[ threadIdx.x ] )
{
pdCellStart[ iCellIndex ] = index;
if( index > 0 )
pdCellEnd[ shCellIndices[ threadIdx.x ] ] = index;
}
// If this is the last agent, the end index for the cell will be index + 1
if( index == (numAgents - 1) )
{
pdCellEnd[ iCellIndex ] = index + 1;
}
// Use the sorted index to reorder the position/direction/speed data.
uint const iSortedIndex = pdAgentIndices[ index ];
shPositionSorted[ threadIdx.x ] = tex1Dfetch( texPosition, iSortedIndex );
// Write to global memory.
pdPositionSorted[ index ] = shPositionSorted[ threadIdx.x ];
}
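// Hedged host-side sketch (illustrative): cells that contain no agents are
// never written by KNNBinningV2ReorderDB above, so pdCellStart is assumed to be
// reset to an "empty" sentinel before each rebuild; the search kernel's
// start < end test then skips such cells.
__host__ void KNNBinningV2ResetCellStart( uint * pdCellStart, size_t const numCells )
{
    // 0xff in every byte yields UINT_MAX, marking a cell with no agents.
    CUDA_SAFE_CALL( hipMemset( pdCellStart, 0xff, numCells * sizeof(uint) ) );
}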
// Textures used by KNNBinningKernel.
texture< uint, hipTextureType1D, hipReadModeElementType> texBCellStart;
texture< uint, hipTextureType1D, hipReadModeElementType> texBCellEnd;
texture< uint, hipTextureType1D, hipReadModeElementType> texBIndices;
texture< float4, hipTextureType1D, hipReadModeElementType> texBPositionSorted;
texture< uint, hipTextureType1D, hipReadModeElementType > texCellNeighbors;
__host__ void KNNBinningV2KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const* pdCellNeighbors,
uint const numCells,
uint const numB,
uint const neighborsPerCell
)
{
static hipChannelFormatDesc const uintChannelDesc = hipCreateChannelDesc< uint >();
static hipChannelFormatDesc const float4ChannelDesc = hipCreateChannelDesc< float4 >();
CUDA_SAFE_CALL( hipBindTexture( NULL, texBCellStart, pdBCellStart, uintChannelDesc, numCells * sizeof(uint) ) );
CUDA_SAFE_CALL( hipBindTexture( NULL, texBCellEnd, pdBCellEnd, uintChannelDesc, numCells * sizeof(uint) ) );
CUDA_SAFE_CALL( hipBindTexture( NULL, texBIndices, pdBIndices, uintChannelDesc, numB * sizeof(uint) ) );
CUDA_SAFE_CALL( hipBindTexture( NULL, texBPositionSorted, pdBPositionSorted, float4ChannelDesc, numB * sizeof(float4) ) );
CUDA_SAFE_CALL( hipBindTexture( NULL, texCellNeighbors, pdCellNeighbors, uintChannelDesc, numCells * neighborsPerCell * sizeof(uint) ) );
}
__host__ void KNNBinningV2KernelUnbindTextures( void )
{
CUDA_SAFE_CALL( hipUnbindTexture( texBCellStart ) );
CUDA_SAFE_CALL( hipUnbindTexture( texBCellEnd ) );
CUDA_SAFE_CALL( hipUnbindTexture( texBIndices ) );
CUDA_SAFE_CALL( hipUnbindTexture( texBPositionSorted ) );
CUDA_SAFE_CALL( hipUnbindTexture( texCellNeighbors ) );
}
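// Hedged host-side launch sketch (illustrative): the KNN kernel below keeps a
// per-thread k-element queue of neighbor indices and distances in dynamic
// shared memory, so the launch is assumed to reserve
// THREADSPERBLOCK * k * (sizeof(uint) + sizeof(float)) bytes for it.
__host__ void KNNBinningV2KernelLaunchSketch( float4 const* pdAPositionSorted,
                                              uint const* pdAIndices,
                                              uint const* pdACellIndices,
                                              uint const neighborsPerCell,
                                              uint const radius,
                                              uint * pdKNNIndices,
                                              float * pdKNNDistances,
                                              uint const k,
                                              uint const numA,
                                              uint const numB,
                                              bool const groupWithSelf )
{
    uint const numBlocks = (numA + THREADSPERBLOCK - 1) / THREADSPERBLOCK;
    size_t const shMemSize = THREADSPERBLOCK * k * (sizeof(uint) + sizeof(float));
    hipLaunchKernelGGL(( KNNBinningV2Kernel), dim3(numBlocks), dim3(THREADSPERBLOCK), shMemSize, 0,
        pdAPositionSorted, pdAIndices, pdACellIndices, neighborsPerCell, radius,
        pdKNNIndices, pdKNNDistances, k, numA, numB, groupWithSelf );
}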
__global__ void KNNBinningV2Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const* pdACellIndices, // In: Sorted group A cell indices.
// Cell neighbor info.
uint const neighborsPerCell, // In: Number of neighbors per cell in the pdCellNeighbors array.
uint const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numA, // In: Size of group A.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
)
{
// Index of this agent.
int const AIndexSorted = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( AIndexSorted >= numA )
return;
__shared__ float3 shAPosition[THREADSPERBLOCK];
// Shared memory for local priority queue computations.
extern __shared__ uint shKNNIndices[];
float * shKNNDistances = (float*)shKNNIndices + THREADSPERBLOCK * k;
// Set all elements of shKNNDistances to FLT_MAX, shKNNIndices to UINT_MAX.
for( uint i = 0; i < k; i++ )
{
//shKNNIndices[(threadIdx.x * k) + i] = UINT_MAX;
//shKNNDistances[(threadIdx.x * k) + i] = FLT_MAX;
shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ] = UINT_MAX;
shKNNDistances[ threadIdx.x + i * THREADSPERBLOCK ] = FLT_MAX;
}
// Store this thread's index and cell index in registers.
uint const AIndex = pdAIndices[ AIndexSorted ];
uint cellIndex = pdACellIndices[ AIndexSorted ];
// Get the offset for the neighbors of this cell.
int const cellNeighborsOffset = cellIndex * neighborsPerCell;
// Coalesce read the positions.
shAPosition[ threadIdx.x ] = make_float3( pdAPositionSorted[ AIndexSorted ] );
// For each of the neighbors of the current cell...
for( int iCellNeighbor = 0; iCellNeighbor < neighborsPerCell; iCellNeighbor++ )
{
// Get the index of this neighbor.
cellIndex = tex1Dfetch( texCellNeighbors, cellNeighborsOffset + iCellNeighbor );
if( cellIndex == UINT_MAX ) // There is no neighboring cell in this position.
continue;
// For each member of group B in the cell...
for( uint BIndexSorted = tex1Dfetch( texBCellStart, cellIndex ); BIndexSorted < tex1Dfetch( texBCellEnd, cellIndex ); BIndexSorted++ )
{
// Get the index of the other agent (unsorted).
uint const BIndex = tex1Dfetch( texBIndices, BIndexSorted );
// Do not include self.
if( groupWithSelf && AIndex == BIndex )
continue;
// Compute the distance between this thread's A position and the B position at BIndexSorted.
float const dist = float3_distance( shAPosition[threadIdx.x], make_float3( tex1Dfetch( texBPositionSorted, BIndexSorted ) ) );
//if( dist < shKNNDistances[(threadIdx.x * k) + (k - 1)] ) // Distance of the kth closest agent.
if( dist < shKNNDistances[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] ) // Distance of the kth closest agent.
{
// Agent at index BIndex is the new (at least) kth closest. Set the distance and index in shared mem.
//shKNNDistances[(threadIdx.x * k) + (k - 1)] = dist;
//shKNNIndices[(threadIdx.x * k) + (k - 1)] = BIndex;
shKNNDistances[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] = dist;
shKNNIndices[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] = BIndex;
// Bubble the values up...
for( int slot = (k - 2); slot >= 0; slot-- )
{
//if( shKNNDistances[(threadIdx.x * k) + slot] > shKNNDistances[(threadIdx.x * k) + (slot + 1)] )
if( shKNNDistances[ threadIdx.x + slot * THREADSPERBLOCK ] > shKNNDistances[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] )
{
//swap( shKNNDistances[(threadIdx.x * k) + slot], shKNNDistances[(threadIdx.x * k) + (slot + 1)] );
//swap( shKNNIndices[(threadIdx.x * k) + slot], shKNNIndices[(threadIdx.x * k) + (slot + 1)] );
swap( shKNNDistances[ threadIdx.x + slot * THREADSPERBLOCK ], shKNNDistances[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] );
swap( shKNNIndices[ threadIdx.x + slot * THREADSPERBLOCK ], shKNNIndices[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] );
}
else
break;
}
}
}
}
__syncthreads();
// Write the shKNNIndices and shKNNDistances values out to global memory.
for( uint i = 0; i < k; i++ )
{
//pdKNNIndices[AIndex*k + i] = shKNNIndices[threadIdx.x*k + i];
//pdKNNDistances[AIndex*k + i] = shKNNDistances[threadIdx.x*k + i];
pdKNNIndices[ AIndex + i * numA ] = shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ];
pdKNNDistances[ AIndex + i * numA ] = shKNNDistances[ threadIdx.x + i * THREADSPERBLOCK ];
}
__syncthreads();
} | bbb18ddb3b9724155261c61ad8a0cdc4ba07f689.cu | #include "KNNBinningV2.cuh"
#include "../VectorUtils.cuh"
#include "CUDAKernelGlobals.cuh"
using namespace OpenSteer;
// Texture references.
texture< uint, cudaTextureType3D, cudaReadModeElementType > texCellIndicesNormalized;
texture< float4, cudaTextureType1D, cudaReadModeElementType > texPosition;
__constant__ float3 constWorldSizeV2;
__constant__ float3 constWorldStepV2;
__constant__ float3 constWorldStepNormalizedV2;
__constant__ uint3 constWorldCellsV2;
// Fetch the cell index from texCellIndicesNormalized at a given world {x,y,z} position.
//#define CELL_INDEX_NORMALIZED( pos ) ( tex3D( texCellIndicesNormalized, pos.x, pos.z, pos.y ) ) // <--- for boids.
#define CELL_INDEX_NORMALIZED( pos ) ( tex3D( texCellIndicesNormalized, pos.x, pos.y, pos.z ) ) // <--- for choke point.
// Fetch the cell index from texCellIndices at a given texel (x,y,z) coordinate.
//#define CELL_INDEX( x, y, z ) ( tex3D( texCellIndices, x, y, z ) )
// Kernel declarations.
extern "C"
{
// Bind the textures to the input cudaArray.
__host__ void KNNBinningV2BindTexture( cudaArray * pCudaArray );
// Unbind the textures.
__host__ void KNNBinningV2UnbindTexture( void );
__host__ void KNNBinningV2KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const* pdCellNeighbors,
uint const numCells,
uint const numB,
uint const neighborsPerCell );
__host__ void KNNBinningV2KernelUnbindTextures( void );
__host__ void KNNBinningV2ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents );
__host__ void KNNBinningV2ReorderDBUnbindTextures( void );
// Use to precompute the neighbors of each cell once per decomposition.
__global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
);
__global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
);
// Kernel to set initial bin indices of vehicles in the simulation.
__global__ void KNNBinningV2BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
);
// Reorder the positions on pdCellIndices, and compute the cell start and end indices.
__global__ void KNNBinningV2ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
);
__global__ void KNNBinningV2Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const* pdACellIndices, // In: Sorted group A cell indices.
// Cell neighbor info.
uint const neighborsPerCell, // In: Number of neighbors per cell in the pdCellNeighbors array.
uint const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numA, // In: Size of group A.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
);
}
__global__ void KNNBinningV2ComputeCellNeighbors3D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
)
{
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
if( index >= numCells )
return;
__shared__ float3 shPosition[KNN_THREADSPERBLOCK];
extern __shared__ uint shNeighboringCells[];
// Read the position of this thread's cell to shared memory.
shPosition[ threadIdx.x ] = pdCells[index].position;
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Get the first cell index (radius 0).
shNeighboringCells[ threadIdx.x * neighborsPerCell ] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
__syncthreads();
int i = 1;
// Compute the start offset into shNeighboringCells for this radius.
int offset = threadIdx.x * neighborsPerCell;
// For increasing radius...
for( int iCurrentRadius = 1; iCurrentRadius <= radius; iCurrentRadius++ )
{
for( int dy = -iCurrentRadius; dy <= iCurrentRadius; dy++ ) // World height.
{
for( int dz = -iCurrentRadius; dz <= iCurrentRadius; dz++ ) // World depth.
{
for( int dx = -iCurrentRadius; dx <= iCurrentRadius; dx++ ) // World width.
{
					// Only process cells on the outer shell of the current radius.
if( dz == -iCurrentRadius || dz == iCurrentRadius ||
dx == -iCurrentRadius || dx == iCurrentRadius ||
dy == -iCurrentRadius || dy == iCurrentRadius
)
{
float3 queryPosition = make_float3( POSITION_SH( threadIdx.x ).x + dx * constWorldStepNormalizedV2.x,
POSITION_SH( threadIdx.x ).y + dy * constWorldStepNormalizedV2.y,
POSITION_SH( threadIdx.x ).z + dz * constWorldStepNormalizedV2.z
);
uint cellIndex = CELL_INDEX_NORMALIZED( queryPosition );
// Do not add duplicate cells.
for( int iDup = 0; iDup < i; iDup++ )
{
if( shNeighboringCells[offset+iDup] == cellIndex )
{
cellIndex = UINT_MAX;
break;
}
}
shNeighboringCells[offset + i++] = cellIndex;
}
}
}
}
}
__syncthreads();
for( int i = 0; i < neighborsPerCell; i++ )
{
pdCellNeighbors[ index * neighborsPerCell + i ] = shNeighboringCells[ offset + i ];
}
}
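// Illustrative sketch, not part of the original file: the loops above visit
// the full (2*radius+1)^3 cube of cells around each cell, so the caller is
// assumed to size pdCellNeighbors accordingly. A hypothetical helper:
__host__ __device__ inline uint KNNBinningV2NeighborsPerCell3D( uint const radius )
{
	uint const side = 2 * radius + 1;
	return side * side * side;	// radius 1 -> 27 slots, radius 2 -> 125 slots.
}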
__global__ void KNNBinningV2ComputeCellNeighbors2D( bin_cell const* pdCells, // In: Cell data.
uint * pdCellNeighbors, // Out: Array of computed cell neighbors.
size_t const neighborsPerCell, // In: Number of neighbors per cell.
uint const radius, // In: Search radius.
size_t const numCells // In: Number of cells.
)
{
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
if( index >= numCells )
return;
__shared__ float3 shPosition[KNN_THREADSPERBLOCK];
extern __shared__ uint shNeighboringCells[];
// Read the position of this thread's cell to shared memory.
shPosition[ threadIdx.x ] = pdCells[index].position;
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Get the first cell index (radius 0).
shNeighboringCells[ threadIdx.x * neighborsPerCell ] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
__syncthreads();
int i = 1;
// Compute the start offset into shNeighboringCells for this radius.
int offset = threadIdx.x * neighborsPerCell;
// For increasing radius...
for( int iCurrentRadius = 1; iCurrentRadius <= radius; iCurrentRadius++ )
{
for( int dz = -iCurrentRadius; dz <= iCurrentRadius; dz++ )
{
for( int dx = -iCurrentRadius; dx <= iCurrentRadius; dx++ )
{
				// Only process cells on the outer shell of the current radius.
if( dz == -iCurrentRadius || dz == iCurrentRadius || dx == -iCurrentRadius || dx == iCurrentRadius )
{
float3 queryPosition = make_float3( POSITION_SH( threadIdx.x ).x + dx * constWorldStepNormalizedV2.x,
POSITION_SH( threadIdx.x ).y,
POSITION_SH( threadIdx.x ).z + dz * constWorldStepNormalizedV2.z
);
uint cellIndex = CELL_INDEX_NORMALIZED( queryPosition );
// Do not add duplicate cells.
for( int iDup = 0; iDup < i; iDup++ )
{
if( shNeighboringCells[offset+iDup] == cellIndex )
cellIndex = UINT_MAX;
}
shNeighboringCells[offset + i++] = cellIndex;
}
}
}
}
__syncthreads();
for( int i = 0; i < neighborsPerCell; i++ )
{
pdCellNeighbors[ index * neighborsPerCell + i ] = shNeighboringCells[ offset + i ];
}
}
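// Companion sketch for the 2D decomposition (an assumption mirroring the 3D
// case above): here the loops visit the (2*radius+1)^2 square of cells in the
// XZ plane, so neighborsPerCell is expected to be at least:
__host__ __device__ inline uint KNNBinningV2NeighborsPerCell2D( uint const radius )
{
	uint const side = 2 * radius + 1;
	return side * side;	// radius 1 -> 9 slots, radius 2 -> 25 slots.
}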
__host__ void KNNBinningV2BindTexture( cudaArray * pdCudaArray )
{
static cudaChannelFormatDesc const channelDesc = cudaCreateChannelDesc< uint >();
texCellIndicesNormalized.normalized = true;
texCellIndicesNormalized.filterMode = cudaFilterModePoint;
// Clamp out of bounds coordinates to the edge of the texture.
texCellIndicesNormalized.addressMode[0] = cudaAddressModeClamp;
texCellIndicesNormalized.addressMode[1] = cudaAddressModeClamp;
texCellIndicesNormalized.addressMode[2] = cudaAddressModeClamp;
CUDA_SAFE_CALL( cudaBindTextureToArray( texCellIndicesNormalized, pdCudaArray, channelDesc ) );
}
__host__ void KNNBinningV2UnbindTexture( void )
{
CUDA_SAFE_CALL( cudaUnbindTexture( texCellIndicesNormalized ) );
}
__global__ void KNNBinningV2BuildDB( float4 const* pdPosition, // In: Positions of each agent.
size_t * pdAgentIndices, // Out: Indices of each agent.
size_t * pdCellIndices, // Out: Indices of the cell each agent is in.
size_t const numAgents
)
{
// Offset of this agent in the global array.
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( index >= numAgents )
return;
// Copy the positions to shared memory.
__shared__ float3 shPosition[THREADSPERBLOCK];
POSITION_SH( threadIdx.x ) = POSITION_F3( index );
// Normalize the positions.
POSITION_SH( threadIdx.x ).x = (POSITION_SH( threadIdx.x ).x + 0.5f * constWorldSizeV2.x) / constWorldSizeV2.x;
POSITION_SH( threadIdx.x ).y = (POSITION_SH( threadIdx.x ).y + 0.5f * constWorldSizeV2.y) / constWorldSizeV2.y;
POSITION_SH( threadIdx.x ).z = (POSITION_SH( threadIdx.x ).z + 0.5f * constWorldSizeV2.z) / constWorldSizeV2.z;
// Write the agent's cell index out to global memory.
pdCellIndices[index] = CELL_INDEX_NORMALIZED( POSITION_SH( threadIdx.x ) );
// Write the agent's index out to global memory.
pdAgentIndices[index] = index;
}
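// Reference sketch of the normalization used above (illustrative only, not in
// the original source): a world coordinate in [-0.5*worldSize, 0.5*worldSize]
// maps to the [0, 1] range required by the normalized cell-index texture.
__host__ __device__ inline float KNNBinningV2NormalizeCoord( float const world, float const worldSize )
{
	return (world + 0.5f * worldSize) / worldSize;
}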
__host__ void KNNBinningV2ReorderDBBindTextures( float4 const* pdPosition,
uint const numAgents
)
{
static cudaChannelFormatDesc const float4ChannelDesc = cudaCreateChannelDesc< float4 >();
CUDA_SAFE_CALL( cudaBindTexture( NULL, texPosition, pdPosition, float4ChannelDesc, numAgents * sizeof(float4) ) );
}
__host__ void KNNBinningV2ReorderDBUnbindTextures( void )
{
CUDA_SAFE_CALL( cudaUnbindTexture( texPosition ) );
}
__global__ void KNNBinningV2ReorderDB( uint const* pdAgentIndices, // In: (sorted) agent index.
uint const* pdCellIndices, // In: (sorted) cell index agent is in.
float4 * pdPositionSorted, // Out: Sorted agent positions.
uint * pdCellStart, // Out: Start index of this cell in pdCellIndices.
uint * pdCellEnd, // Out: End index of this cell in pdCellIndices.
size_t const numAgents
)
{
// Offset of this agent.
int const index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( index >= numAgents )
return;
__shared__ uint shCellIndices[THREADSPERBLOCK+1];
// Shared memory so we can coalesce the writes of sorted data to global memory.
__shared__ float4 shPositionSorted[THREADSPERBLOCK];
// Read the cell index of this agent.
uint iCellIndex = pdCellIndices[ index ];
__syncthreads();
// Store cell index data in shared memory so that we can look
// at the neighboring agent's value without two reads per thread.
shCellIndices[ threadIdx.x + 1 ] = iCellIndex;
if( index > 0 && threadIdx.x == 0 )
{
// First thread in block must load neighbor agent cell index.
shCellIndices[0] = pdCellIndices[ index - 1 ];
}
__syncthreads();
	// If this agent has a different cell index from the previous
	// agent, it must be the first agent in its cell, so store this
	// agent's index as the cell start. Unless it is the first agent
	// overall, it also marks the cell end of the previous agent's cell.
if( index == 0 || iCellIndex != shCellIndices[ threadIdx.x ] )
{
pdCellStart[ iCellIndex ] = index;
if( index > 0 )
pdCellEnd[ shCellIndices[ threadIdx.x ] ] = index;
}
// If this is the last agent, the end index for the cell will be index + 1
if( index == (numAgents - 1) )
{
pdCellEnd[ iCellIndex ] = index + 1;
}
// Use the sorted index to reorder the position/direction/speed data.
uint const iSortedIndex = pdAgentIndices[ index ];
shPositionSorted[ threadIdx.x ] = tex1Dfetch( texPosition, iSortedIndex );
// Write to global memory.
pdPositionSorted[ index ] = shPositionSorted[ threadIdx.x ];
}
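// Host-side equivalent of the cell start/end computation in the kernel above,
// included as a sketch to make the per-thread logic easier to verify (a
// hypothetical helper, not present in the original source):
__host__ inline void KNNBinningV2ComputeCellRangesCPU( uint const* pCellIndicesSorted, uint const numAgents,
													   uint * pdCellStart, uint * pdCellEnd )
{
	for( uint i = 0; i < numAgents; i++ )
	{
		uint const cell = pCellIndicesSorted[i];
		// First agent of a new cell: record the start, close the previous cell.
		if( i == 0 || cell != pCellIndicesSorted[i - 1] )
		{
			pdCellStart[cell] = i;
			if( i > 0 )
				pdCellEnd[ pCellIndicesSorted[i - 1] ] = i;
		}
		// The last agent overall closes the final cell.
		if( i == numAgents - 1 )
			pdCellEnd[cell] = i + 1;
	}
}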
// Textures used by KNNBinningKernel.
texture< uint, cudaTextureType1D, cudaReadModeElementType> texBCellStart;
texture< uint, cudaTextureType1D, cudaReadModeElementType> texBCellEnd;
texture< uint, cudaTextureType1D, cudaReadModeElementType> texBIndices;
texture< float4, cudaTextureType1D, cudaReadModeElementType> texBPositionSorted;
texture< uint, cudaTextureType1D, cudaReadModeElementType > texCellNeighbors;
__host__ void KNNBinningV2KernelBindTextures( uint const* pdBCellStart,
uint const* pdBCellEnd,
uint const* pdBIndices,
float4 const* pdBPositionSorted,
uint const* pdCellNeighbors,
uint const numCells,
uint const numB,
uint const neighborsPerCell
)
{
static cudaChannelFormatDesc const uintChannelDesc = cudaCreateChannelDesc< uint >();
static cudaChannelFormatDesc const float4ChannelDesc = cudaCreateChannelDesc< float4 >();
CUDA_SAFE_CALL( cudaBindTexture( NULL, texBCellStart, pdBCellStart, uintChannelDesc, numCells * sizeof(uint) ) );
CUDA_SAFE_CALL( cudaBindTexture( NULL, texBCellEnd, pdBCellEnd, uintChannelDesc, numCells * sizeof(uint) ) );
CUDA_SAFE_CALL( cudaBindTexture( NULL, texBIndices, pdBIndices, uintChannelDesc, numB * sizeof(uint) ) );
CUDA_SAFE_CALL( cudaBindTexture( NULL, texBPositionSorted, pdBPositionSorted, float4ChannelDesc, numB * sizeof(float4) ) );
CUDA_SAFE_CALL( cudaBindTexture( NULL, texCellNeighbors, pdCellNeighbors, uintChannelDesc, numCells * neighborsPerCell * sizeof(uint) ) );
}
__host__ void KNNBinningV2KernelUnbindTextures( void )
{
CUDA_SAFE_CALL( cudaUnbindTexture( texBCellStart ) );
CUDA_SAFE_CALL( cudaUnbindTexture( texBCellEnd ) );
CUDA_SAFE_CALL( cudaUnbindTexture( texBIndices ) );
CUDA_SAFE_CALL( cudaUnbindTexture( texBPositionSorted ) );
CUDA_SAFE_CALL( cudaUnbindTexture( texCellNeighbors ) );
}
__global__ void KNNBinningV2Kernel( // Group A
float4 const* pdAPositionSorted, // In: Sorted group A positions.
uint const* pdAIndices, // In: Sorted group A indices
uint const* pdACellIndices, // In: Sorted group A cell indices.
// Cell neighbor info.
uint const neighborsPerCell, // In: Number of neighbors per cell in the pdCellNeighbors array.
uint const radius, // In: Search radius (in cells) to consider.
// Output data.
uint * pdKNNIndices, // Out: Indices of K Nearest Neighbors in pdPosition.
float * pdKNNDistances, // Out: Distances of the K Nearest Neighbors in pdPosition.
uint const k, // In: Number of neighbors to consider.
uint const numA, // In: Size of group A.
uint const numB, // In: Size of group B.
bool const groupWithSelf // In: Are we testing this group with itself? (group A == group B)
)
{
// Index of this agent.
int const AIndexSorted = (blockIdx.x * blockDim.x) + threadIdx.x;
// Check bounds.
if( AIndexSorted >= numA )
return;
__shared__ float3 shAPosition[THREADSPERBLOCK];
// Shared memory for local priority queue computations.
extern __shared__ uint shKNNIndices[];
float * shKNNDistances = (float*)shKNNIndices + THREADSPERBLOCK * k;
// Set all elements of shKNNDistances to FLT_MAX, shKNNIndices to UINT_MAX.
for( uint i = 0; i < k; i++ )
{
//shKNNIndices[(threadIdx.x * k) + i] = UINT_MAX;
//shKNNDistances[(threadIdx.x * k) + i] = FLT_MAX;
shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ] = UINT_MAX;
shKNNDistances[ threadIdx.x + i * THREADSPERBLOCK ] = FLT_MAX;
}
// Store this thread's index and cell index in registers.
uint const AIndex = pdAIndices[ AIndexSorted ];
uint cellIndex = pdACellIndices[ AIndexSorted ];
// Get the offset for the neighbors of this cell.
int const cellNeighborsOffset = cellIndex * neighborsPerCell;
// Coalesce read the positions.
shAPosition[ threadIdx.x ] = make_float3( pdAPositionSorted[ AIndexSorted ] );
// For each of the neighbors of the current cell...
for( int iCellNeighbor = 0; iCellNeighbor < neighborsPerCell; iCellNeighbor++ )
{
// Get the index of this neighbor.
cellIndex = tex1Dfetch( texCellNeighbors, cellNeighborsOffset + iCellNeighbor );
if( cellIndex == UINT_MAX ) // There is no neighboring cell in this position.
continue;
// For each member of group B in the cell...
for( uint BIndexSorted = tex1Dfetch( texBCellStart, cellIndex ); BIndexSorted < tex1Dfetch( texBCellEnd, cellIndex ); BIndexSorted++ )
{
// Get the index of the other agent (unsorted).
uint const BIndex = tex1Dfetch( texBIndices, BIndexSorted );
// Do not include self.
if( groupWithSelf && AIndex == BIndex )
continue;
			// Compute the distance between this thread's A position and the B position at BIndexSorted.
float const dist = float3_distance( shAPosition[threadIdx.x], make_float3( tex1Dfetch( texBPositionSorted, BIndexSorted ) ) );
//if( dist < shKNNDistances[(threadIdx.x * k) + (k - 1)] ) // Distance of the kth closest agent.
if( dist < shKNNDistances[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] ) // Distance of the kth closest agent.
{
// Agent at index BIndex is the new (at least) kth closest. Set the distance and index in shared mem.
//shKNNDistances[(threadIdx.x * k) + (k - 1)] = dist;
//shKNNIndices[(threadIdx.x * k) + (k - 1)] = BIndex;
shKNNDistances[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] = dist;
shKNNIndices[ threadIdx.x + (k - 1) * THREADSPERBLOCK ] = BIndex;
// Bubble the values up...
for( int slot = (k - 2); slot >= 0; slot-- )
{
//if( shKNNDistances[(threadIdx.x * k) + slot] > shKNNDistances[(threadIdx.x * k) + (slot + 1)] )
if( shKNNDistances[ threadIdx.x + slot * THREADSPERBLOCK ] > shKNNDistances[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] )
{
//swap( shKNNDistances[(threadIdx.x * k) + slot], shKNNDistances[(threadIdx.x * k) + (slot + 1)] );
//swap( shKNNIndices[(threadIdx.x * k) + slot], shKNNIndices[(threadIdx.x * k) + (slot + 1)] );
swap( shKNNDistances[ threadIdx.x + slot * THREADSPERBLOCK ], shKNNDistances[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] );
swap( shKNNIndices[ threadIdx.x + slot * THREADSPERBLOCK ], shKNNIndices[ threadIdx.x + (slot + 1) * THREADSPERBLOCK ] );
}
else
break;
}
}
}
}
__syncthreads();
// Write the shKNNIndices and shKNNDistances values out to global memory.
for( uint i = 0; i < k; i++ )
{
//pdKNNIndices[AIndex*k + i] = shKNNIndices[threadIdx.x*k + i];
//pdKNNDistances[AIndex*k + i] = shKNNDistances[threadIdx.x*k + i];
pdKNNIndices[ AIndex + i * numA ] = shKNNIndices[ threadIdx.x + i * THREADSPERBLOCK ];
pdKNNDistances[ AIndex + i * numA ] = shKNNDistances[ threadIdx.x + i * THREADSPERBLOCK ];
}
__syncthreads();
} |
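// Scalar sketch of the shared-memory insertion performed in the kernel above
// (illustrative only; the real kernel strides each thread's list by
// THREADSPERBLOCK to keep shared-memory accesses conflict-free):
__host__ __device__ inline void KNNInsertIntoTopK( float * dist, uint * idx, uint const k,
												   float const d, uint const i )
{
	if( d >= dist[k - 1] )
		return;		// Not closer than the current kth-nearest neighbor.
	dist[k - 1] = d;
	idx[k - 1] = i;
	// Bubble the new entry toward the front until the list is sorted again.
	for( int slot = (int)k - 2; slot >= 0 && dist[slot] > dist[slot + 1]; slot-- )
	{
		float const td = dist[slot]; dist[slot] = dist[slot + 1]; dist[slot + 1] = td;
		uint const ti = idx[slot]; idx[slot] = idx[slot + 1]; idx[slot + 1] = ti;
	}
}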
d81f9f49acd767a43084620036da35c4dd0d536f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "InterpolateFromMemBlock.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input1 = NULL;
hipMalloc(&input1, XSIZE*YSIZE*sizeof(float)); // size in bytes, not elements
float *input2 = NULL;
hipMalloc(&input2, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE*sizeof(float));
float *weightMemBlock = NULL;
hipMalloc(&weightMemBlock, XSIZE*YSIZE*sizeof(float));
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
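// gridBlock covers at least XSIZE x YSIZE threads because iXSIZE/iYSIZE were
// rounded up to multiples of the block dims above, i.e. the grid is
// ceil(XSIZE/BLOCKX) x ceil(YSIZE/BLOCKY) blocks.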
hipFree(0);
hipLaunchKernelGGL(InterpolateFromMemBlock, dim3(gridBlock), dim3(threadBlock), 0, 0, input1, input2, output, weightMemBlock, inputSize);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(InterpolateFromMemBlock, dim3(gridBlock), dim3(threadBlock), 0, 0, input1, input2, output, weightMemBlock, inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(InterpolateFromMemBlock, dim3(gridBlock), dim3(threadBlock), 0, 0, input1, input2, output, weightMemBlock, inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d81f9f49acd767a43084620036da35c4dd0d536f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "InterpolateFromMemBlock.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input1 = NULL;
cudaMalloc(&input1, XSIZE*YSIZE*sizeof(float)); // size in bytes, not elements
float *input2 = NULL;
cudaMalloc(&input2, XSIZE*YSIZE*sizeof(float));
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE*sizeof(float));
float *weightMemBlock = NULL;
cudaMalloc(&weightMemBlock, XSIZE*YSIZE*sizeof(float));
int inputSize = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
InterpolateFromMemBlock<<<gridBlock,threadBlock>>>(input1,input2,output,weightMemBlock,inputSize);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
InterpolateFromMemBlock<<<gridBlock,threadBlock>>>(input1,input2,output,weightMemBlock,inputSize);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
InterpolateFromMemBlock<<<gridBlock,threadBlock>>>(input1,input2,output,weightMemBlock,inputSize);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1b27aee1ceaca19e15556efa815dd2a4b932fe6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @file resize.cu
* @brief The kernel and invocation definitions of resizing an image.
*/
#include "resize.h"
#include "utility.hpp"
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define INC(x, l) ((x + 1) >= (l) ? (x) : ((x) + 1))
#define INTER_RESIZE_COEF_BITS 11
#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS)
#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1)
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define DEVICE_INLINE
#if defined(DEVICE_INLINE)
# define __DEVICE__ __device__ __forceinline__
#else
# define __DEVICE__ __device__
#endif
template <typename T>
__DEVICE__
T bilinearSampleUchar(T t[][2], int x0, int x1, int y0, int y1);
template <>
__DEVICE__
uchar2 bilinearSampleUchar(uchar2 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int2 ret;
uchar2 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
template <>
__DEVICE__
uchar3 bilinearSampleUchar(uchar3 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int3 ret;
uchar3 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3;
final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
template <>
__DEVICE__
uchar4 bilinearSampleUchar(uchar4 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int4 ret;
uchar4 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3;
final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.w = t[0][0].w * a0 + t[0][1].w * a1 + t[1][0].w * a2 + t[1][1].w * a3;
final_ret.w = (ret.w + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
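// Note on the fixed-point scheme used by the specializations above: each 1D
// weight carries INTER_RESIZE_COEF_BITS (11) fractional bits, so the product
// of an x-weight and a y-weight carries CAST_BITS (22); adding
// 1 << (CAST_BITS - 1) before the final shift performs round-to-nearest. In
// float terms the result is t00*wx0*wy0 + t01*wx1*wy0 + t10*wx0*wy1 + t11*wx1*wy1,
// with wx0 + wx1 == wy0 + wy1 == 1.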
/***************************** ResizeLinear() ******************************/
__global__
void resizeLinearKernel(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fy = ((element_y + 0.5f) * row_scale - 0.5f);
float fx = ((element_x + 0.5f) * col_scale - 0.5f);
int sy = floor(fy);
int sx = floor(fx);
fy -= sy;
fx -= sx;
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy, src_rows);
int cbufy[2];
fy = fy * INTER_RESIZE_COEF_SCALE;
cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy);
cbufy[1] = rint(fy);
int sx_ = INC(sx, src_cols);
int cbufx[2];
fx = fx * INTER_RESIZE_COEF_SCALE;
cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx));
cbufx[1] = rint(fx);
if (channels == 1) {
int src_index0 = sy * src_stride + sx;
int src_index1 = sy * src_stride + sx_;
int src_index2 = sy_ * src_stride + sx;
int src_index3 = sy_ * src_stride + sx_;
int dst_index = element_y * dst_stride + element_x;
int sum = 0;
sum = cbufy[0] * cbufx[0] * src[src_index0] +
cbufy[0] * cbufx[1] * src[src_index1] +
cbufy[1] * cbufx[0] * src[src_index2] +
cbufy[1] * cbufx[1] * src[src_index3];
dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS;
}
else if (channels == 2) {
uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride);
uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride);
uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride);
uchar2 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else if (channels == 3) {
uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride);
uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride);
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
uchar3 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else {
uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride);
uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride);
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
uchar4 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
}
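// The source-coordinate mapping above is the standard pixel-center alignment.
// A hedged helper spelling it out (an illustration, not part of the original API):
__device__ __forceinline__ float mapToSourceCoord(int dst_index, float scale) {
  // Align pixel centers: destination center (dst_index + 0.5) lands on source
  // coordinate (dst_index + 0.5) * scale, re-expressed as a 0-based index.
  return (dst_index + 0.5f) * scale - 0.5f;
}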
__global__
void resizeLinearKernel(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst, int dst_rows,
int dst_cols, int dst_stride, double col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fx = ((element_x + 0.5f) * col_scale - 0.5f);
float fy = ((element_y + 0.5f) * row_scale - 0.5f);
int sx = floor(fx);
int sy = floor(fy);
fx -= sx;
fy -= sy;
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy,src_rows);
float cbufy[2];
cbufy[0] = 1.f - fy;
cbufy[1] = 1.f - cbufy[0];
int sx_ = INC(sx,src_cols);
float cbufx[2];
cbufx[0] = 1.f - fx;
cbufx[1] = 1.f - cbufx[0];
if (channels == 1) {
int index = sy * src_stride;
float src1 = src[index + sx];
float src2 = src[index + sx_];
float value1 = cbufy[0] * cbufx[0] * src1;
float value2 = cbufy[0] * cbufx[1] * src2;
float sum = 0.f;
sum += value1 + value2;
index = sy_ * src_stride;
src1 = src[index + sx];
src2 = src[index + sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1 + value2;
index = element_y * dst_stride + element_x;
dst[index] = sum;
}
else if (channels == 3) {
int index = sy * src_stride;
float3 src1 = ((float3*)(src + index))[sx];
float3 src2 = ((float3*)(src + index))[sx_];
float3 value1 = cbufy[0] * cbufx[0] * src1;
float3 value2 = cbufy[0] * cbufx[1] * src2;
float3 sum = make_float3(0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float3*)(src + index))[sx];
src2 = ((float3*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float3* output = (float3*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
else {
int index = sy * src_stride;
float4 src1 = ((float4*)(src + index))[sx];
float4 src2 = ((float4*)(src + index))[sx_];
float4 value1 = cbufy[0] * cbufx[0] * src1;
float4 value2 = cbufy[0] * cbufx[1] * src2;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float4*)(src + index))[sx];
src2 = ((float4*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float4* output = (float4*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
}
RetCode resizeLinear(const uchar* src, int src_rows, int src_cols, int channels,
int src_stride, uchar* dst, int dst_rows, int dst_cols,
int dst_stride, hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
grid.x = (dst_cols + kBlockX -1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
hipLaunchKernelGGL(( resizeLinearKernel), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale);
return RC_SUCCESS;
}
RetCode resizeLinear(const float* src, int src_rows, int src_cols, int channels,
int src_stride, float* dst, int dst_rows, int dst_cols,
int dst_stride, hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
grid.x = (dst_cols + kBlockX -1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
double col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
hipLaunchKernelGGL(( resizeLinearKernel), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale);
return RC_SUCCESS;
}
template <>
RetCode ResizeLinear<uchar, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<uchar, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<uchar, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
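// Hedged usage sketch (sizes and variable names are assumptions, not from
// this file): resizing a 1920x1080 single-channel uchar image to 1280x720
// with pitched device buffers.
//
//   size_t src_pitch, dst_pitch;
//   uchar *src, *dst;
//   hipMallocPitch((void**)&src, &src_pitch, 1920 * sizeof(uchar), 1080);
//   hipMallocPitch((void**)&dst, &dst_pitch, 1280 * sizeof(uchar), 720);
//   ResizeLinear<uchar, 1>(stream, 1080, 1920, src_pitch / sizeof(uchar), src,
//                          720, 1280, dst_pitch / sizeof(uchar), dst);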
/************************** resizeNearestPoint() ***************************/
template <typename T0, typename T1>
__global__
void resizeNearestPointKernel(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst,
int dst_rows, int dst_cols, int dst_stride,
float col_scale, float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int sy = element_y * row_scale;
sy = MIN(sy, src_rows - 1);
int sx = element_x * col_scale;
sx = MIN(sx, src_cols - 1);
T0* input = (T0*)(src + sy* src_stride);
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = input[sx];
}
RetCode resizeNearestPoint(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst,
int dst_rows, int dst_cols, int dst_stride,
hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
grid.x = (dst_cols + kBlockX -1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
if (channels == 1) {
hipLaunchKernelGGL(( resizeNearestPointKernel<uchar, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeNearestPointKernel<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeNearestPointKernel<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
return RC_SUCCESS;
}
RetCode resizeNearestPoint(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst,
int dst_rows, int dst_cols, int dst_stride,
hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
hipMemcpyDeviceToDevice);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
grid.x = (dst_cols + kBlockX -1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
if (channels == 1) {
hipLaunchKernelGGL(( resizeNearestPointKernel<float, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeNearestPointKernel<float3, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeNearestPointKernel<float4, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
return RC_SUCCESS;
}
template <>
RetCode ResizeNearestPoint<uchar, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<uchar, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<uchar, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
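// The nearest-point kernels above select source indices by plain truncation
// with clamping: src = min((int)(dst * scale), src_len - 1). For example,
// mapping 10 destination columns onto 4 source columns (scale 0.4) picks
// source columns 0,0,0,1,1,2,2,2,3,3.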
/****************************** ResizeArea() *******************************/
template <typename T>
__global__
void resizeAreaKernel0C1(const T* src, int src_rows, int src_cols, int channels,
int src_stride, T* dst, int dst_rows, int dst_cols,
int dst_stride, int col_scale, int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float sum = 0.f;
T* input;
for (int i = y_start; i < y_end; ++i) {
input = (T*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T* output = (T*)(dst + element_y * dst_stride);
if (sizeof(T) == 1) {
output[element_x] = saturate_cast(sum);
}
else {
output[element_x] = sum;
}
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C2(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float2 sum = make_float2(0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float2>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C3(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float3 sum = make_float3(0.f, 0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float3>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C4(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float4>(sum);
}
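// The Kernel0 variants above implement the fast integer-scale area path: each
// destination pixel is the plain mean of a col_scale x row_scale source box,
// e.g. a 2x downscale averages disjoint 2x2 blocks (an exact box filter).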
template <typename T>
__global__
void resizeAreaKernel1C1(const T* src, int src_rows, int src_cols, int channels,
int src_stride, T* dst, int dst_rows, int dst_cols,
int dst_stride, float col_scale, float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T* input;
float sum = 0.f;
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
sum = sum + input[sx1 - 1] * (sy1 - fsy1) * (sx1 - fsx1);
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx] * (sy1 - fsy1);
}
if (fsx2 - sx2 > 1e-3) {
sum = sum + input[sx2] * (sy1 - fsy1) * (fsx2 - sx2);
}
}
input = (T*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
sum = sum + input[sx1 - 1] * ((sx1 - fsx1));
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx];
}
if (fsx2 - sx2 > 1e-3) {
sum = sum + input[sx2] * ((fsx2 - sx2));
}
input += src_stride;
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
sum = sum + input[sx1 - 1] * (fsy2 - sy2) * (sx1 - fsx1);
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx] * (fsy2 - sy2);
}
if (fsx2 - sx2 > 1e-3) {
sum = sum + input[sx2] * (fsy2 - sy2) * (fsx2 - sx2);
}
}
sum = sum / area;
T* output = (T*)(dst + element_y * dst_stride);
if (sizeof(T) == 1) {
output[element_x] = saturate_cast(sum);
}
else {
output[element_x] = sum;
}
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C2(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float2 value;
float2 sum = make_float2(0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float2>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C3(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float3 value;
float3 sum = make_float3(0.f, 0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float3>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C4(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float4 value;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float4>(sum);
}
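// The Kernel1 variants handle non-integer downscales: the destination pixel
// footprint [fsx1, fsx2) x [fsy1, fsy2) generally covers partial border
// pixels, so each source pixel is weighted by its overlap with the footprint
// (the sy1 - fsy1, fsx2 - sx2, ... factors) and the accumulated sum is
// divided by the clipped footprint area.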
__global__
void resizeAreaKernel2(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale, float inv_col_scale,
float inv_row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int sy = floor(element_y * row_scale);
int sx = floor(element_x * col_scale);
float fy = element_y + 1 - (sy + 1) * inv_row_scale;
float fx = element_x + 1 - (sx + 1) * inv_col_scale;
fy = fy <= 0 ? 0.f : fy - floor(fy);
fx = fx <= 0 ? 0.f : fx - floor(fx);
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy, src_rows);
int cbufy[2];
fy = fy * INTER_RESIZE_COEF_SCALE;
cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy);
cbufy[1] = rint(fy);
int sx_ = INC(sx, src_cols);
int cbufx[2];
fx = fx * INTER_RESIZE_COEF_SCALE;
cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx));
cbufx[1] = rint(fx);
if (channels == 1) {
int src_index0 = sy * src_stride + sx;
int src_index1 = sy * src_stride + sx_;
int src_index2 = sy_ * src_stride + sx;
int src_index3 = sy_ * src_stride + sx_;
int dst_index = element_y * dst_stride + element_x;
int sum = 0;
sum = cbufy[0] * cbufx[0] * src[src_index0] +
cbufy[0] * cbufx[1] * src[src_index1] +
cbufy[1] * cbufx[0] * src[src_index2] +
cbufy[1] * cbufx[1] * src[src_index3];
dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS;
}
else if (channels == 2) {
uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride);
uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride);
uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride);
uchar2 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else if (channels == 3) {
uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride);
uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride);
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
uchar3 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else {
uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride);
uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride);
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
uchar4 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
}
__global__
void resizeAreaKernel2(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst, int dst_rows,
int dst_cols, int dst_stride, double col_scale,
float row_scale, float inv_col_scale,
float inv_row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int sy = floor(element_y * row_scale);
int sx = floor(element_x * col_scale);
float fy = element_y + 1 - (sy + 1) * inv_row_scale;
float fx = element_x + 1 - (sx + 1) * inv_col_scale;
fy = fy <= 0 ? 0.f : fy - floor(fy);
fx = fx <= 0 ? 0.f : fx - floor(fx);
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy,src_rows);
float cbufy[2];
cbufy[0] = 1.f - fy;
cbufy[1] = 1.f - cbufy[0];
int sx_ = INC(sx,src_cols);
float cbufx[2];
cbufx[0] = 1.f - fx;
cbufx[1] = 1.f - cbufx[0];
if (channels == 1) {
int index = sy * src_stride;
float src1 = src[index + sx];
float src2 = src[index + sx_];
float value1 = cbufy[0] * cbufx[0] * src1;
float value2 = cbufy[0] * cbufx[1] * src2;
float sum = 0.f;
sum += value1 + value2;
index = sy_ * src_stride;
src1 = src[index + sx];
src2 = src[index + sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1 + value2;
index = element_y * dst_stride + element_x;
dst[index] = sum;
}
else if (channels == 3) {
int index = sy * src_stride;
float3 src1 = ((float3*)(src + index))[sx];
float3 src2 = ((float3*)(src + index))[sx_];
float3 value1 = cbufy[0] * cbufx[0] * src1;
float3 value2 = cbufy[0] * cbufx[1] * src2;
float3 sum = make_float3(0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float3*)(src + index))[sx];
src2 = ((float3*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float3* output = (float3*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
else {
int index = sy * src_stride;
float4 src1 = ((float4*)(src + index))[sx];
float4 src2 = ((float4*)(src + index))[sx_];
float4 value1 = cbufy[0] * cbufx[0] * src1;
float4 value2 = cbufy[0] * cbufx[1] * src2;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float4*)(src + index))[sx];
src2 = ((float4*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float4* output = (float4*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
}
RetCode resizeArea(const uchar* src, int src_rows, int src_cols, int channels,
int src_stride, uchar* dst, int dst_rows, int dst_cols,
int dst_stride, hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
      code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
                            hipMemcpyDeviceToDevice, stream);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
float inv_col_scale = 1.0 / col_scale;
float inv_row_scale = 1.0 / row_scale;
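  // Integer-ratio downscales take the kernel0 (box average) path, other
  // downscales take kernel1 (coverage-weighted area), and the remaining
  // cases fall back to kernel2 (bilinear sampling).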
if (src_cols > dst_cols && src_rows > dst_rows) {
if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) {
if (channels == 1) {
hipLaunchKernelGGL(( resizeAreaKernel0C1<uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeAreaKernel0C3<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeAreaKernel0C4<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
else {
if (channels == 1) {
hipLaunchKernelGGL(( resizeAreaKernel1C1<uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeAreaKernel1C3<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeAreaKernel1C4<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
}
else {
hipLaunchKernelGGL(( resizeAreaKernel2), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale, inv_col_scale, inv_row_scale);
}
return RC_SUCCESS;
}
RetCode resizeArea(const float* src, int src_rows, int src_cols, int channels,
int src_stride, float* dst, int dst_rows, int dst_cols,
int dst_stride, hipStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
hipError_t code;
      code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
                            hipMemcpyDeviceToDevice, stream);
if (code != hipSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
double col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
float inv_col_scale = 1.0 / col_scale;
float inv_row_scale = 1.0 / row_scale;
if (src_cols > dst_cols && src_rows > dst_rows) {
if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) {
if (channels == 1) {
hipLaunchKernelGGL(( resizeAreaKernel0C1<float>), dim3(grid), dim3(block), 0, stream, src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeAreaKernel0C3<float3, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeAreaKernel0C4<float4, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
else {
if (channels == 1) {
hipLaunchKernelGGL(( resizeAreaKernel1C1<float>), dim3(grid), dim3(block), 0, stream, src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
hipLaunchKernelGGL(( resizeAreaKernel1C3<float3, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
hipLaunchKernelGGL(( resizeAreaKernel1C4<float4, float>), dim3(grid), dim3(block), 0, stream, src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
}
else {
hipLaunchKernelGGL(( resizeAreaKernel2), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale, inv_col_scale, inv_row_scale);
}
return RC_SUCCESS;
}
template <>
RetCode ResizeArea<uchar, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<uchar, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<uchar, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 1>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 3>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 4>(hipStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
| 1b27aee1ceaca19e15556efa815dd2a4b932fe6b.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @file resize.cu
 * @brief The kernel and invocation definitions for resizing an image.
*/
#include "resize.h"
#include "utility.hpp"
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define INC(x, l) ((x + 1) >= (l) ? (x) : ((x) + 1))
#define INTER_RESIZE_COEF_BITS 11
#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS)
#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1)
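// INC(x, l) advances x by one but clamps it to the last valid index (l - 1),
// replicating border pixels. The INTER_RESIZE_* constants implement 11-bit
// fixed-point interpolation weights; CAST_BITS (22) shifts the accumulated
// products back down to pixel range.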
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
#define DEVICE_INLINE
#if defined(DEVICE_INLINE)
# define __DEVICE__ __device__ __forceinline__
#else
# define __DEVICE__ __device__
#endif
template <typename T>
__DEVICE__
T bilinearSampleUchar(T t[][2], int x0, int x1, int y0, int y1);
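// t holds the 2x2 source neighborhood; x0/x1 and y0/y1 are the fixed-point
// column and row weights computed by the resize kernels below.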
template <>
__DEVICE__
uchar2 bilinearSampleUchar(uchar2 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int2 ret;
uchar2 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
template <>
__DEVICE__
uchar3 bilinearSampleUchar(uchar3 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int3 ret;
uchar3 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3;
final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
template <>
__DEVICE__
uchar4 bilinearSampleUchar(uchar4 t[][2], int x0, int x1, int y0, int y1) {
int a0 = y0 * x0;
int a1 = y0 * x1;
int a2 = y1 * x0;
int a3 = y1 * x1;
int4 ret;
uchar4 final_ret;
ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3;
final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3;
final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3;
final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS;
ret.w = t[0][0].w * a0 + t[0][1].w * a1 + t[1][0].w * a2 + t[1][1].w * a3;
final_ret.w = (ret.w + (1<<(CAST_BITS-1))) >> CAST_BITS;
return final_ret;
}
/***************************** ResizeLinear() ******************************/
__global__
void resizeLinearKernel(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fy = ((element_y + 0.5f) * row_scale - 0.5f);
float fx = ((element_x + 0.5f) * col_scale - 0.5f);
int sy = floor(fy);
int sx = floor(fx);
fy -= sy;
fx -= sx;
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy, src_rows);
int cbufy[2];
fy = fy * INTER_RESIZE_COEF_SCALE;
cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy);
cbufy[1] = rint(fy);
int sx_ = INC(sx, src_cols);
int cbufx[2];
fx = fx * INTER_RESIZE_COEF_SCALE;
  cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - fx);
cbufx[1] = rint(fx);
if (channels == 1) {
int src_index0 = sy * src_stride + sx;
int src_index1 = sy * src_stride + sx_;
int src_index2 = sy_ * src_stride + sx;
int src_index3 = sy_ * src_stride + sx_;
int dst_index = element_y * dst_stride + element_x;
int sum = 0;
sum = cbufy[0] * cbufx[0] * src[src_index0] +
cbufy[0] * cbufx[1] * src[src_index1] +
cbufy[1] * cbufx[0] * src[src_index2] +
cbufy[1] * cbufx[1] * src[src_index3];
dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS;
}
else if (channels == 2) {
uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride);
uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride);
uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride);
uchar2 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else if (channels == 3) {
uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride);
uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride);
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
uchar3 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else {
uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride);
uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride);
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
uchar4 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
}
__global__
void resizeLinearKernel(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst, int dst_rows,
int dst_cols, int dst_stride, double col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fx = ((element_x + 0.5f) * col_scale - 0.5f);
float fy = ((element_y + 0.5f) * row_scale - 0.5f);
int sx = floor(fx);
int sy = floor(fy);
fx -= sx;
fy -= sy;
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
  int sy_ = INC(sy, src_rows);
float cbufy[2];
cbufy[0] = 1.f - fy;
cbufy[1] = 1.f - cbufy[0];
  int sx_ = INC(sx, src_cols);
float cbufx[2];
cbufx[0] = 1.f - fx;
cbufx[1] = 1.f - cbufx[0];
if (channels == 1) {
int index = sy * src_stride;
float src1 = src[index + sx];
float src2 = src[index + sx_];
float value1 = cbufy[0] * cbufx[0] * src1;
float value2 = cbufy[0] * cbufx[1] * src2;
float sum = 0.f;
sum += value1 + value2;
index = sy_ * src_stride;
src1 = src[index + sx];
src2 = src[index + sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1 + value2;
index = element_y * dst_stride + element_x;
dst[index] = sum;
}
else if (channels == 3) {
int index = sy * src_stride;
float3 src1 = ((float3*)(src + index))[sx];
float3 src2 = ((float3*)(src + index))[sx_];
float3 value1 = cbufy[0] * cbufx[0] * src1;
float3 value2 = cbufy[0] * cbufx[1] * src2;
float3 sum = make_float3(0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float3*)(src + index))[sx];
src2 = ((float3*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float3* output = (float3*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
else {
int index = sy * src_stride;
float4 src1 = ((float4*)(src + index))[sx];
float4 src2 = ((float4*)(src + index))[sx_];
float4 value1 = cbufy[0] * cbufx[0] * src1;
float4 value2 = cbufy[0] * cbufx[1] * src2;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float4*)(src + index))[sx];
src2 = ((float4*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float4* output = (float4*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
}
RetCode resizeLinear(const uchar* src, int src_rows, int src_cols, int channels,
int src_stride, uchar* dst, int dst_rows, int dst_cols,
int dst_stride, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale);
return RC_SUCCESS;
}
RetCode resizeLinear(const float* src, int src_rows, int src_cols, int channels,
int src_stride, float* dst, int dst_rows, int dst_cols,
int dst_stride, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
double col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale);
return RC_SUCCESS;
}
template <>
RetCode ResizeLinear<uchar, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<uchar, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<uchar, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeLinear<float, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
/************************** resizeNearestPoint() ***************************/
template <typename T0, typename T1>
__global__
void resizeNearestPointKernel(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst,
int dst_rows, int dst_cols, int dst_stride,
float col_scale, float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
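  // Nearest-neighbor mapping: truncate the scaled coordinates and clamp them
  // to the last source row/column.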
int sy = element_y * row_scale;
sy = MIN(sy, src_rows - 1);
int sx = element_x * col_scale;
sx = MIN(sx, src_cols - 1);
  T0* input = (T0*)(src + sy * src_stride);
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = input[sx];
}
RetCode resizeNearestPoint(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst,
int dst_rows, int dst_cols, int dst_stride,
cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
if (channels == 1) {
resizeNearestPointKernel<uchar, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else if (channels == 3) {
resizeNearestPointKernel<uchar3, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeNearestPointKernel<uchar4, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
return RC_SUCCESS;
}
RetCode resizeNearestPoint(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst,
int dst_rows, int dst_cols, int dst_stride,
cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 4;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
if (channels == 1) {
resizeNearestPointKernel<float, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else if (channels == 3) {
resizeNearestPointKernel<float3, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeNearestPointKernel<float4, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
return RC_SUCCESS;
}
template <>
RetCode ResizeNearestPoint<uchar, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<uchar, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<uchar, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
template <>
RetCode ResizeNearestPoint<float, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth,
outWidthStride, stream);
return code;
}
/****************************** ResizeArea() *******************************/
template <typename T>
__global__
void resizeAreaKernel0C1(const T* src, int src_rows, int src_cols, int channels,
int src_stride, T* dst, int dst_rows, int dst_cols,
int dst_stride, int col_scale, int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
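  // Integer-ratio box filter: sum every covered source pixel, then divide by
  // the (border-clipped) window area.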
float sum = 0.f;
T* input;
for (int i = y_start; i < y_end; ++i) {
input = (T*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T* output = (T*)(dst + element_y * dst_stride);
if (sizeof(T) == 1) {
output[element_x] = saturate_cast(sum);
}
else {
output[element_x] = sum;
}
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C2(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float2 sum = make_float2(0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float2>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C3(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float3 sum = make_float3(0.f, 0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float3>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel0C4(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, int col_scale,
int row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int x_start = element_x * col_scale;
int y_start = element_y * row_scale;
int x_end = x_start + col_scale;
int y_end = y_start + row_scale;
x_end = (x_end <= src_cols) ? x_end : src_cols;
y_end = (y_end <= src_rows) ? y_end : src_rows;
int area = (x_end - x_start) * (y_end - y_start);
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
T0* input;
for (int i = y_start; i < y_end; ++i) {
input = (T0*)(src + i * src_stride);
for (int j = x_start; j < x_end; ++j) {
sum += input[j];
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float4>(sum);
}
template <typename T>
__global__
void resizeAreaKernel1C1(const T* src, int src_rows, int src_cols, int channels,
int src_stride, T* dst, int dst_rows, int dst_cols,
int dst_stride, float col_scale, float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
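  // The source window [fsx1, fsx2) x [fsy1, fsy2) has fractional edges; the
  // partial border rows/columns below are weighted by their coverage before
  // dividing by the clipped window area.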
T* input;
float sum = 0.f;
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
sum = sum + input[sx1 - 1] * (sy1 - fsy1) * (sx1 - fsx1);
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx] * (sy1 - fsy1);
}
if (fsx2 - sx2 > 1e-3) {
sum = sum + input[sx2] * (sy1 - fsy1) * (fsx2 - sx2);
}
}
input = (T*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
      sum = sum + input[sx1 - 1] * (sx1 - fsx1);
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx];
}
if (fsx2 - sx2 > 1e-3) {
      sum = sum + input[sx2] * (fsx2 - sx2);
}
input += src_stride;
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
sum = sum + input[sx1 - 1] * (fsy2 - sy2) * (sx1 - fsx1);
}
for (int dx = sx1; dx < sx2; ++dx) {
sum = sum + input[dx] * (fsy2 - sy2);
}
if (fsx2 - sx2 > 1e-3) {
sum = sum + input[sx2] * (fsy2 - sy2) * (fsx2 - sx2);
}
}
sum = sum / area;
T* output = (T*)(dst + element_y * dst_stride);
if (sizeof(T) == 1) {
output[element_x] = saturate_cast(sum);
}
else {
output[element_x] = sum;
}
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C2(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float2 value;
float2 sum = make_float2(0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float2>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C3(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float3 value;
float3 sum = make_float3(0.f, 0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float3>(sum);
}
template <typename T0, typename T1>
__global__
void resizeAreaKernel1C4(const T1* src, int src_rows, int src_cols,
int channels, int src_stride, T1* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
float fsy1 = element_y * row_scale;
float fsy2 = fsy1 + row_scale;
int sy1 = ceilf(fsy1);
int sy2 = floorf(fsy2);
float fsx1 = element_x * col_scale;
float fsx2 = fsx1 + col_scale;
int sx1 = ceilf(fsx1);
int sx2 = floorf(fsx2);
T0* input;
float4 value;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
float area = fminf(col_scale, src_cols - fsx1) *
fminf(row_scale, src_rows - fsy1);
if (sy1 - fsy1 > 1e-3) {
input = (T0*)(src + (sy1 - 1) * src_stride);
if (sx1 - fsx1 > 1e-3) {
value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (sy1 - fsy1) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
input = (T0*)(src + sy1 * src_stride);
for (int dy = sy1; dy < sy2; ++dy) {
if (sx1 - fsx1 > 1e-3) {
value = (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
sum += input[dx];
}
if (fsx2 - sx2 > 1e-3) {
value = (fsx2 - sx2) * input[sx2];
sum += value;
}
input = (T0*)((T1*)input + src_stride);
}
if (fsy2 - sy2 > 1e-3) {
if (sx1 - fsx1 > 1e-3) {
value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1];
sum += value;
}
for (int dx = sx1; dx < sx2; ++dx) {
value = (fsy2 - sy2) * input[dx];
sum += value;
}
if (fsx2 - sx2 > 1e-3) {
value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2];
sum += value;
}
}
sum /= area;
T0* output = (T0*)(dst + element_y * dst_stride);
output[element_x] = saturate_cast_vector<T0, float4>(sum);
}
__global__
void resizeAreaKernel2(const uchar* src, int src_rows, int src_cols,
int channels, int src_stride, uchar* dst, int dst_rows,
int dst_cols, int dst_stride, float col_scale,
float row_scale, float inv_col_scale,
float inv_row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int sy = floor(element_y * row_scale);
int sx = floor(element_x * col_scale);
float fy = element_y + 1 - (sy + 1) * inv_row_scale;
float fx = element_x + 1 - (sx + 1) * inv_col_scale;
fy = fy <= 0 ? 0.f : fy - floor(fy);
fx = fx <= 0 ? 0.f : fx - floor(fx);
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
int sy_ = INC(sy, src_rows);
int cbufy[2];
fy = fy * INTER_RESIZE_COEF_SCALE;
cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy);
cbufy[1] = rint(fy);
int sx_ = INC(sx, src_cols);
int cbufx[2];
fx = fx * INTER_RESIZE_COEF_SCALE;
  cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - fx);
cbufx[1] = rint(fx);
if (channels == 1) {
int src_index0 = sy * src_stride + sx;
int src_index1 = sy * src_stride + sx_;
int src_index2 = sy_ * src_stride + sx;
int src_index3 = sy_ * src_stride + sx_;
int dst_index = element_y * dst_stride + element_x;
int sum = 0;
sum = cbufy[0] * cbufx[0] * src[src_index0] +
cbufy[0] * cbufx[1] * src[src_index1] +
cbufy[1] * cbufx[0] * src[src_index2] +
cbufy[1] * cbufx[1] * src[src_index3];
dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS;
}
else if (channels == 2) {
uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride);
uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride);
uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride);
uchar2 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else if (channels == 3) {
uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride);
uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride);
uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride);
uchar3 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
else {
uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride);
uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride);
uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride);
uchar4 t[2][2];
t[0][0] = input0[sx];
t[0][1] = input0[sx_];
t[1][0] = input1[sx];
t[1][1] = input1[sx_];
output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0],
cbufy[1]);
}
}
__global__
void resizeAreaKernel2(const float* src, int src_rows, int src_cols,
int channels, int src_stride, float* dst, int dst_rows,
int dst_cols, int dst_stride, double col_scale,
float row_scale, float inv_col_scale,
float inv_row_scale) {
int element_x = blockIdx.x * blockDim.x + threadIdx.x;
int element_y = blockIdx.y * blockDim.y + threadIdx.y;
if (element_y >= dst_rows || element_x >= dst_cols) {
return;
}
int sy = floor(element_y * row_scale);
int sx = floor(element_x * col_scale);
float fy = element_y + 1 - (sy + 1) * inv_row_scale;
float fx = element_x + 1 - (sx + 1) * inv_col_scale;
fy = fy <= 0 ? 0.f : fy - floor(fy);
fx = fx <= 0 ? 0.f : fx - floor(fx);
if (sy < 0) {
sy = 0;
fy = 0;
}
if (sx < 0) {
sx = 0;
fx = 0;
}
if (sy >= src_rows) {
sy = src_rows - 1;
fy = 0;
}
if (sx >= src_cols) {
sx = src_cols - 1;
fx = 0;
}
  int sy_ = INC(sy, src_rows);
float cbufy[2];
cbufy[0] = 1.f - fy;
cbufy[1] = 1.f - cbufy[0];
  int sx_ = INC(sx, src_cols);
float cbufx[2];
cbufx[0] = 1.f - fx;
cbufx[1] = 1.f - cbufx[0];
if (channels == 1) {
int index = sy * src_stride;
float src1 = src[index + sx];
float src2 = src[index + sx_];
float value1 = cbufy[0] * cbufx[0] * src1;
float value2 = cbufy[0] * cbufx[1] * src2;
float sum = 0.f;
sum += value1 + value2;
index = sy_ * src_stride;
src1 = src[index + sx];
src2 = src[index + sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1 + value2;
index = element_y * dst_stride + element_x;
dst[index] = sum;
}
else if (channels == 3) {
int index = sy * src_stride;
float3 src1 = ((float3*)(src + index))[sx];
float3 src2 = ((float3*)(src + index))[sx_];
float3 value1 = cbufy[0] * cbufx[0] * src1;
float3 value2 = cbufy[0] * cbufx[1] * src2;
float3 sum = make_float3(0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float3*)(src + index))[sx];
src2 = ((float3*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float3* output = (float3*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
else {
int index = sy * src_stride;
float4 src1 = ((float4*)(src + index))[sx];
float4 src2 = ((float4*)(src + index))[sx_];
float4 value1 = cbufy[0] * cbufx[0] * src1;
float4 value2 = cbufy[0] * cbufx[1] * src2;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
sum += value1;
sum += value2;
index = sy_ * src_stride;
src1 = ((float4*)(src + index))[sx];
src2 = ((float4*)(src + index))[sx_];
value1 = cbufy[1] * cbufx[0] * src1;
value2 = cbufy[1] * cbufx[1] * src2;
sum += value1;
sum += value2;
float4* output = (float4*)(dst + element_y * dst_stride);
output[element_x] = sum;
}
}
RetCode resizeArea(const uchar* src, int src_rows, int src_cols, int channels,
int src_stride, uchar* dst, int dst_rows, int dst_cols,
int dst_stride, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
float col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
float inv_col_scale = 1.0 / col_scale;
float inv_row_scale = 1.0 / row_scale;
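  // kernel0 handles exact integer downscales, kernel1 handles fractional
  // downscales, and kernel2 (bilinear sampling) covers the remaining cases.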
if (src_cols > dst_cols && src_rows > dst_rows) {
if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) {
if (channels == 1) {
resizeAreaKernel0C1<uchar><<<grid, block, 0, stream>>>(src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
resizeAreaKernel0C3<uchar3, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeAreaKernel0C4<uchar4, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
else {
if (channels == 1) {
resizeAreaKernel1C1<uchar><<<grid, block, 0, stream>>>(src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
resizeAreaKernel1C3<uchar3, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeAreaKernel1C4<uchar4, uchar><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
}
else {
resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale, inv_col_scale, inv_row_scale);
}
return RC_SUCCESS;
}
RetCode resizeArea(const float* src, int src_rows, int src_cols, int channels,
int src_stride, float* dst, int dst_rows, int dst_cols,
int dst_stride, cudaStream_t stream) {
if (src == nullptr || dst == nullptr || src_rows < 1 || src_cols < 1 ||
dst_rows < 1 || dst_cols < 1 ||
(channels != 1 && channels != 3 && channels != 4) ||
src_stride < src_cols * channels ||
dst_stride < dst_cols * channels) {
return RC_INVALID_VALUE;
}
if (src_rows == dst_rows && src_cols == dst_cols &&
src_stride == dst_stride) {
if (src != dst) {
cudaError_t code;
      code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float),
                             cudaMemcpyDeviceToDevice, stream);
if (code != cudaSuccess) {
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
const int kBlockX = 32;
const int kBlockY = 16;
dim3 block(kBlockX, kBlockY);
dim3 grid;
  grid.x = (dst_cols + kBlockX - 1) / kBlockX;
grid.y = (dst_rows + kBlockY - 1) / kBlockY;
double col_scale = (double)src_cols / dst_cols;
float row_scale = (double)src_rows / dst_rows;
float inv_col_scale = 1.0 / col_scale;
float inv_row_scale = 1.0 / row_scale;
if (src_cols > dst_cols && src_rows > dst_rows) {
if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) {
if (channels == 1) {
resizeAreaKernel0C1<float><<<grid, block, 0, stream>>>(src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
resizeAreaKernel0C3<float3, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeAreaKernel0C4<float4, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
else {
if (channels == 1) {
resizeAreaKernel1C1<float><<<grid, block, 0, stream>>>(src, src_rows,
src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride,
col_scale, row_scale);
}
else if (channels == 3) {
resizeAreaKernel1C3<float3, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
else {
resizeAreaKernel1C4<float4, float><<<grid, block, 0, stream>>>(src,
src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols,
dst_stride, col_scale, row_scale);
}
}
}
else {
resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols,
channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale,
row_scale, inv_col_scale, inv_row_scale);
}
return RC_SUCCESS;
}
template <>
RetCode ResizeArea<uchar, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<uchar, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<uchar, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const uchar* inData,
int outHeight,
int outWidth,
int outWidthStride,
uchar* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 1>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 3>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
template <>
RetCode ResizeArea<float, 4>(cudaStream_t stream,
int inHeight,
int inWidth,
int inWidthStride,
const float* inData,
int outHeight,
int outWidth,
int outWidthStride,
float* outData) {
RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride,
outData, outHeight, outWidth, outWidthStride,
stream);
return code;
}
} // namespace cuda
} // namespace cv
} // namespace ppl
|
ae0f3cc67dee0245f73cc1f8ab5bcadb55303178.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re_hip.cuh"
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <regex>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Return the capturing group index pattern to use with the given replacement string.
*
 * Only two patterns are supported at this time: `\d` and `${d}`, where `d` is an integer in
 * the range 1-99. The `\d` pattern is returned by default; if no `\d` pattern is found in
 * the `repl` string, the `${d}` pattern is returned instead.
*
* Reference: https://www.regular-expressions.info/refreplacebackref.html
*/
std::string get_backref_pattern(std::string const& repl)
{
std::string const backslash_pattern = "\\\\(\\d+)";
std::string const bracket_pattern = "\\$\\{(\\d+)\\}";
std::smatch m;
return std::regex_search(repl, m, std::regex(backslash_pattern)) ? backslash_pattern
: bracket_pattern;
}
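// Illustrative only (not from the original source): for repl == "\\2-\\1" a
// backslash-digit token is present, so backslash_pattern is returned; for
// repl == "${1}:${2}" no such token exists, so bracket_pattern is returned.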
/**
* @brief Parse the back-ref index and position values from a given replace format.
*
* The back-ref numbers are expected to be 1-based.
*
* Returns a modified string without back-ref indicators and a vector of back-ref
* byte position pairs. These are used by the device code to build the output
* string by placing the captured group elements into the replace format.
*
* For example, for input string 'hello \2 and \1' the returned `backref_type` vector
* contains `[(2,6),(1,11)]` and the returned string is 'hello and '.
*/
std::pair<std::string, std::vector<backref_type>> parse_backrefs(std::string const& repl)
{
std::vector<backref_type> backrefs;
std::string str = repl; // make a modifiable copy
std::smatch m;
std::regex ex(get_backref_pattern(repl));
std::string rtn;
size_type byte_offset = 0;
while (std::regex_search(str, m, ex) && !m.empty()) {
// parse the back-ref index number
size_type const index = static_cast<size_type>(std::atoi(std::string{m[1]}.c_str()));
CUDF_EXPECTS(index > 0 && index < 100, "Group index numbers must be in the range 1-99");
// store the new byte offset and index value
size_type const position = static_cast<size_type>(m.position(0));
byte_offset += position;
backrefs.push_back({index, byte_offset});
// update the output string
rtn += str.substr(0, position);
// remove the back-ref pattern to continue parsing
str = str.substr(position + static_cast<size_type>(m.length(0)));
}
if (!str.empty()) // add the remainder
rtn += str; // of the string
return {rtn, backrefs};
}
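// Tracing the example above: "hello \2 and \1" first matches "\2" at byte 6
// (byte_offset = 6), leaving " and \1"; "\1" then matches at byte 5 of the
// remainder (byte_offset = 11), producing backrefs [(2,6),(1,11)] and the
// template string "hello  and ".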
} // namespace
//
std::unique_ptr<column> replace_with_backrefs(
strings_column_view const& strings,
std::string const& pattern,
std::string const& replacement,
regex_flags const flags,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
if (strings.is_empty()) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(!pattern.empty(), "Parameter pattern must not be empty");
CUDF_EXPECTS(!replacement.empty(), "Parameter replacement must not be empty");
auto d_strings = column_device_view::create(strings.parent(), stream);
// compile regex into device object
auto d_prog =
reprog_device::create(pattern, flags, get_character_flags_table(), strings.size(), stream);
auto const regex_insts = d_prog->insts_counts();
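  // The instruction count selects one of the fixed regex stack sizes below
  // (small/medium/large/any), so each thread reserves only as much evaluator
  // state as the compiled program needs.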
// parse the repl string for back-ref indicators
auto const parse_result = parse_backrefs(replacement);
rmm::device_uvector<backref_type> backrefs =
cudf::detail::make_device_uvector_async(parse_result.second, stream);
string_scalar repl_scalar(parse_result.first, true, stream);
string_view const d_repl_template = repl_scalar.value();
using BackRefIterator = decltype(backrefs.begin());
// create child columns
auto [offsets, chars] = [&] {
if (regex_insts <= RX_SMALL_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_SMALL>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else if (regex_insts <= RX_MEDIUM_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_MEDIUM>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else if (regex_insts <= RX_LARGE_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_LARGE>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_ANY>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
}
}();
return make_strings_column(strings.size(),
std::move(offsets),
std::move(chars),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
} // namespace detail
// external API
std::unique_ptr<column> replace_with_backrefs(strings_column_view const& strings,
std::string const& pattern,
std::string const& replacement,
regex_flags const flags,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_with_backrefs(
strings, pattern, replacement, flags, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| ae0f3cc67dee0245f73cc1f8ab5bcadb55303178.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re.cuh"
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <regex>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Return the capturing group index pattern to use with the given replacement string.
*
* Only two patterns are supported at this time `\d` and `${d}` where `d` is an integer in
* the range 1-99. The `\d` pattern is returned by default unless no `\d` pattern is found in
 * the `repl` string.
*
* Reference: https://www.regular-expressions.info/refreplacebackref.html
*/
std::string get_backref_pattern(std::string const& repl)
{
std::string const backslash_pattern = "\\\\(\\d+)";
std::string const bracket_pattern = "\\$\\{(\\d+)\\}";
std::smatch m;
return std::regex_search(repl, m, std::regex(backslash_pattern)) ? backslash_pattern
: bracket_pattern;
}
/**
* @brief Parse the back-ref index and position values from a given replace format.
*
* The back-ref numbers are expected to be 1-based.
*
* Returns a modified string without back-ref indicators and a vector of back-ref
* byte position pairs. These are used by the device code to build the output
* string by placing the captured group elements into the replace format.
*
* For example, for input string 'hello \2 and \1' the returned `backref_type` vector
* contains `[(2,6),(1,11)]` and the returned string is 'hello and '.
*/
std::pair<std::string, std::vector<backref_type>> parse_backrefs(std::string const& repl)
{
std::vector<backref_type> backrefs;
std::string str = repl; // make a modifiable copy
std::smatch m;
std::regex ex(get_backref_pattern(repl));
std::string rtn;
size_type byte_offset = 0;
while (std::regex_search(str, m, ex) && !m.empty()) {
// parse the back-ref index number
size_type const index = static_cast<size_type>(std::atoi(std::string{m[1]}.c_str()));
CUDF_EXPECTS(index > 0 && index < 100, "Group index numbers must be in the range 1-99");
// store the new byte offset and index value
size_type const position = static_cast<size_type>(m.position(0));
byte_offset += position;
backrefs.push_back({index, byte_offset});
// update the output string
rtn += str.substr(0, position);
// remove the back-ref pattern to continue parsing
str = str.substr(position + static_cast<size_type>(m.length(0)));
}
if (!str.empty()) // add the remainder
rtn += str; // of the string
return {rtn, backrefs};
}
} // namespace
//
std::unique_ptr<column> replace_with_backrefs(
strings_column_view const& strings,
std::string const& pattern,
std::string const& replacement,
regex_flags const flags,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
if (strings.is_empty()) return make_empty_column(type_id::STRING);
CUDF_EXPECTS(!pattern.empty(), "Parameter pattern must not be empty");
CUDF_EXPECTS(!replacement.empty(), "Parameter replacement must not be empty");
auto d_strings = column_device_view::create(strings.parent(), stream);
// compile regex into device object
auto d_prog =
reprog_device::create(pattern, flags, get_character_flags_table(), strings.size(), stream);
auto const regex_insts = d_prog->insts_counts();
// parse the repl string for back-ref indicators
auto const parse_result = parse_backrefs(replacement);
rmm::device_uvector<backref_type> backrefs =
cudf::detail::make_device_uvector_async(parse_result.second, stream);
string_scalar repl_scalar(parse_result.first, true, stream);
string_view const d_repl_template = repl_scalar.value();
using BackRefIterator = decltype(backrefs.begin());
// create child columns
auto [offsets, chars] = [&] {
if (regex_insts <= RX_SMALL_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_SMALL>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else if (regex_insts <= RX_MEDIUM_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_MEDIUM>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else if (regex_insts <= RX_LARGE_INSTS) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_LARGE>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
} else {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_ANY>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
stream,
mr);
}
}();
return make_strings_column(strings.size(),
std::move(offsets),
std::move(chars),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr));
}
} // namespace detail
// external API
std::unique_ptr<column> replace_with_backrefs(strings_column_view const& strings,
std::string const& pattern,
std::string const& replacement,
regex_flags const flags,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_with_backrefs(
strings, pattern, replacement, flags, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
18330453a4a403da3c1c447db24bacc6cf27a9e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h> //Import libraries and files
#include <stdlib.h>
#include "kernel.hip"
int main (){
/*
Allocate memory for the database on the host and the device
*/
/*make an array of char 0-9 and A-Z and randomize it*/
	char Array[36] = {'0','1','2','3','4','5','6','7','8','9',
	                  'A','B','C','D','E','F','G','H','I','J','K','L','M',
	                  'N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
/*
random variable that is 0 or 1
if 0 generate a random integer (0-9)
if 1 generate a random letter(A-Z)
fill two dimensional array with these characters
*/
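	/* Editor's sketch of the fill described above (an assumption -- it is never
	   implemented in this file): for each cell, draw rand()%2 and index into
	   Array accordingly, e.g.
	       char c = (rand()%2 == 0) ? Array[rand()%10]        // digit 0-9
	                                : Array[10 + rand()%26];  // letter A-Z
	*/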
int seed = 1;
srand(seed);
int M = 35;
	int A_col = 7;
	int A_row = 100;
	unsigned int A_sz = A_row*A_col;
	unsigned int B_sz = A_row;
	float *A_h = (float*) malloc( sizeof(float)*(A_sz) );
	for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
	float *B_h = (float*) malloc( sizeof(float)*B_sz );
	for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
	Timer timer;  // assumption: Timer, stopTime, and elapsedTime are declared in kernel.hip (startTime is never called here)
	stopTime(&timer); printf("%f s\n", elapsedTime(timer));
	printf(" size of vector: %u\n ", VecSize);  // VecSize is assumed to be defined in kernel.hip
}
| 18330453a4a403da3c1c447db24bacc6cf27a9e4.cu | #include <stdio.h> //Import libraries and files
#include <stdlib.h>
#include "kernel.cu"
int main (){
/*
Allocate memory for the database on the host and the device
*/
/*make an array of char 0-9 and A-Z and randomize it*/
	char Array[36] = {'0','1','2','3','4','5','6','7','8','9',
	                  'A','B','C','D','E','F','G','H','I','J','K','L','M',
	                  'N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
/*
random variable that is 0 or 1
if 0 generate a random integer (0-9)
if 1 generate a random letter(A-Z)
fill two dimensional array with these characters
*/
int seed = 1;
srand(seed);
int M = 35;
	int A_col = 7;
	int A_row = 100;
	unsigned int A_sz = A_row*A_col;
	unsigned int B_sz = A_row;
	float *A_h = (float*) malloc( sizeof(float)*(A_sz) );
	for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
	float *B_h = (float*) malloc( sizeof(float)*B_sz );
	for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
	Timer timer;  // assumption: Timer, stopTime, and elapsedTime are declared in kernel.cu (startTime is never called here)
	stopTime(&timer); printf("%f s\n", elapsedTime(timer));
	printf(" size of vector: %u\n ", VecSize);  // VecSize is assumed to be defined in kernel.cu
}
|
6ac97d7318ad6f7e6ad156ebfa225b29bac19e07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _CRT_SECURE_NO_WARNINGS
#include <stdlib.h>
#include <stdio.h>
#include "sentinel-fileutilsmsg.h"
#define isdecimal(ch) ((ch) >= '0' && (ch) <= '9')
struct group { short gr_gid; };
__device__ struct group *getgrnam(char *name) { return nullptr; }
__device__ __managed__ struct group *m_getgrnam_rc;
__global__ void g_getgrnam(char *name)
{
m_getgrnam_rc = getgrnam(name);
}
struct group *getgrnam_(char *str)
{
size_t strLength = strlen(str) + 1;
char *d_str;
hipMalloc(&d_str, strLength);
hipMemcpy(d_str, str, strLength, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( g_getgrnam), dim3(1),dim3(1), 0, 0, d_str);
hipFree(d_str);
return m_getgrnam_rc;
}
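// Editor's note: getgrnam_ above is the marshalling pattern used by this
// sentinel layer -- copy the host string into device memory, launch a
// single-thread kernel, and read the result back through the __managed__
// variable m_getgrnam_rc, which is visible to both host and device.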
__forceinline int dchgrp_(char *str, int gid) { fileutils_dchgrp msg(str, gid); return msg.RC; }
int main(int argc, char **argv)
{
atexit(sentinelClientShutdown);
sentinelClientInitialize();
char *cp = argv[1];
int gid;
struct group *grp;
if (isdecimal(*cp)) {
gid = 0;
while (isdecimal(*cp))
gid = gid * 10 + (*cp++ - '0');
if (*cp) {
fprintf(stderr, "Bad gid value\n");
exit(1);
}
}
else {
grp = getgrnam_(cp);
if (!grp) {
fprintf(stderr, "Unknown group name\n");
exit(1);
}
gid = grp->gr_gid;
}
//
argc--;
argv++;
while (argc-- > 1) {
argv++;
if (dchgrp_(*argv, gid))
perror(*argv);
}
exit(0);
}
| 6ac97d7318ad6f7e6ad156ebfa225b29bac19e07.cu | #define _CRT_SECURE_NO_WARNINGS
#include <stdlib.h>
#include <stdio.h>
#include "sentinel-fileutilsmsg.h"
#define isdecimal(ch) ((ch) >= '0' && (ch) <= '9')
struct group { short gr_gid; };
__device__ struct group *getgrnam(char *name) { return nullptr; }
__device__ __managed__ struct group *m_getgrnam_rc;
__global__ void g_getgrnam(char *name)
{
m_getgrnam_rc = getgrnam(name);
}
struct group *getgrnam_(char *str)
{
size_t strLength = strlen(str) + 1;
char *d_str;
cudaMalloc(&d_str, strLength);
cudaMemcpy(d_str, str, strLength, cudaMemcpyHostToDevice);
g_getgrnam<<<1,1>>>(d_str);
cudaFree(d_str);
return m_getgrnam_rc;
}
__forceinline int dchgrp_(char *str, int gid) { fileutils_dchgrp msg(str, gid); return msg.RC; }
int main(int argc, char **argv)
{
atexit(sentinelClientShutdown);
sentinelClientInitialize();
char *cp = argv[1];
int gid;
struct group *grp;
if (isdecimal(*cp)) {
gid = 0;
while (isdecimal(*cp))
gid = gid * 10 + (*cp++ - '0');
if (*cp) {
fprintf(stderr, "Bad gid value\n");
exit(1);
}
}
else {
grp = getgrnam_(cp);
if (!grp) {
fprintf(stderr, "Unknown group name\n");
exit(1);
}
gid = grp->gr_gid;
}
//
argc--;
argv++;
while (argc-- > 1) {
argv++;
if (dchgrp_(*argv, gid))
perror(*argv);
}
exit(0);
}
|
b991de98d32e297ddc27fd4cc8ac482739a4013c.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <iostream>
#include <assert.h>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 1
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
int main()
{
hipSetDevice(MYDEVICE);
// pointer and dimension for host memory
int dimA = 8;
float *h_a;
// pointers for device memory
float *d_a, *d_b;
// allocate and initialize host memory
// Bonus: try using hipHostMalloc in place of malloc
// it has the same syntax as hipMalloc, but it enables asynchronous copies
h_a = (float *) malloc(dimA*sizeof(float));
for (int i = 0; i<dimA; ++i)
{
h_a[i] = i;
}
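    // Editor's sketch of the bonus mentioned above (assumes the standard HIP
    // runtime calls hipHostMalloc/hipHostFree): replace the malloc/free pair with
    //   hipHostMalloc((void**)&h_a, dimA*sizeof(float));
    //   ...
    //   hipHostFree(h_a);
    // The hipMemcpy calls below stay unchanged; pinned memory merely allows
    // them to run asynchronously when paired with hipMemcpyAsync.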
// Part 1 of 5: allocate device memory
size_t memSize = dimA*sizeof(float);
hipMalloc(&d_a, memSize);
hipMalloc(&d_b, memSize);
// Part 2 of 5: host to device memory copy
hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice);
// Part 3 of 5: device to device memory copy
hipMemcpy(d_b, d_a, memSize, hipMemcpyDeviceToDevice);
// clear host memory
for (int i=0; i<dimA; ++i )
{
h_a[i] = 0.f;
}
// Part 4 of 5: device to host copy
hipMemcpy(h_a, d_b, memSize, hipMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("hipMemcpy calls");
// verify the data on the host is correct
for (int i=0; i<dimA; ++i)
{
assert(h_a[i] == (float) i);
}
// Part 5 of 5: free device memory pointers d_a and d_b
hipFree(d_a);
hipFree(d_b);
// Check for any CUDA errors
checkCUDAError("hipFree");
// free host memory pointer h_a
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
std::cout << "Correct!" << std::endl;
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
std::cerr << "Cuda error: " << msg << " " << hipGetErrorString(err) << std::endl;
exit(-1);
}
}
| b991de98d32e297ddc27fd4cc8ac482739a4013c.cu | // includes, system
#include <iostream>
#include <assert.h>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 1
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
int main()
{
cudaSetDevice(MYDEVICE);
// pointer and dimension for host memory
int dimA = 8;
float *h_a;
// pointers for device memory
float *d_a, *d_b;
// allocate and initialize host memory
// Bonus: try using cudaMallocHost in place of malloc
// it has the same syntax as cudaMalloc, but it enables asynchronous copies
h_a = (float *) malloc(dimA*sizeof(float));
for (int i = 0; i<dimA; ++i)
{
h_a[i] = i;
}
// Part 1 of 5: allocate device memory
size_t memSize = dimA*sizeof(float);
cudaMalloc(&d_a, memSize);
cudaMalloc(&d_b, memSize);
// Part 2 of 5: host to device memory copy
cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
// Part 3 of 5: device to device memory copy
cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice);
// clear host memory
for (int i=0; i<dimA; ++i )
{
h_a[i] = 0.f;
}
// Part 4 of 5: device to host copy
cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy calls");
// verify the data on the host is correct
for (int i=0; i<dimA; ++i)
{
assert(h_a[i] == (float) i);
}
// Part 5 of 5: free device memory pointers d_a and d_b
cudaFree(d_a);
cudaFree(d_b);
// Check for any CUDA errors
checkCUDAError("cudaFree");
// free host memory pointer h_a
free(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
std::cout << "Correct!" << std::endl;
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(err) << std::endl;
exit(-1);
}
}
|
6ce7b60fed10ae6da82064cfb3f33a4e6450244c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "OverlayKernel.h"
#define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x)
#define CLAMP_INT8(x) x < -128 ? -128 : (x > 127 ? 127 : x)
/* __fsub_rn(overlay_pixel, 16) was added for libstreamer because its setRGBAData adds 16 to the y component; don't use this function for any other purpose */
#define OVERLAY_Y_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp_ = __fmul_rn(overlay_pixel, alpha); \
temp_ = __fadd_rn(src_pixel, temp_); \
dst_pixel = CLAMP_255(temp_); \
} while(0)
#define UV_OVERLAY_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp = __fmul_rn(__fsub_rn(overlay_pixel, 128), alpha); \
temp = __fadd_rn(__fsub_rn(src_pixel, 128), temp); \
dst_pixel = 128 + (CLAMP_INT8(temp)); \
} while(0)
#define OVERLAY_Y(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp = __fsub_rn(overlay_pixel, 16); \
if(alpha != -1) \
{ \
OVERLAY_Y_ALPHA(src_pixel, temp, dst_pixel, alpha); \
break; \
} \
if(temp == 0) \
{ \
dst_pixel = src_pixel; \
} \
else \
{ \
dst_pixel = CLAMP_255(temp); \
} \
} while(0)
#define UV_OVERLAY(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
if(alpha != -1) \
{ \
UV_OVERLAY_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha); \
break; \
} \
dst_pixel = overlay_pixel == 128 ? src_pixel:overlay_pixel; \
} while(0)
__global__ void yuvOverlayKernel(const uchar4* Y, const uchar4* U, const uchar4* V, const uchar4* overlay_y, const uchar4* overlay_u, const uchar4* overlay_v, uchar4* Yout, uchar4* Uout, uchar4* Vout,
float alpha, int step_y, int step_uv, int overlayStep_y, int overlayStep_uv, int width_y, int height_y, int width_uv, int height_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width_y || y >= height_y)
{
return;
}
int offset = y * step_y + x;
int overlayOffset = y * overlayStep_y + x;
OVERLAY_Y(Y[offset].x, overlay_y[overlayOffset].x, Yout[offset].x, alpha);
OVERLAY_Y(Y[offset].y, overlay_y[overlayOffset].y, Yout[offset].y, alpha);
OVERLAY_Y(Y[offset].z, overlay_y[overlayOffset].z, Yout[offset].z, alpha);
OVERLAY_Y(Y[offset].w, overlay_y[overlayOffset].w, Yout[offset].w, alpha);
if(x >= width_uv || y >= height_uv)
{
return;
}
offset = y * step_uv + x;
overlayOffset = y * overlayStep_uv + x;
UV_OVERLAY(U[offset].x, overlay_u[overlayOffset].x, Uout[offset].x, alpha);
UV_OVERLAY(U[offset].y, overlay_u[overlayOffset].y, Uout[offset].y, alpha);
UV_OVERLAY(U[offset].z, overlay_u[overlayOffset].z, Uout[offset].z, alpha);
UV_OVERLAY(U[offset].w, overlay_u[overlayOffset].w, Uout[offset].w, alpha);
UV_OVERLAY(V[offset].x, overlay_v[overlayOffset].x, Vout[offset].x, alpha);
UV_OVERLAY(V[offset].y, overlay_v[overlayOffset].y, Vout[offset].y, alpha);
UV_OVERLAY(V[offset].z, overlay_v[overlayOffset].z, Vout[offset].z, alpha);
UV_OVERLAY(V[offset].w, overlay_v[overlayOffset].w, Vout[offset].w, alpha);
}
void launchYUVOverlayKernel(const Npp8u* src[3], const Npp8u* overlay[3], Npp8u* dst[3], Npp32f alpha, int srcStep[2], int overlayStep[2], NppiSize size, hipStream_t stream)
{
auto mod = size.width % 8;
if (mod != 0)
{
// we would just process few extra pixels - step is anyway bigger than width and is aligned by 512/256
size.width += 8 - mod;
}
auto width = size.width >> 2;
int srcStep_y = srcStep[0] >> 2;
int srcStep_uv = srcStep[1] >> 2;
int overlayStep_y = overlayStep[0] >> 2;
int overlayStep_uv = overlayStep[1] >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuvOverlayKernel << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src[0]),
reinterpret_cast<const uchar4*>(src[1]),
reinterpret_cast<const uchar4*>(src[2]),
reinterpret_cast<const uchar4*>(overlay[0]),
reinterpret_cast<const uchar4*>(overlay[1]),
reinterpret_cast<const uchar4*>(overlay[2]),
reinterpret_cast<uchar4*>(dst[0]),
reinterpret_cast<uchar4*>(dst[1]),
reinterpret_cast<uchar4*>(dst[2]),
alpha, srcStep_y, srcStep_uv, overlayStep_y, overlayStep_uv, width, size.height, width >> 1, size.height >> 1);
} | 6ce7b60fed10ae6da82064cfb3f33a4e6450244c.cu | #include "OverlayKernel.h"
#define CLAMP_255(x) x < 0 ? 0 : (x > 255 ? 255 : x)
#define CLAMP_INT8(x) x < -128 ? -128 : (x > 127 ? 127 : x)
/* __fsub_rn(overlay_pixel, 16) was added for libstreamer because in libstreamer setRGBAData for y componenet 16 was added so dont use this function for any other purpose */
#define OVERLAY_Y_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp_ = __fmul_rn(overlay_pixel, alpha); \
temp_ = __fadd_rn(src_pixel, temp_); \
dst_pixel = CLAMP_255(temp_); \
} while(0)
#define UV_OVERLAY_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp = __fmul_rn(__fsub_rn(overlay_pixel, 128), alpha); \
temp = __fadd_rn(__fsub_rn(src_pixel, 128), temp); \
dst_pixel = 128 + (CLAMP_INT8(temp)); \
} while(0)
#define OVERLAY_Y(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
Npp32f temp = __fsub_rn(overlay_pixel, 16); \
if(alpha != -1) \
{ \
OVERLAY_Y_ALPHA(src_pixel, temp, dst_pixel, alpha); \
break; \
} \
if(temp == 0) \
{ \
dst_pixel = src_pixel; \
} \
else \
{ \
dst_pixel = CLAMP_255(temp); \
} \
} while(0)
#define UV_OVERLAY(src_pixel, overlay_pixel, dst_pixel, alpha) \
do \
{ \
if(alpha != -1) \
{ \
UV_OVERLAY_ALPHA(src_pixel, overlay_pixel, dst_pixel, alpha); \
break; \
} \
dst_pixel = overlay_pixel == 128 ? src_pixel:overlay_pixel; \
} while(0)
__global__ void yuvOverlayKernel(const uchar4* Y, const uchar4* U, const uchar4* V, const uchar4* overlay_y, const uchar4* overlay_u, const uchar4* overlay_v, uchar4* Yout, uchar4* Uout, uchar4* Vout,
float alpha, int step_y, int step_uv, int overlayStep_y, int overlayStep_uv, int width_y, int height_y, int width_uv, int height_uv)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width_y || y >= height_y)
{
return;
}
int offset = y * step_y + x;
int overlayOffset = y * overlayStep_y + x;
OVERLAY_Y(Y[offset].x, overlay_y[overlayOffset].x, Yout[offset].x, alpha);
OVERLAY_Y(Y[offset].y, overlay_y[overlayOffset].y, Yout[offset].y, alpha);
OVERLAY_Y(Y[offset].z, overlay_y[overlayOffset].z, Yout[offset].z, alpha);
OVERLAY_Y(Y[offset].w, overlay_y[overlayOffset].w, Yout[offset].w, alpha);
if(x >= width_uv || y >= height_uv)
{
return;
}
offset = y * step_uv + x;
overlayOffset = y * overlayStep_uv + x;
UV_OVERLAY(U[offset].x, overlay_u[overlayOffset].x, Uout[offset].x, alpha);
UV_OVERLAY(U[offset].y, overlay_u[overlayOffset].y, Uout[offset].y, alpha);
UV_OVERLAY(U[offset].z, overlay_u[overlayOffset].z, Uout[offset].z, alpha);
UV_OVERLAY(U[offset].w, overlay_u[overlayOffset].w, Uout[offset].w, alpha);
UV_OVERLAY(V[offset].x, overlay_v[overlayOffset].x, Vout[offset].x, alpha);
UV_OVERLAY(V[offset].y, overlay_v[overlayOffset].y, Vout[offset].y, alpha);
UV_OVERLAY(V[offset].z, overlay_v[overlayOffset].z, Vout[offset].z, alpha);
UV_OVERLAY(V[offset].w, overlay_v[overlayOffset].w, Vout[offset].w, alpha);
}
void launchYUVOverlayKernel(const Npp8u* src[3], const Npp8u* overlay[3], Npp8u* dst[3], Npp32f alpha, int srcStep[2], int overlayStep[2], NppiSize size, cudaStream_t stream)
{
auto mod = size.width % 8;
if (mod != 0)
{
// we would just process few extra pixels - step is anyway bigger than width and is aligned by 512/256
size.width += 8 - mod;
}
auto width = size.width >> 2;
int srcStep_y = srcStep[0] >> 2;
int srcStep_uv = srcStep[1] >> 2;
int overlayStep_y = overlayStep[0] >> 2;
int overlayStep_uv = overlayStep[1] >> 2;
dim3 block(32, 32);
dim3 grid((width + block.x - 1) / block.x, (size.height + block.y - 1) / block.y);
yuvOverlayKernel << <grid, block, 0, stream >> > (reinterpret_cast<const uchar4*>(src[0]),
reinterpret_cast<const uchar4*>(src[1]),
reinterpret_cast<const uchar4*>(src[2]),
reinterpret_cast<const uchar4*>(overlay[0]),
reinterpret_cast<const uchar4*>(overlay[1]),
reinterpret_cast<const uchar4*>(overlay[2]),
reinterpret_cast<uchar4*>(dst[0]),
reinterpret_cast<uchar4*>(dst[1]),
reinterpret_cast<uchar4*>(dst[2]),
alpha, srcStep_y, srcStep_uv, overlayStep_y, overlayStep_uv, width, size.height, width >> 1, size.height >> 1);
} |
3c49c3ad89e8b2737794a5ea52af479a2187f026.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lock_hip.cuh"
#include "barrier.cuh"
#include "timer.cuh"
#include <stdio.h>
#include <stdint.h>
#define NUM_BLOCKS 10
#define WARPS_PER_BLOCK 10 // max of 32
#define NUM_WARPS (NUM_BLOCKS * WARPS_PER_BLOCK)
const unsigned WARP_SIZE = 32;
/** The interface to a global (device-wide) barrier. */
class IBarrier {
protected:
const unsigned m_expected;
unsigned arrived;
bool sense;
public:
/** Initialize the barrier */
__device__ IBarrier(const unsigned count) : m_expected(count) {
arrived = 0;
sense = true;
}
/** Each calling thread waits at this call until the barrier's count
has been reached. No thread leaves the barrier until all threads have
arrived. */
__device__ virtual void wait() = 0;
};
/** A sense-reversing centralized global (device-wide) barrier. This
barrier can only be called by one thread from each warp. Branch
divergence issues must be handled by the caller. */
class SpinBarrier : public IBarrier {
protected:
WarpLevelLock warpLock;
public:
__device__ SpinBarrier(const unsigned count) : IBarrier(count) {}
__device__ virtual void wait() {
// TODO: your code here
}
};
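// Editor's sketch (an assumption, not the solution shipped with this exercise):
// a sense-reversing wait() built on atomics has roughly this shape --
//   bool my_sense = sense;                           // phase at arrival
//   if (atomicAdd(&arrived, 1) == m_expected - 1) {  // last warp to arrive
//     arrived = 0;                                   // reset for the next phase
//     __threadfence();
//     sense = !my_sense;                             // flip phase, releasing waiters
//   } else {
//     while (sense == my_sense) { __threadfence(); } // spin until the flip
//   }
// A production version needs volatile/atomic reads of `sense` so the spin is
// not optimized away.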
/** A sense-reversing two-level global (device-wide) barrier. This barrier
performs block-level barrier before coordinating across blocks. This barrier
can safely be called by every thread within a warp. */
class TwoLevelBarrier : public SpinBarrier {
public:
__device__ TwoLevelBarrier(const unsigned count) : SpinBarrier(count) {}
__device__ virtual void wait() {
// TODO: your code here
}
};
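// Editor's sketch (assumption): a two-level wait() typically synchronizes the
// block first, lets one representative thread run the device-wide phase, and
// then releases the block --
//   __syncthreads();                                 // level 1: intra-block
//   if (threadIdx.x == 0) SpinBarrier::wait();       // level 2: one thread per block
//   __syncthreads();                                 // hold the block until released
// which is why d_2LBar below is constructed with NUM_BLOCKS rather than NUM_WARPS.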
__device__ SpinBarrier* d_SpinBar = NULL;
__device__ TwoLevelBarrier* d_2LBar = NULL;
__global__ void initBarriers() {
assert(blockIdx.x == 0 && threadIdx.x == 0);
d_SpinBar = new SpinBarrier(NUM_WARPS);
d_2LBar = new TwoLevelBarrier(NUM_BLOCKS);
}
__global__ void destroyBarriers() {
assert(blockIdx.x == 0 && threadIdx.x == 0);
delete d_SpinBar;
delete d_2LBar;
}
__global__ void rotateRows(const BarrierFlavor flavor, int* array, const int arrayDim) {
// NB: only one thread per warp runs this code to avoid branch divergence issues
if (threadIdx.x % warpSize != 0) { return; }
int in_c = ((blockDim.x * blockIdx.x) + threadIdx.x) / warpSize;
int out_c = (in_c + 1) % arrayDim;
for (int r = 0; r < arrayDim-1; r++) {
// copy (row r, column c) to (row r+1, column c+1)
array[((r+1) * arrayDim) + out_c] = array[(r * arrayDim) + in_c];
if (flavor == SPIN_BARRIER) {
d_SpinBar->wait();
} else if (flavor == TWO_LEVEL_BARRIER) {
d_2LBar->wait();
} else {
assert(false);
}
}
}
void barrierTest(const BarrierFlavor flavor) {
hipError_t cudaStatus;
CudaTimer timer;
const unsigned numWarps = NUM_BLOCKS * WARPS_PER_BLOCK;
const unsigned arraySizeBytes = numWarps * numWarps * sizeof(int);
// ALLOCATE DEVICE MEMORY
timer.start();
int* h_array = new int[numWarps * numWarps];
memset(h_array, 0, arraySizeBytes);
uint64_t rowSum = 0;
for (int i = 0; i < numWarps; i++) { // initialize first row with random values
h_array[i] = rand();
rowSum += h_array[i];
//printf("%d ", h_array[i]);
}
//printf(" // initialized row with sum of %llu\n", rowSum);
int* d_array;
cudaStatus = hipMalloc(&d_array, arraySizeBytes);
checkCudaErrors(cudaStatus);
cudaStatus = hipMemcpy(d_array, h_array, arraySizeBytes, hipMemcpyHostToDevice);
checkCudaErrors(cudaStatus);
hipLaunchKernelGGL(( initBarriers), dim3(1), dim3(1), 0, 0, );
cudaStatus = hipGetLastError();
checkCudaErrors(cudaStatus);
printf("Setup device memory: %3.1f ms \n", timer.stop());
// LAUNCH KERNEL
timer.start();
hipLaunchKernelGGL(( rotateRows), dim3(NUM_BLOCKS), dim3(WARPS_PER_BLOCK * WARP_SIZE), 0, 0, flavor, d_array, numWarps);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
checkCudaErrors(cudaStatus);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
checkCudaErrors(cudaStatus);
printf("Barrier kernel time: %3.1f ms \n", timer.stop());
// COPY ARRAY BACK TO HOST
timer.start();
cudaStatus = hipMemcpy(h_array, d_array, arraySizeBytes, hipMemcpyDeviceToHost);
checkCudaErrors(cudaStatus);
printf("Copy from device: %3.1f ms \n", timer.stop());
// CHECK ARRAY VALUES ARE CORRECT
// each row and column should have the same sum
const uint64_t expected = rowSum;
bool foundError = false;
// check rows
for (int i = 0; i < numWarps; i++) {
uint64_t sum = 0;
for (int j = 0; j < numWarps; j++) {
sum += h_array[(i * numWarps) + j];
}
if (sum != expected) {
printf("Expected row %d to have sum of %llu BUT GOT %llu INSTEAD :-(\n", i, expected, sum);
foundError = true;
}
}
// check columns
for (int i = 0; i < numWarps; i++) {
uint64_t sum = 0;
for (int j = 0; j < numWarps; j++) {
sum += h_array[(j * numWarps) + i];
}
if (sum != expected) {
printf("Expected column %d to have sum of %llu BUT GOT %llu INSTEAD :-(\n", i, expected, sum);
foundError = true;
}
}
/* print h_array for debugging
for (int i = 0; i < numWarps; i++) {
for (int j = 0; j < numWarps; j++) {
printf("%d ", h_array[(i * numWarps) + j]);
}
printf("\n");
}
*/
if (!foundError) {
printf("Each row/column has expected sum of %llu\n", expected);
}
// CLEANUP
hipLaunchKernelGGL(( destroyBarriers), dim3(1), dim3(1), 0, 0, );
cudaStatus = hipGetLastError();
checkCudaErrors(cudaStatus);
cudaStatus = hipFree(d_array);
checkCudaErrors(cudaStatus);
  delete[] h_array;  // h_array was allocated with new[]
} | 3c49c3ad89e8b2737794a5ea52af479a2187f026.cu | #include "lock.cuh"
#include "barrier.cuh"
#include "timer.cuh"
#include <stdio.h>
#include <stdint.h>
#define NUM_BLOCKS 10
#define WARPS_PER_BLOCK 10 // max of 32
#define NUM_WARPS (NUM_BLOCKS * WARPS_PER_BLOCK)
const unsigned WARP_SIZE = 32;
/** The interface to a global (device-wide) barrier. */
class IBarrier {
protected:
const unsigned m_expected;
unsigned arrived;
bool sense;
public:
/** Initialize the barrier */
__device__ IBarrier(const unsigned count) : m_expected(count) {
arrived = 0;
sense = true;
}
/** Each calling thread waits at this call until the barrier's count
has been reached. No thread leaves the barrier until all threads have
arrived. */
__device__ virtual void wait() = 0;
};
/** A sense-reversing centralized global (device-wide) barrier. This
barrier can only be called by one thread from each warp. Branch
divergence issues must be handled by the caller. */
class SpinBarrier : public IBarrier {
protected:
WarpLevelLock warpLock;
public:
__device__ SpinBarrier(const unsigned count) : IBarrier(count) {}
__device__ virtual void wait() {
// TODO: your code here
}
};
/** A sense-reversing two-level global (device-wide) barrier. This barrier
performs block-level barrier before coordinating across blocks. This barrier
can safely be called by every thread within a warp. */
class TwoLevelBarrier : public SpinBarrier {
public:
__device__ TwoLevelBarrier(const unsigned count) : SpinBarrier(count) {}
__device__ virtual void wait() {
// TODO: your code here
}
};
__device__ SpinBarrier* d_SpinBar = NULL;
__device__ TwoLevelBarrier* d_2LBar = NULL;
__global__ void initBarriers() {
assert(blockIdx.x == 0 && threadIdx.x == 0);
d_SpinBar = new SpinBarrier(NUM_WARPS);
d_2LBar = new TwoLevelBarrier(NUM_BLOCKS);
}
__global__ void destroyBarriers() {
assert(blockIdx.x == 0 && threadIdx.x == 0);
delete d_SpinBar;
delete d_2LBar;
}
__global__ void rotateRows(const BarrierFlavor flavor, int* array, const int arrayDim) {
// NB: only one thread per warp runs this code to avoid branch divergence issues
if (threadIdx.x % warpSize != 0) { return; }
int in_c = ((blockDim.x * blockIdx.x) + threadIdx.x) / warpSize;
int out_c = (in_c + 1) % arrayDim;
for (int r = 0; r < arrayDim-1; r++) {
// copy (row r, column c) to (row r+1, column c+1)
array[((r+1) * arrayDim) + out_c] = array[(r * arrayDim) + in_c];
if (flavor == SPIN_BARRIER) {
d_SpinBar->wait();
} else if (flavor == TWO_LEVEL_BARRIER) {
d_2LBar->wait();
} else {
assert(false);
}
}
}
void barrierTest(const BarrierFlavor flavor) {
cudaError_t cudaStatus;
CudaTimer timer;
const unsigned numWarps = NUM_BLOCKS * WARPS_PER_BLOCK;
const unsigned arraySizeBytes = numWarps * numWarps * sizeof(int);
// ALLOCATE DEVICE MEMORY
timer.start();
int* h_array = new int[numWarps * numWarps];
memset(h_array, 0, arraySizeBytes);
uint64_t rowSum = 0;
for (int i = 0; i < numWarps; i++) { // initialize first row with random values
h_array[i] = rand();
rowSum += h_array[i];
//printf("%d ", h_array[i]);
}
//printf(" // initialized row with sum of %llu\n", rowSum);
int* d_array;
cudaStatus = cudaMalloc(&d_array, arraySizeBytes);
checkCudaErrors(cudaStatus);
cudaStatus = cudaMemcpy(d_array, h_array, arraySizeBytes, cudaMemcpyHostToDevice);
checkCudaErrors(cudaStatus);
initBarriers<<<1, 1>>>();
cudaStatus = cudaGetLastError();
checkCudaErrors(cudaStatus);
printf("Setup device memory: %3.1f ms \n", timer.stop());
// LAUNCH KERNEL
timer.start();
rotateRows<<<NUM_BLOCKS, WARPS_PER_BLOCK * WARP_SIZE>>>(flavor, d_array, numWarps);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
checkCudaErrors(cudaStatus);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
checkCudaErrors(cudaStatus);
printf("Barrier kernel time: %3.1f ms \n", timer.stop());
// COPY ARRAY BACK TO HOST
timer.start();
cudaStatus = cudaMemcpy(h_array, d_array, arraySizeBytes, cudaMemcpyDeviceToHost);
checkCudaErrors(cudaStatus);
printf("Copy from device: %3.1f ms \n", timer.stop());
// CHECK ARRAY VALUES ARE CORRECT
// each row and column should have the same sum
const uint64_t expected = rowSum;
bool foundError = false;
// check rows
for (int i = 0; i < numWarps; i++) {
uint64_t sum = 0;
for (int j = 0; j < numWarps; j++) {
sum += h_array[(i * numWarps) + j];
}
if (sum != expected) {
printf("Expected row %d to have sum of %llu BUT GOT %llu INSTEAD :-(\n", i, expected, sum);
foundError = true;
}
}
// check columns
for (int i = 0; i < numWarps; i++) {
uint64_t sum = 0;
for (int j = 0; j < numWarps; j++) {
sum += h_array[(j * numWarps) + i];
}
if (sum != expected) {
printf("Expected column %d to have sum of %llu BUT GOT %llu INSTEAD :-(\n", i, expected, sum);
foundError = true;
}
}
/* print h_array for debugging
for (int i = 0; i < numWarps; i++) {
for (int j = 0; j < numWarps; j++) {
printf("%d ", h_array[(i * numWarps) + j]);
}
printf("\n");
}
*/
if (!foundError) {
printf("Each row/column has expected sum of %llu\n", expected);
}
// CLEANUP
destroyBarriers<<<1, 1>>>();
cudaStatus = cudaGetLastError();
checkCudaErrors(cudaStatus);
cudaStatus = cudaFree(d_array);
checkCudaErrors(cudaStatus);
  delete[] h_array;  // h_array was allocated with new[]
} |
4f2b01ecd623b99de210fcadd7102d083d6fbaa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#define kBNLL_THRESHOLD 50.
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
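// Editor's note: both branches evaluate f(x) = log(1 + exp(x)); the x > 0 case
// uses the identity log(1 + exp(x)) = x + log(1 + exp(-x)) so that exp() only
// ever sees a non-positive argument and cannot overflow.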
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
    Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
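// Editor's note: the backward pass scales the incoming gradient by
// f'(x) = exp(x) / (exp(x) + 1), i.e. the logistic sigmoid; clamping x at
// kBNLL_THRESHOLD (50) keeps exp() finite without changing the result, since
// sigmoid(50) is already 1.0 to float precision.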
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
| 4f2b01ecd623b99de210fcadd7102d083d6fbaa3.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
#define kBNLL_THRESHOLD 50.
template <typename Dtype>
__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void BNLLBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
    Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Dtype>
void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer);
} // namespace caffe
|
driver_functions.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %run_test hipify "%s" "%t" --skip-excluded-preprocessor-conditional-blocks %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string>
#include <stdio.h>
int main() {
printf("09. CUDA Driver API Functions synthetic test\n");
unsigned int flags = 0;
size_t bytes = 0;
size_t bytes_2 = 0;
void* image = nullptr;
std::string name = "str";
// CHECK: hipDevice_t device;
// CHECK-NEXT: hipCtx_t context;
// CHECK-NEXT: hipFuncCache_t func_cache;
// CHECK-NEXT: hipLimit_t limit;
// CHECK-NEXT: hipSharedMemConfig pconfig;
// CHECK-NEXT: hipFunction_t function;
// CHECK-NEXT: hipModule_t module_;
// CHECK-NEXT: hipDeviceptr_t deviceptr;
// CHECK-NEXT: hipDeviceptr_t deviceptr_2;
// CHECK-NEXT: hipTexRef texref;
// CHECK-NEXT: hipJitOption jit_option;
// CHECK-NEXT: hipArray_t array_;
// CHECK-NEXT: HIP_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR;
// CHECK-NEXT: HIP_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR;
// CHECK-NEXT: hipIpcEventHandle_t ipcEventHandle;
// CHECK-NEXT: hipEvent_t event_;
// CHECK-NEXT: hipIpcMemHandle_t ipcMemHandle;
// CHECK-NEXT: hip_Memcpy2D MEMCPY2D;
// CHECK-NEXT: HIP_MEMCPY3D MEMCPY3D;
// CHECK-NEXT: hipStream_t stream;
// CHECK-NEXT: hipMipmappedArray_t mipmappedArray;
hipDevice_t device;
hipCtx_t context;
hipFuncCache func_cache;
hipLimit_t limit;
hipSharedMemConfig pconfig;
hipFunction_t function;
hipModule_t module_;
hipDeviceptr_t deviceptr;
hipDeviceptr_t deviceptr_2;
textureReference texref;
hipJitOption jit_option;
hipArray * array_;
HIP_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR;
HIP_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR;
hipIpcEventHandle ipcEventHandle;
hipEvent_t event_;
hipIpcMemHandle ipcMemHandle;
hip_Memcpy2D MEMCPY2D;
HIP_MEMCPY3D MEMCPY3D;
hipStream_t stream;
hipMipmappedArray_t mipmappedArray;
// CUDA: hipError_t CUDAAPI hipInit(unsigned int Flags);
// HIP: hipError_t hipInit(unsigned int flags);
// CHECK: hipError_t result = hipInit(flags);
hipError_t result = hipInit(flags);
int driverVersion = 0;
// CUDA: hipError_t CUDAAPI hipDriverGetVersion(int *driverVersion);
// HIP: hipError_t hipDriverGetVersion(int* driverVersion);
// CHECK: result = hipDriverGetVersion(&driverVersion);
result = hipDriverGetVersion(&driverVersion);
int ordinal = 0;
// CUDA: hipError_t CUDAAPI hipDeviceGet(hipDevice_t *device, int ordinal);
// HIP: hipError_t hipDeviceGet(hipDevice_t* device, int ordinal);
// CHECK: result = hipDeviceGet(&device, ordinal);
result = hipDeviceGet(&device, ordinal);
int pi = 0;
// CHECK: hipDeviceAttribute_t device_attribute = hipDeviceAttributePciBusId;
hipDeviceAttribute_t device_attribute = hipDeviceAttributePciBusId;
// CUDA: hipError_t CUDAAPI hipDeviceGetAttribute(int *pi, hipDeviceAttribute_t attrib, hipDevice_t dev);
// HIP: hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int deviceId);
// CHECK: result = hipDeviceGetAttribute(&pi, device_attribute, device);
result = hipDeviceGetAttribute(&pi, device_attribute, device);
int count = 0;
// CUDA: hipError_t CUDAAPI hipGetDeviceCount(int *count);
// HIP: hipError_t hipGetDeviceCount(int* count);
// CHECK: result = hipGetDeviceCount(&count);
result = hipGetDeviceCount(&count);
// CUDA: hipError_t CUDAAPI cuDeviceTotalMem(size_t *bytes, hipDevice_t dev);
// HIP: hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device);
// CHECK: result = hipDeviceTotalMem(&bytes, device);
// CHECK-NEXT: result = hipDeviceTotalMem(&bytes, device);
result = cuDeviceTotalMem(&bytes, device);
result = hipDeviceTotalMem(&bytes, device);
int major = 0, minor = 0;
// CUDA: __CUDA_DEPRECATED hipError_t CUDAAPI hipDeviceComputeCapability(int *major, int *minor, hipDevice_t dev);
// HIP: hipError_t hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device);
// CHECK: result = hipDeviceComputeCapability(&major, &minor, device);
result = hipDeviceComputeCapability(&major, &minor, device);
int active = 0;
// CUDA: hipError_t CUDAAPI hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int *flags, int *active);
// HIP: hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active);
// CHECK: result = hipDevicePrimaryCtxGetState(device, &flags, &active);
result = hipDevicePrimaryCtxGetState(device, &flags, &active);
// CUDA: hipError_t CUDAAPI hipDevicePrimaryCtxRelease(hipDevice_t dev);
// HIP: hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxRelease(device);
result = hipDevicePrimaryCtxRelease(device);
#if TORCH_HIP_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxRelease(device);
result = cuDevicePrimaryCtxRelease_v2(device);
#endif
// CUDA: hipError_t CUDAAPI hipDevicePrimaryCtxReset(hipDevice_t dev);
// HIP: hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxReset(device);
result = hipDevicePrimaryCtxReset(device);
#if TORCH_HIP_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxReset(device);
result = cuDevicePrimaryCtxReset_v2(device);
#endif
// CUDA: hipError_t CUDAAPI hipDevicePrimaryCtxRetain(hipCtx_t *pctx, hipDevice_t dev);
// HIP: hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxRetain(&context, device);
result = hipDevicePrimaryCtxRetain(&context, device);
// CUDA: hipError_t CUDAAPI hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags);
// HIP: hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags);
// CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags);
result = hipDevicePrimaryCtxSetFlags(device, flags);
#if TORCH_HIP_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags);
result = cuDevicePrimaryCtxSetFlags_v2(device, flags);
#endif
// CUDA: hipError_t CUDAAPI hipCtxCreate(hipCtx_t *pctx, unsigned int flags, hipDevice_t dev);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxCreate(hipCtx_t *ctx, unsigned int flags, hipDevice_t device);
// CHECK: result = hipCtxCreate(&context, flags, device);
// CHECK-NEXT: result = hipCtxCreate(&context, flags, device);
result = hipCtxCreate(&context, flags, device);
result = hipCtxCreate(&context, flags, device);
// CUDA: hipError_t CUDAAPI hipCtxDestroy(hipCtx_t ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxDestroy(hipCtx_t ctx);
// CHECK: result = hipCtxDestroy(context);
// CHECK-NEXT: result = hipCtxDestroy(context);
result = hipCtxDestroy(context);
result = hipCtxDestroy(context);
unsigned int version = 0;
// CUDA: hipError_t CUDAAPI hipCtxGetApiVersion(hipCtx_t ctx, unsigned int *version);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion);
// CHECK: result = hipCtxGetApiVersion(context, &version);
result = hipCtxGetApiVersion(context, &version);
// CUDA: hipError_t CUDAAPI hipCtxGetCacheConfig(hipFuncCache *pconfig);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCacheConfig(hipFuncCache_t* cacheConfig);
// CHECK: result = hipCtxGetCacheConfig(&func_cache);
result = hipCtxGetCacheConfig(&func_cache);
// CUDA: hipError_t CUDAAPI hipCtxGetCurrent(hipCtx_t *pctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCurrent(hipCtx_t* ctx);
// CHECK: result = hipCtxGetCurrent(&context);
result = hipCtxGetCurrent(&context);
// CUDA: hipError_t CUDAAPI hipCtxGetDevice(hipDevice_t *device);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetDevice(hipDevice_t* device);
// CHECK: result = hipCtxGetDevice(&device);
result = hipCtxGetDevice(&device);
// CUDA: hipError_t CUDAAPI hipCtxGetFlags(unsigned int *flags);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetFlags(unsigned int* flags);
// CHECK: result = hipCtxGetFlags(&flags);
result = hipCtxGetFlags(&flags);
size_t pvalue = 0;
// CUDA: hipError_t CUDAAPI hipCtxGetLimit(size_t *pvalue, hipLimit_t limit);
// HIP: hipError_t hipDeviceGetLimit(size_t* pValue, enum hipLimit_t limit);
// CHECK: result = hipDeviceGetLimit(&pvalue, limit);
result = hipCtxGetLimit(&pvalue, limit);
// CUDA: hipError_t CUDAAPI hipCtxGetSharedMemConfig(hipSharedMemConfig *pConfig);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig);
// CHECK: result = hipCtxGetSharedMemConfig(&pconfig);
result = hipCtxGetSharedMemConfig(&pconfig);
int leastPriority = 0, greatestPriority = 0;
// CUDA: hipError_t CUDAAPI hipCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority);
// HIP: hipError_t hipDeviceGetStreamPriorityRange(int* leastPriority, int* greatestPriority);
// CHECK: result = hipDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority);
result = hipCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
// CUDA: hipError_t CUDAAPI cuCtxPopCurrent(hipCtx_t *pctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPopCurrent(hipCtx_t* ctx);
// CHECK: result = hipCtxPopCurrent(&context);
// CHECK-NEXT: result = hipCtxPopCurrent(&context);
result = cuCtxPopCurrent(&context);
result = hipCtxPopCurrent(&context);
// CUDA: hipError_t CUDAAPI cuCtxPushCurrent(hipCtx_t ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPushCurrent(hipCtx_t ctx);
// CHECK: result = hipCtxPushCurrent(context);
// CHECK-NEXT: result = hipCtxPushCurrent(context);
result = cuCtxPushCurrent(context);
result = hipCtxPushCurrent(context);
// CUDA: hipError_t CUDAAPI hipCtxSetCacheConfig(hipFuncCache config);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCacheConfig(hipFuncCache_t cacheConfig);
// CHECK: result = hipCtxSetCacheConfig(func_cache);
result = hipCtxSetCacheConfig(func_cache);
// CUDA: hipError_t CUDAAPI hipCtxSetCurrent(hipCtx_t ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCurrent(hipCtx_t ctx);
// CHECK: result = hipCtxSetCurrent(context);
result = hipCtxSetCurrent(context);
// CUDA: hipError_t CUDAAPI hipCtxSetSharedMemConfig(hipSharedMemConfig config);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config);
// CHECK: result = hipCtxSetSharedMemConfig(pconfig);
result = hipCtxSetSharedMemConfig(pconfig);
// CUDA: hipError_t CUDAAPI hipCtxSynchronize(void);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSynchronize(void);
// CHECK: result = hipCtxSynchronize();
result = hipCtxSynchronize();
// CUDA: hipError_t CUDAAPI hipModuleGetFunction(hipFunction_t *hfunc, hipModule_t hmod, const char *name);
// HIP: hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname);
// CHECK: result = hipModuleGetFunction(&function, module_, name.c_str());
result = hipModuleGetFunction(&function, module_, name.c_str());
// CUDA: hipError_t CUDAAPI hipModuleGetGlobal(hipDeviceptr_t *dptr, size_t *bytes, hipModule_t hmod, const char *name);
// HIP: hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name);
// CHECK: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
// CHECK-NEXT: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
// CUDA: hipError_t CUDAAPI hipModuleGetTexRef(textureReference *pTexRef, hipModule_t hmod, const char *name);
// HIP: hipError_t hipModuleGetTexRef(textureReference** texRef, hipModule_t hmod, const char* name);
// CHECK: result = hipModuleGetTexRef(&texref, module_, name.c_str());
result = hipModuleGetTexRef(&texref, module_, name.c_str());
// CUDA: hipError_t CUDAAPI hipModuleLoad(hipModule_t *module, const char *fname);
// HIP: hipError_t hipModuleLoad(hipModule_t* module, const char* fname);
// CHECK: result = hipModuleLoad(&module_, name.c_str());
result = hipModuleLoad(&module_, name.c_str());
// CUDA: hipError_t CUDAAPI hipModuleLoadData(hipModule_t *module, const void *image);
// HIP: hipError_t hipModuleLoadData(hipModule_t* module, const void* image);
// CHECK: result = hipModuleLoadData(&module_, image);
result = hipModuleLoadData(&module_, image);
unsigned int numOptions = 0;
void* optionValues = nullptr;
// CUDA: hipError_t CUDAAPI hipModuleLoadDataEx(hipModule_t *module, const void *image, unsigned int numOptions, hipJitOption *options, void **optionValues);
// HIP: hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues);
// CHECK: result = hipModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues);
result = hipModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues);
// CUDA: hipError_t CUDAAPI hipModuleUnload(hipModule_t hmod);
// HIP: hipError_t hipModuleUnload(hipModule_t module);
// CHECK: result = hipModuleUnload(module_);
result = hipModuleUnload(module_);
// CUDA: hipError_t CUDAAPI hipArray3DCreate(hipArray * *pHandle, const HIP_ARRAY3D_DESCRIPTOR *pAllocateArray);
// HIP: hipError_t hipArray3DCreate(hipArray** array, const HIP_ARRAY3D_DESCRIPTOR* pAllocateArray);
// CHECK: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
// CHECK-NEXT: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
result = cuArray3DCreate_v2(&array_, &ARRAY3D_DESCRIPTOR);
// CUDA: hipError_t CUDAAPI hipArrayCreate(hipArray * *pHandle, const HIP_ARRAY_DESCRIPTOR *pAllocateArray);
// HIP: hipError_t hipArrayCreate(hipArray** pHandle, const HIP_ARRAY_DESCRIPTOR* pAllocateArray);
// CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR);
// CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR);
result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR);
result = cuArrayCreate_v2(&array_, &ARRAY_DESCRIPTOR);
// CUDA: hipError_t CUDAAPI hipArrayDestroy(hipArray * hArray);
// HIP: hipError_t hipArrayDestroy(hipArray* array);
// CHECK: result = hipArrayDestroy(array_);
result = hipArrayDestroy(array_);
std::string pciBusId;
// CUDA: hipError_t CUDAAPI hipDeviceGetByPCIBusId(hipDevice_t *dev, const char *pciBusId);
// HIP: hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId);
// CHECK: result = hipDeviceGetByPCIBusId(&device, pciBusId.c_str());
result = hipDeviceGetByPCIBusId(&device, pciBusId.c_str());
int len = 0;
char* pciBusId_ = const_cast<char*>(pciBusId.c_str());
// CUDA: hipError_t CUDAAPI hipDeviceGetPCIBusId(char *pciBusId, int len, hipDevice_t dev);
// HIP: hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, int device);
// CHECK: result = hipDeviceGetPCIBusId(pciBusId_, len, device);
result = hipDeviceGetPCIBusId(pciBusId_, len, device);
// CUDA: hipError_t CUDAAPI hipIpcCloseMemHandle(hipDeviceptr_t dptr);
// HIP: hipError_t hipIpcCloseMemHandle(void* devPtr);
// CHECK: result = hipIpcCloseMemHandle(deviceptr);
result = hipIpcCloseMemHandle(deviceptr);
// CUDA: hipError_t CUDAAPI hipIpcGetEventHandle(hipIpcEventHandle *pHandle, hipEvent_t event);
// HIP: hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, hipEvent_t event);
// CHECK: result = hipIpcGetEventHandle(&ipcEventHandle, event_);
result = hipIpcGetEventHandle(&ipcEventHandle, event_);
// CUDA: hipError_t CUDAAPI hipIpcGetMemHandle(hipIpcMemHandle *pHandle, hipDeviceptr_t dptr);
// HIP: hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr);
// CHECK: result = hipIpcGetMemHandle(&ipcMemHandle, deviceptr);
result = hipIpcGetMemHandle(&ipcMemHandle, deviceptr);
// CUDA: hipError_t CUDAAPI hipIpcOpenEventHandle(hipEvent_t *phEvent, hipIpcEventHandle handle);
// HIP: hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle);
// CHECK: result = hipIpcOpenEventHandle(&event_, ipcEventHandle);
result = hipIpcOpenEventHandle(&event_, ipcEventHandle);
// CUDA: hipError_t CUDAAPI hipIpcOpenMemHandle(hipDeviceptr_t *pdptr, hipIpcMemHandle handle, unsigned int Flags);
// HIP: hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags);
// CHECK: result = hipIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags);
result = hipIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags);
// CUDA: hipError_t CUDAAPI cuMemAlloc(hipDeviceptr_t *dptr, size_t bytesize);
// HIP: hipError_t hipMalloc(void** ptr, size_t size);
// CHECK: result = hipMalloc(&deviceptr, bytes);
// CHECK-NEXT: result = hipMalloc(&deviceptr, bytes);
result = cuMemAlloc(&deviceptr, bytes);
result = hipMalloc(&deviceptr, bytes);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////// TODO: Get rid of additional attribute 'unsigned int flags' used by HIP without a default value ///////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// CUDA: hipError_t CUDAAPI hipMemAllocHost(void **pp, size_t bytesize);
// HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags);
// TODO: should be hipHostAlloc(&image, bytes, 0);
// CHECK: result = hipHostAlloc(&image, bytes);
// CHECK-NEXT: result = hipHostAlloc(&image, bytes);
result = hipMemAllocHost(&image, bytes);
result = cuMemAllocHost_v2(&image, bytes);
// CUDA: hipError_t CUDAAPI hipMemAllocManaged(hipDeviceptr_t *dptr, size_t bytesize, unsigned int flags);
// HIP: hipError_t hipMallocManaged(void** dev_ptr, size_t size, unsigned int flags __dparm(hipMemAttachGlobal));
// CHECK: result = hipMallocManaged(&deviceptr, bytes, flags);
result = hipMemAllocManaged(&deviceptr, bytes, flags);
size_t pitch = 0, width = 0, height = 0;
// CUDA: hipError_t CUDAAPI hipMemAllocPitch__(hipDeviceptr_t *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes);
// HIP: hipError_t hipMemAllocPitch(hipDeviceptr_t* dptr, size_t* pitch, size_t widthInBytes, size_t height, unsigned int elementSizeBytes);
// CHECK: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
// CHECK-NEXT: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
// CUDA: hipError_t CUDAAPI hipMemcpy2D__(const hip_Memcpy2D *pCopy);
// HIP: hipError_t hipMemcpyParam2D(const hip_Memcpy2D* pCopy);
// CHECK: result = hipMemcpyParam2D(&MEMCPY2D);
// CHECK-NEXT: result = hipMemcpyParam2D(&MEMCPY2D);
result = hipMemcpyParam2D(&MEMCPY2D);
result = hipMemcpyParam2D(&MEMCPY2D);
// CUDA: hipError_t CUDAAPI hipMemcpy2DAsync__(const hip_Memcpy2D *pCopy, hipStream_t hStream);
// HIP: hipError_t hipMemcpyParam2DAsync(const hip_Memcpy2D* pCopy, hipStream_t stream __dparm(0));
// CHECK: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
// CHECK-NEXT: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
// CUDA: hipError_t CUDAAPI hipMemcpy2DUnaligned(const hip_Memcpy2D *pCopy);
// HIP: hipError_t hipDrvMemcpy2DUnaligned(const hip_Memcpy2D* pCopy);
// CHECK: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
// CHECK-NEXT: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
// CUDA: hipError_t CUDAAPI hipMemcpy3D__(const HIP_MEMCPY3D *pCopy);
// HIP: hipError_t hipDrvMemcpy3D(const HIP_MEMCPY3D* pCopy);
// CHECK: result = hipDrvMemcpy3D(&MEMCPY3D);
// CHECK-NEXT: result = hipDrvMemcpy3D(&MEMCPY3D);
result = hipDrvMemcpy3D(&MEMCPY3D);
result = hipDrvMemcpy3D(&MEMCPY3D);
// CUDA: hipError_t CUDAAPI hipMemcpy3DAsync__(const HIP_MEMCPY3D *pCopy, hipStream_t hStream);
// HIP: hipError_t hipDrvMemcpy3DAsync(const HIP_MEMCPY3D* pCopy, hipStream_t stream);
// CHECK: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
// CHECK-NEXT: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
void* dsthost = nullptr;
size_t offset = 0;
// CUDA: hipError_t CUDAAPI hipMemcpyAtoH(void *dstHost, hipArray * srcArray, size_t srcOffset, size_t ByteCount);
// HIP: hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count);
// CHECK: result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
// CHECK-NEXT: result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
// CUDA: hipError_t CUDAAPI cuMemcpyDtoD(hipDeviceptr_t dstDevice, hipDeviceptr_t srcDevice, size_t ByteCount);
// HIP: hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes);
// CHECK: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
// CHECK-NEXT: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
// CUDA: hipError_t CUDAAPI cuMemcpyDtoDAsync(hipDeviceptr_t dstDevice, hipDeviceptr_t srcDevice, size_t ByteCount, hipStream_t hStream);
// HIP: hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
// CHECK-NEXT: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
// CUDA: hipError_t CUDAAPI cuMemcpyDtoH(void *dstHost, hipDeviceptr_t srcDevice, size_t ByteCount);
// HIP: hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t sizeBytes);
// CHECK: result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
// CHECK-NEXT: result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
// CUDA: hipError_t CUDAAPI cuMemcpyDtoHAsync(void *dstHost, hipDeviceptr_t srcDevice, size_t ByteCount, hipStream_t hStream);
// HIP: hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
// CHECK-NEXT: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
// CUDA: hipError_t CUDAAPI hipMemcpyHtoA(hipArray * dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
// HIP: hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count);
// CHECK: result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
// CHECK-NEXT: result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
// CUDA: hipError_t CUDAAPI cuMemcpyHtoD(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount);
// HIP: hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t sizeBytes);
// CHECK: result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
// CHECK-NEXT: result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
// CUDA: hipError_t CUDAAPI cuMemcpyHtoDAsync(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount, hipStream_t hStream);
// HIP: hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
// CHECK-NEXT: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
// CUDA: hipError_t CUDAAPI hipFree(hipDeviceptr_t dptr);
// HIP: hipError_t hipFree(void* ptr);
// CHECK: result = hipFree(deviceptr);
// CHECK-NEXT: result = hipFree(deviceptr);
result = hipFree(deviceptr);
result = hipFree(deviceptr);
// CUDA: hipError_t CUDAAPI hipHostFree(void *p);
// HIP: hipError_t hipHostFree(void* ptr);
// CHECK: result = hipHostFree(image);
result = hipHostFree(image);
// CUDA: hipError_t CUDAAPI hipMemGetAddressRange(hipDeviceptr_t *pbase, size_t *psize, hipDeviceptr_t dptr);
// HIP: hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr);
// CHECK: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
// CHECK-NEXT: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
// CUDA: hipError_t CUDAAPI cuMemGetInfo(size_t *free, size_t *total);
// HIP: hipError_t hipMemGetInfo(size_t* free, size_t* total);
// CHECK: result = hipMemGetInfo(&bytes, &bytes_2);
// CHECK-NEXT: result = hipMemGetInfo(&bytes, &bytes_2);
result = hipMemGetInfo(&bytes, &bytes_2);
result = hipMemGetInfo(&bytes, &bytes_2);
// CUDA: hipError_t CUDAAPI hipHostMalloc(void **pp, size_t bytesize, unsigned int Flags);
// HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags);
// CHECK: result = hipHostAlloc(&image, bytes, flags);
result = hipHostMalloc(&image, bytes, flags);
// CUDA: hipError_t CUDAAPI hipMemHostGetDevicePointer(hipDeviceptr_t *pdptr, void *p, unsigned int Flags);
// HIP: hipError_t hipHostGetDevicePointer(void** devPtr, void* hstPtr, unsigned int flags);
// CHECK: result = hipHostGetDevicePointer(&deviceptr, image, flags);
// CHECK-NEXT: result = hipHostGetDevicePointer(&deviceptr, image, flags);
result = hipHostGetDevicePointer(&deviceptr, image, flags);
result = hipHostGetDevicePointer(&deviceptr, image, flags);
// CUDA: hipError_t CUDAAPI hipMemHostGetFlags(unsigned int *pFlags, void *p);
// HIP: hipError_t hipHostGetFlags(unsigned int* flagsPtr, void* hostPtr);
// CHECK: result = hipHostGetFlags(&flags, image);
result = hipHostGetFlags(&flags, image);
// CUDA: hipError_t CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags);
// HIP: hipError_t hipHostRegister(void* hostPtr, size_t sizeBytes, unsigned int flags);
// CHECK: result = hipHostRegister(image, bytes, flags);
// CHECK-NEXT: result = hipHostRegister(image, bytes, flags);
result = hipHostRegister(image, bytes, flags);
result = hipHostRegister(image, bytes, flags);
// CUDA: hipError_t CUDAAPI hipHostUnregister(void *p);
// HIP: hipError_t hipHostUnregister(void* hostPtr);
// CHECK: result = hipHostUnregister(image);
result = hipHostUnregister(image);
unsigned short us = 0;
// CUDA: hipError_t CUDAAPI cuMemsetD16(hipDeviceptr_t dstDevice, unsigned short us, size_t N);
// HIP: hipError_t hipMemsetD16(hipDeviceptr_t dest, unsigned short value, size_t count);
// CHECK: result = hipMemsetD16(deviceptr, us, bytes);
// CHECK-NEXT: result = hipMemsetD16(deviceptr, us, bytes);
result = hipMemsetD16(deviceptr, us, bytes);
result = hipMemsetD16(deviceptr, us, bytes);
// CUDA: hipError_t CUDAAPI hipMemsetD16Async(hipDeviceptr_t dstDevice, unsigned short us, size_t N, hipStream_t hStream);
// HIP: hipError_t hipMemsetD16Async(hipDeviceptr_t dest, unsigned short value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD16Async(deviceptr, us, bytes, stream);
result = hipMemsetD16Async(deviceptr, us, bytes, stream);
// CUDA: hipError_t CUDAAPI cuMemsetD32(hipDeviceptr_t dstDevice, unsigned int ui, size_t N)
// HIP: hipError_t hipMemsetD32(hipDeviceptr_t dest, int value, size_t count);
// CHECK: result = hipMemsetD32(deviceptr, flags, bytes);
// CHECK-NEXT: result = hipMemsetD32(deviceptr, flags, bytes);
result = hipMemsetD32(deviceptr, flags, bytes);
result = hipMemsetD32(deviceptr, flags, bytes);
// CUDA: hipError_t CUDAAPI hipMemsetAsync(hipDeviceptr_t dstDevice, unsigned int ui, size_t N, hipStream_t hStream);
// HIP: hipError_t hipMemsetD32Async(hipDeviceptr_t dst, int value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD32Async(deviceptr, flags, bytes, stream);
result = hipMemsetD32Async(deviceptr, flags, bytes, stream);
unsigned char uc = 0;
// CUDA: hipError_t CUDAAPI cuMemsetD8(hipDeviceptr_t dstDevice, unsigned char uc, size_t N);
// HIP: hipError_t hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t count);
// CHECK: result = hipMemsetD8(deviceptr, uc, bytes);
// CHECK-NEXT: result = hipMemsetD8(deviceptr, uc, bytes);
result = hipMemsetD8(deviceptr, uc, bytes);
result = hipMemsetD8(deviceptr, uc, bytes);
// CUDA: hipError_t CUDAAPI hipMemsetD8Async(hipDeviceptr_t dstDevice, unsigned char uc, size_t N, hipStream_t hStream);
// HIP: hipError_t hipMemsetD8Async(hipDeviceptr_t dest, unsigned char value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD8Async(deviceptr, uc, bytes, stream);
result = hipMemsetD8Async(deviceptr, uc, bytes, stream);
// CUDA: hipError_t CUDAAPI hipMipmappedArrayCreate(hipMipmappedArray_t *pHandle, const HIP_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels);
// HIP: hipError_t hipMipmappedArrayCreate(hipMipmappedArray_t* pHandle, HIP_ARRAY3D_DESCRIPTOR* pMipmappedArrayDesc, unsigned int numMipmapLevels);
// CHECK: result = hipMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags);
result = hipMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags);
// CUDA: hipError_t CUDAAPI hipMipmappedArrayDestroy(hipMipmappedArray_t hMipmappedArray);
// HIP: hipError_t hipMipmappedArrayDestroy(hipMipmappedArray_t hMipmappedArray);
// CHECK: result = hipMipmappedArrayDestroy(mipmappedArray);
result = hipMipmappedArrayDestroy(mipmappedArray);
// CUDA: hipError_t CUDAAPI hipMipmappedArrayGetLevel(hipArray * *pLevelArray, hipMipmappedArray_t hMipmappedArray, unsigned int level);
// HIP: hipError_t hipMipmappedArrayGetLevel(hipArray_t* pLevelArray, hipMipmappedArray_t hMipMappedArray, unsigned int level);
// CHECK: result = hipMipmappedArrayGetLevel(&array_, mipmappedArray, flags);
result = hipMipmappedArrayGetLevel(&array_, mipmappedArray, flags);
return 0;
}
| driver_functions.cu | // RUN: %run_test hipify "%s" "%t" --skip-excluded-preprocessor-conditional-blocks %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
#include <string>
#include <stdio.h>
int main() {
printf("09. CUDA Driver API Functions synthetic test\n");
unsigned int flags = 0;
size_t bytes = 0;
size_t bytes_2 = 0;
void* image = nullptr;
std::string name = "str";
// CHECK: hipDevice_t device;
// CHECK-NEXT: hipCtx_t context;
// CHECK-NEXT: hipFuncCache_t func_cache;
// CHECK-NEXT: hipLimit_t limit;
// CHECK-NEXT: hipSharedMemConfig pconfig;
// CHECK-NEXT: hipFunction_t function;
// CHECK-NEXT: hipModule_t module_;
// CHECK-NEXT: hipDeviceptr_t deviceptr;
// CHECK-NEXT: hipDeviceptr_t deviceptr_2;
// CHECK-NEXT: hipTexRef texref;
// CHECK-NEXT: hipJitOption jit_option;
// CHECK-NEXT: hipArray_t array_;
// CHECK-NEXT: HIP_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR;
// CHECK-NEXT: HIP_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR;
// CHECK-NEXT: hipIpcEventHandle_t ipcEventHandle;
// CHECK-NEXT: hipEvent_t event_;
// CHECK-NEXT: hipIpcMemHandle_t ipcMemHandle;
// CHECK-NEXT: hip_Memcpy2D MEMCPY2D;
// CHECK-NEXT: HIP_MEMCPY3D MEMCPY3D;
// CHECK-NEXT: hipStream_t stream;
// CHECK-NEXT: hipMipmappedArray_t mipmappedArray;
CUdevice device;
CUcontext context;
CUfunc_cache func_cache;
CUlimit limit;
CUsharedconfig pconfig;
CUfunction function;
CUmodule module_;
CUdeviceptr deviceptr;
CUdeviceptr deviceptr_2;
CUtexref texref;
CUjit_option jit_option;
CUarray array_;
CUDA_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR;
CUDA_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR;
CUipcEventHandle ipcEventHandle;
CUevent event_;
CUipcMemHandle ipcMemHandle;
CUDA_MEMCPY2D MEMCPY2D;
CUDA_MEMCPY3D MEMCPY3D;
CUstream stream;
CUmipmappedArray mipmappedArray;
// CUDA: CUresult CUDAAPI cuInit(unsigned int Flags);
// HIP: hipError_t hipInit(unsigned int flags);
// CHECK: hipError_t result = hipInit(flags);
CUresult result = cuInit(flags);
int driverVersion = 0;
// CUDA: CUresult CUDAAPI cuDriverGetVersion(int *driverVersion);
// HIP: hipError_t hipDriverGetVersion(int* driverVersion);
// CHECK: result = hipDriverGetVersion(&driverVersion);
result = cuDriverGetVersion(&driverVersion);
int ordinal = 0;
// CUDA: CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal);
// HIP: hipError_t hipDeviceGet(hipDevice_t* device, int ordinal);
// CHECK: result = hipDeviceGet(&device, ordinal);
result = cuDeviceGet(&device, ordinal);
int pi = 0;
// CHECK: hipDeviceAttribute_t device_attribute = hipDeviceAttributePciBusId;
CUdevice_attribute device_attribute = CU_DEVICE_ATTRIBUTE_PCI_BUS_ID;
// CUDA: CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev);
// HIP: hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int deviceId);
// CHECK: result = hipDeviceGetAttribute(&pi, device_attribute, device);
result = cuDeviceGetAttribute(&pi, device_attribute, device);
int count = 0;
// CUDA: CUresult CUDAAPI cuDeviceGetCount(int *count);
// HIP: hipError_t hipGetDeviceCount(int* count);
// CHECK: result = hipGetDeviceCount(&count);
result = cuDeviceGetCount(&count);
// CUDA: CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev);
// HIP: hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device);
// CHECK: result = hipDeviceTotalMem(&bytes, device);
// CHECK-NEXT: result = hipDeviceTotalMem(&bytes, device);
result = cuDeviceTotalMem(&bytes, device);
result = cuDeviceTotalMem_v2(&bytes, device);
int major = 0, minor = 0;
// CUDA: __CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev);
// HIP: hipError_t hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device);
// CHECK: result = hipDeviceComputeCapability(&major, &minor, device);
result = cuDeviceComputeCapability(&major, &minor, device);
int active = 0;
// CUDA: CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active);
// HIP: hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active);
// CHECK: result = hipDevicePrimaryCtxGetState(device, &flags, &active);
result = cuDevicePrimaryCtxGetState(device, &flags, &active);
// CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev);
// HIP: hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxRelease(device);
result = cuDevicePrimaryCtxRelease(device);
#if CUDA_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxRelease(device);
result = cuDevicePrimaryCtxRelease_v2(device);
#endif
// CUDA: CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev);
// HIP: hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxReset(device);
result = cuDevicePrimaryCtxReset(device);
#if CUDA_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxReset(device);
result = cuDevicePrimaryCtxReset_v2(device);
#endif
// CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev);
// HIP: hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev);
// CHECK: result = hipDevicePrimaryCtxRetain(&context, device);
result = cuDevicePrimaryCtxRetain(&context, device);
// CUDA: CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags);
// HIP: hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags);
// CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags);
result = cuDevicePrimaryCtxSetFlags(device, flags);
#if CUDA_VERSION > 10020
// CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags);
result = cuDevicePrimaryCtxSetFlags_v2(device, flags);
#endif
// CUDA: CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxCreate(hipCtx_t *ctx, unsigned int flags, hipDevice_t device);
// CHECK: result = hipCtxCreate(&context, flags, device);
// CHECK-NEXT: result = hipCtxCreate(&context, flags, device);
result = cuCtxCreate(&context, flags, device);
result = cuCtxCreate_v2(&context, flags, device);
// CUDA: CUresult CUDAAPI cuCtxDestroy(CUcontext ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxDestroy(hipCtx_t ctx);
// CHECK: result = hipCtxDestroy(context);
// CHECK-NEXT: result = hipCtxDestroy(context);
result = cuCtxDestroy(context);
result = cuCtxDestroy_v2(context);
unsigned int version = 0;
// CUDA: CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion);
// CHECK: result = hipCtxGetApiVersion(context, &version);
result = cuCtxGetApiVersion(context, &version);
// CUDA: CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCacheConfig(hipFuncCache_t* cacheConfig);
// CHECK: result = hipCtxGetCacheConfig(&func_cache);
result = cuCtxGetCacheConfig(&func_cache);
// CUDA: CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCurrent(hipCtx_t* ctx);
// CHECK: result = hipCtxGetCurrent(&context);
result = cuCtxGetCurrent(&context);
// CUDA: CUresult CUDAAPI cuCtxGetDevice(CUdevice *device);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetDevice(hipDevice_t* device);
// CHECK: result = hipCtxGetDevice(&device);
result = cuCtxGetDevice(&device);
// CUDA: CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetFlags(unsigned int* flags);
// CHECK: result = hipCtxGetFlags(&flags);
result = cuCtxGetFlags(&flags);
size_t pvalue = 0;
// CUDA: CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit);
// HIP: hipError_t hipDeviceGetLimit(size_t* pValue, enum hipLimit_t limit);
// CHECK: result = hipDeviceGetLimit(&pvalue, limit);
result = cuCtxGetLimit(&pvalue, limit);
// CUDA: CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig);
// CHECK: result = hipCtxGetSharedMemConfig(&pconfig);
result = cuCtxGetSharedMemConfig(&pconfig);
int leastPriority = 0, greatestPriority = 0;
// CUDA: CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority);
// HIP: hipError_t hipDeviceGetStreamPriorityRange(int* leastPriority, int* greatestPriority);
// CHECK: result = hipDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority);
result = cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority);
// CUDA: CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPopCurrent(hipCtx_t* ctx);
// CHECK: result = hipCtxPopCurrent(&context);
// CHECK-NEXT: result = hipCtxPopCurrent(&context);
result = cuCtxPopCurrent(&context);
result = cuCtxPopCurrent_v2(&context);
// CUDA: CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPushCurrent(hipCtx_t ctx);
// CHECK: result = hipCtxPushCurrent(context);
// CHECK-NEXT: result = hipCtxPushCurrent(context);
result = cuCtxPushCurrent(context);
result = cuCtxPushCurrent_v2(context);
// CUDA: CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCacheConfig(hipFuncCache_t cacheConfig);
// CHECK: result = hipCtxSetCacheConfig(func_cache);
result = cuCtxSetCacheConfig(func_cache);
// CUDA: CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCurrent(hipCtx_t ctx);
// CHECK: result = hipCtxSetCurrent(context);
result = cuCtxSetCurrent(context);
// CUDA: CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config);
// CHECK: result = hipCtxSetSharedMemConfig(pconfig);
result = cuCtxSetSharedMemConfig(pconfig);
// CUDA: CUresult CUDAAPI cuCtxSynchronize(void);
// HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSynchronize(void);
// CHECK: result = hipCtxSynchronize();
result = cuCtxSynchronize();
// CUDA: CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name);
// HIP: hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname);
// CHECK: result = hipModuleGetFunction(&function, module_, name.c_str());
result = cuModuleGetFunction(&function, module_, name.c_str());
// CUDA: CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name);
// HIP: hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name);
// CHECK: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
// CHECK-NEXT: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
result = cuModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str());
result = cuModuleGetGlobal_v2(&deviceptr, &bytes, module_, name.c_str());
// CUDA: CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name);
// HIP: hipError_t hipModuleGetTexRef(textureReference** texRef, hipModule_t hmod, const char* name);
// CHECK: result = hipModuleGetTexRef(&texref, module_, name.c_str());
result = cuModuleGetTexRef(&texref, module_, name.c_str());
// CUDA: CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname);
// HIP: hipError_t hipModuleLoad(hipModule_t* module, const char* fname);
// CHECK: result = hipModuleLoad(&module_, name.c_str());
result = cuModuleLoad(&module_, name.c_str());
// CUDA: CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image);
// HIP: hipError_t hipModuleLoadData(hipModule_t* module, const void* image);
// CHECK: result = hipModuleLoadData(&module_, image);
result = cuModuleLoadData(&module_, image);
unsigned int numOptions = 0;
void* optionValues = nullptr;
// CUDA: CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues);
// HIP: hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues);
// CHECK: result = hipModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues);
result = cuModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues);
// CUDA: CUresult CUDAAPI cuModuleUnload(CUmodule hmod);
// HIP: hipError_t hipModuleUnload(hipModule_t module);
// CHECK: result = hipModuleUnload(module_);
result = cuModuleUnload(module_);
// CUDA: CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray);
// HIP: hipError_t hipArray3DCreate(hipArray** array, const HIP_ARRAY3D_DESCRIPTOR* pAllocateArray);
// CHECK: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
// CHECK-NEXT: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
result = cuArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR);
result = cuArray3DCreate_v2(&array_, &ARRAY3D_DESCRIPTOR);
// CUDA: CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray);
// HIP: hipError_t hipArrayCreate(hipArray** pHandle, const HIP_ARRAY_DESCRIPTOR* pAllocateArray);
// CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR);
// CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR);
result = cuArrayCreate(&array_, &ARRAY_DESCRIPTOR);
result = cuArrayCreate_v2(&array_, &ARRAY_DESCRIPTOR);
// CUDA: CUresult CUDAAPI cuArrayDestroy(CUarray hArray);
// HIP: hipError_t hipArrayDestroy(hipArray* array);
// CHECK: result = hipArrayDestroy(array_);
result = cuArrayDestroy(array_);
std::string pciBusId;
// CUDA: CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId);
// HIP: hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId);
// CHECK: result = hipDeviceGetByPCIBusId(&device, pciBusId.c_str());
result = cuDeviceGetByPCIBusId(&device, pciBusId.c_str());
int len = 0;
char* pciBusId_ = const_cast<char*>(pciBusId.c_str());
// CUDA: CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev);
// HIP: hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, int device);
// CHECK: result = hipDeviceGetPCIBusId(pciBusId_, len, device);
result = cuDeviceGetPCIBusId(pciBusId_, len, device);
// CUDA: CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr);
// HIP: hipError_t hipIpcCloseMemHandle(void* devPtr);
// CHECK: result = hipIpcCloseMemHandle(deviceptr);
result = cuIpcCloseMemHandle(deviceptr);
// CUDA: CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event);
// HIP: hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, hipEvent_t event);
// CHECK: result = hipIpcGetEventHandle(&ipcEventHandle, event_);
result = cuIpcGetEventHandle(&ipcEventHandle, event_);
// CUDA: CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr);
// HIP: hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr);
// CHECK: result = hipIpcGetMemHandle(&ipcMemHandle, deviceptr);
result = cuIpcGetMemHandle(&ipcMemHandle, deviceptr);
// CUDA: CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle);
// HIP: hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle);
// CHECK: result = hipIpcOpenEventHandle(&event_, ipcEventHandle);
result = cuIpcOpenEventHandle(&event_, ipcEventHandle);
// CUDA: CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags);
// HIP: hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags);
// CHECK: result = hipIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags);
result = cuIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags);
// CUDA: CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize);
// HIP: hipError_t hipMalloc(void** ptr, size_t size);
// CHECK: result = hipMalloc(&deviceptr, bytes);
// CHECK-NEXT: result = hipMalloc(&deviceptr, bytes);
result = cuMemAlloc(&deviceptr, bytes);
result = cuMemAlloc_v2(&deviceptr, bytes);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////// TODO: Get rid of additional attribute 'unsigned int flags' used by HIP without a default value ///////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// CUDA: CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize);
// HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags);
// TODO: should be hipHostAlloc(&image, bytes, 0);
// CHECK: result = hipHostAlloc(&image, bytes);
// CHECK-NEXT: result = hipHostAlloc(&image, bytes);
result = cuMemAllocHost(&image, bytes);
result = cuMemAllocHost_v2(&image, bytes);
// CUDA: CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags);
// HIP: hipError_t hipMallocManaged(void** dev_ptr, size_t size, unsigned int flags __dparm(hipMemAttachGlobal));
// CHECK: result = hipMallocManaged(&deviceptr, bytes, flags);
result = cuMemAllocManaged(&deviceptr, bytes, flags);
size_t pitch = 0, width = 0, height = 0;
// CUDA: CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes);
// HIP: hipError_t hipMemAllocPitch(hipDeviceptr_t* dptr, size_t* pitch, size_t widthInBytes, size_t height, unsigned int elementSizeBytes);
// CHECK: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
// CHECK-NEXT: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
result = cuMemAllocPitch(&deviceptr, &pitch, width, height, bytes);
result = cuMemAllocPitch_v2(&deviceptr, &pitch, width, height, bytes);
// CUDA: CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy);
// HIP: hipError_t hipMemcpyParam2D(const hip_Memcpy2D* pCopy);
// CHECK: result = hipMemcpyParam2D(&MEMCPY2D);
// CHECK-NEXT: result = hipMemcpyParam2D(&MEMCPY2D);
result = cuMemcpy2D(&MEMCPY2D);
result = cuMemcpy2D_v2(&MEMCPY2D);
// CUDA: CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream);
// HIP: hipError_t hipMemcpyParam2DAsync(const hip_Memcpy2D* pCopy, hipStream_t stream __dparm(0));
// CHECK: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
// CHECK-NEXT: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream);
result = cuMemcpy2DAsync(&MEMCPY2D, stream);
result = cuMemcpy2DAsync_v2(&MEMCPY2D, stream);
// CUDA: CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy);
// HIP: hipError_t hipDrvMemcpy2DUnaligned(const hip_Memcpy2D* pCopy);
// CHECK: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
// CHECK-NEXT: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D);
result = cuMemcpy2DUnaligned(&MEMCPY2D);
result = cuMemcpy2DUnaligned_v2(&MEMCPY2D);
// CUDA: CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy);
// HIP: hipError_t hipDrvMemcpy3D(const HIP_MEMCPY3D* pCopy);
// CHECK: result = hipDrvMemcpy3D(&MEMCPY3D);
// CHECK-NEXT: result = hipDrvMemcpy3D(&MEMCPY3D);
result = cuMemcpy3D(&MEMCPY3D);
result = cuMemcpy3D_v2(&MEMCPY3D);
// CUDA: CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream);
// HIP: hipError_t hipDrvMemcpy3DAsync(const HIP_MEMCPY3D* pCopy, hipStream_t stream);
// CHECK: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
// CHECK-NEXT: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream);
result = cuMemcpy3DAsync(&MEMCPY3D, stream);
result = cuMemcpy3DAsync_v2(&MEMCPY3D, stream);
void* dsthost = nullptr;
size_t offset = 0;
// CUDA: CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount);
// HIP: hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count);
// CHECK: result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
// CHECK-NEXT: result = hipMemcpyAtoH(dsthost, array_, offset, bytes);
result = cuMemcpyAtoH(dsthost, array_, offset, bytes);
result = cuMemcpyAtoH_v2(dsthost, array_, offset, bytes);
// CUDA: CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount);
// HIP: hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes);
// CHECK: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
// CHECK-NEXT: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes);
result = cuMemcpyDtoD(deviceptr, deviceptr, bytes);
result = cuMemcpyDtoD_v2(deviceptr, deviceptr, bytes);
// CUDA: CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
// HIP: hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
// CHECK-NEXT: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
result = cuMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream);
result = cuMemcpyDtoDAsync_v2(deviceptr, deviceptr, bytes, stream);
// CUDA: CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount);
// HIP: hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t sizeBytes);
// CHECK: result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
// CHECK-NEXT: result = hipMemcpyDtoH(dsthost, deviceptr, bytes);
result = cuMemcpyDtoH(dsthost, deviceptr, bytes);
result = cuMemcpyDtoH_v2(dsthost, deviceptr, bytes);
// CUDA: CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream);
// HIP: hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
// CHECK-NEXT: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
result = cuMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream);
result = cuMemcpyDtoHAsync_v2(dsthost, deviceptr, bytes, stream);
// CUDA: CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount);
// HIP: hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count);
// CHECK: result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
// CHECK-NEXT: result = hipMemcpyHtoA(array_, offset, dsthost, bytes);
result = cuMemcpyHtoA(array_, offset, dsthost, bytes);
result = cuMemcpyHtoA_v2(array_, offset, dsthost, bytes);
// CUDA: CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount);
// HIP: hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t sizeBytes);
// CHECK: result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
// CHECK-NEXT: result = hipMemcpyHtoD(deviceptr, dsthost, bytes);
result = cuMemcpyHtoD(deviceptr, dsthost, bytes);
result = cuMemcpyHtoD_v2(deviceptr, dsthost, bytes);
// CUDA: CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream);
// HIP: hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t sizeBytes, hipStream_t stream);
// CHECK: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
// CHECK-NEXT: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
result = cuMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream);
result = cuMemcpyHtoDAsync_v2(deviceptr, dsthost, bytes, stream);
// CUDA: CUresult CUDAAPI cuMemFree(CUdeviceptr dptr);
// HIP: hipError_t hipFree(void* ptr);
// CHECK: result = hipFree(deviceptr);
// CHECK-NEXT: result = hipFree(deviceptr);
result = cuMemFree(deviceptr);
result = cuMemFree_v2(deviceptr);
// CUDA: CUresult CUDAAPI cuMemFreeHost(void *p);
// HIP: hipError_t hipHostFree(void* ptr);
// CHECK: result = hipHostFree(image);
result = cuMemFreeHost(image);
// CUDA: CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr);
// HIP: hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr);
// CHECK: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
// CHECK-NEXT: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
result = cuMemGetAddressRange(&deviceptr, &bytes, deviceptr_2);
result = cuMemGetAddressRange_v2(&deviceptr, &bytes, deviceptr_2);
// CUDA: CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total);
// HIP: hipError_t hipMemGetInfo(size_t* free, size_t* total);
// CHECK: result = hipMemGetInfo(&bytes, &bytes_2);
// CHECK-NEXT: result = hipMemGetInfo(&bytes, &bytes_2);
result = cuMemGetInfo(&bytes, &bytes_2);
result = cuMemGetInfo_v2(&bytes, &bytes_2);
// CUDA: CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags);
// HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags);
// CHECK: result = hipHostAlloc(&image, bytes, flags);
result = cuMemHostAlloc(&image, bytes, flags);
// CUDA: CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags);
// HIP: hipError_t hipHostGetDevicePointer(void** devPtr, void* hstPtr, unsigned int flags);
// CHECK: result = hipHostGetDevicePointer(&deviceptr, image, flags);
// CHECK-NEXT: result = hipHostGetDevicePointer(&deviceptr, image, flags);
result = cuMemHostGetDevicePointer(&deviceptr, image, flags);
result = cuMemHostGetDevicePointer_v2(&deviceptr, image, flags);
// CUDA: CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p);
// HIP: hipError_t hipHostGetFlags(unsigned int* flagsPtr, void* hostPtr);
// CHECK: result = hipHostGetFlags(&flags, image);
result = cuMemHostGetFlags(&flags, image);
// CUDA: CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags);
// HIP: hipError_t hipHostRegister(void* hostPtr, size_t sizeBytes, unsigned int flags);
// CHECK: result = hipHostRegister(image, bytes, flags);
// CHECK-NEXT: result = hipHostRegister(image, bytes, flags);
result = cuMemHostRegister(image, bytes, flags);
result = cuMemHostRegister_v2(image, bytes, flags);
// CUDA: CUresult CUDAAPI cuMemHostUnregister(void *p);
// HIP: hipError_t hipHostUnregister(void* hostPtr);
// CHECK: result = hipHostUnregister(image);
result = cuMemHostUnregister(image);
unsigned short us = 0;
// CUDA: CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N);
// HIP: hipError_t hipMemsetD16(hipDeviceptr_t dest, unsigned short value, size_t count);
// CHECK: result = hipMemsetD16(deviceptr, us, bytes);
// CHECK-NEXT: result = hipMemsetD16(deviceptr, us, bytes);
result = cuMemsetD16(deviceptr, us, bytes);
result = cuMemsetD16_v2(deviceptr, us, bytes);
// CUDA: CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream);
// HIP: hipError_t hipMemsetD16Async(hipDeviceptr_t dest, unsigned short value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD16Async(deviceptr, us, bytes, stream);
result = cuMemsetD16Async(deviceptr, us, bytes, stream);
// CUDA: CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N)
// HIP: hipError_t hipMemsetD32(hipDeviceptr_t dest, int value, size_t count);
// CHECK: result = hipMemsetD32(deviceptr, flags, bytes);
// CHECK-NEXT: result = hipMemsetD32(deviceptr, flags, bytes);
result = cuMemsetD32(deviceptr, flags, bytes);
result = cuMemsetD32_v2(deviceptr, flags, bytes);
// CUDA: CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream);
// HIP: hipError_t hipMemsetD32Async(hipDeviceptr_t dst, int value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD32Async(deviceptr, flags, bytes, stream);
result = cuMemsetD32Async(deviceptr, flags, bytes, stream);
unsigned char uc = 0;
// CUDA: CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N);
// HIP: hipError_t hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t count);
// CHECK: result = hipMemsetD8(deviceptr, uc, bytes);
// CHECK-NEXT: result = hipMemsetD8(deviceptr, uc, bytes);
result = cuMemsetD8(deviceptr, uc, bytes);
result = cuMemsetD8_v2(deviceptr, uc, bytes);
// CUDA: CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream);
// HIP: hipError_t hipMemsetD8Async(hipDeviceptr_t dest, unsigned char value, size_t count, hipStream_t stream __dparm(0));
// CHECK: result = hipMemsetD8Async(deviceptr, uc, bytes, stream);
result = cuMemsetD8Async(deviceptr, uc, bytes, stream);
// CUDA: CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels);
// HIP: hipError_t hipMipmappedArrayCreate(hipMipmappedArray_t* pHandle, HIP_ARRAY3D_DESCRIPTOR* pMipmappedArrayDesc, unsigned int numMipmapLevels);
// CHECK: result = hipMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags);
result = cuMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags);
// CUDA: CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray);
// HIP: hipError_t hipMipmappedArrayDestroy(hipMipmappedArray_t hMipmappedArray);
// CHECK: result = hipMipmappedArrayDestroy(mipmappedArray);
result = cuMipmappedArrayDestroy(mipmappedArray);
// CUDA: CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level);
// HIP: hipError_t hipMipmappedArrayGetLevel(hipArray_t* pLevelArray, hipMipmappedArray_t hMipMappedArray, unsigned int level);
// CHECK: result = hipMipmappedArrayGetLevel(&array_, mipmappedArray, flags);
result = cuMipmappedArrayGetLevel(&array_, mipmappedArray, flags);
return 0;
}
|
e0ab71fcf075fe9053984cc803fb5544244ff776.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include "distort.h"
float calc_shift(float x1, float x2, float cx, float k, float thresh)
{
float x3 = x1 + (x2 - x1) * 0.5f;
float result1 = x1 + ((x1 - cx) * k * ((x1 - cx) * (x1 - cx)));
float result3 = x3 + ((x3 - cx) * k * ((x3 - cx) * (x3 - cx)));
if(result1 > -thresh and result1 < thresh)
return x1;
if(result3 < 0)
{
return calc_shift(x3, x2, cx, k, thresh);
}
else
{
return calc_shift(x1, x3, cx, k, thresh);
}
}
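// Radial (barrel) distortion model: after the scale/shift normalization, each
// coordinate is displaced by K * r^2 times its offset from the distortion
// center, where r is the distance to (centerX, centerY).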
__host__ __device__
inline float getRadialX(float x, float y, const struct Properties* prop)
{
x = (x * prop->xscale + prop->xshift);
y = (y * prop->yscale + prop->yshift);
float result = x + ((x - prop->centerX) * prop->K *
((x - prop->centerX) * (x - prop->centerX) + (y - prop->centerY) * (y - prop->centerY)));
return result;
}
__host__ __device__
inline float getRadialY(float x, float y, const struct Properties* prop)
{
x = (x * prop->xscale + prop->xshift);
y = (y * prop->yscale + prop->yshift);
float result = y + ((y - prop->centerY) * prop->K *
((x - prop->centerX) * (x - prop->centerX) + (y - prop->centerY) * (y - prop->centerY)));
return result;
}
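// Bilinearly interpolates the source image at fractional coordinates
// (idx0 = row, idx1 = column); out-of-bounds lookups return black.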
__host__ __device__
inline void sampleImageTest(const uchar3* src, float idx0, float idx1,
uchar3& result, const struct Properties* prop)
{
// if any index is out of bounds
if((idx0 < 0) || (idx1 < 0) || (idx0 > prop->height - 1) || (idx1 > prop->width - 1))
{
result.x = 0;
result.y = 0;
result.z = 0;
return;
}
int idx0_floor = (int)floorf(idx0);
int idx0_ceil = (int)ceilf(idx0);
int idx1_floor = (int)floorf(idx1);
int idx1_ceil = (int)ceilf(idx1);
uchar3 s1 = src[(idx0_floor * prop->width) + idx1_floor];
uchar3 s2 = src[(idx0_floor * prop->width) + idx1_ceil];
uchar3 s3 = src[(idx0_ceil * prop->width) + idx1_ceil];
uchar3 s4 = src[(idx0_ceil * prop->width) + idx1_floor];
float x = idx0 - idx0_floor;
float y = idx1 - idx1_floor;
result.x = s1.x * (1.f - x) * (1.f - y) + s2.x * (1.f - x) * y + s3.x * x * y + s4.x * x * (1.f - y);
result.y = s1.y * (1.f - x) * (1.f - y) + s2.y * (1.f - x) * y + s3.y * x * y + s4.y * x * (1.f - y);
result.z = s1.z * (1.f - x) * (1.f - y) + s2.z * (1.f - x) * y + s3.z * x * y + s4.z * x * (1.f - y);
}
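// One thread per destination pixel: map (w, h) through the radial distortion
// and bilinearly sample the source image at the resulting coordinates.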
__global__ void barrel_distort (
const uchar3 *__restrict__ src,
uchar3 *__restrict__ dst,
const struct Properties *__restrict__ prop)
{
int h = blockIdx.y * blockDim.y + threadIdx.y;
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w < prop->width && h < prop->height) {
float x = getRadialX((float)w, (float)h, prop);
float y = getRadialY((float)w, (float)h, prop);
uchar3 temp;
sampleImageTest(src, y, x, temp, prop);
dst[(h * prop->width) + w] = temp;
}
}
void reference (
const uchar3 *src,
uchar3 *dst,
const struct Properties *prop)
{
for (int h = 0; h < prop->height; h++) {
for (int w = 0; w < prop->width; w++) {
float x = getRadialX((float)w, (float)h, prop);
float y = getRadialY((float)w, (float)h, prop);
uchar3 temp;
sampleImageTest(src, y, x, temp, prop);
dst[(h * prop->width) + w] = temp;
}
}
}
| e0ab71fcf075fe9053984cc803fb5544244ff776.cu | #include <cmath>
#include "distort.h"
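// Bisection search: finds the x in [x1, x2] at which the cubic distortion
// x + k * (x - cx)^3 crosses zero, to within +/- thresh.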
float calc_shift(float x1, float x2, float cx, float k, float thresh)
{
float x3 = x1 + (x2 - x1) * 0.5f;
float result1 = x1 + ((x1 - cx) * k * ((x1 - cx) * (x1 - cx)));
float result3 = x3 + ((x3 - cx) * k * ((x3 - cx) * (x3 - cx)));
if(result1 > -thresh and result1 < thresh)
return x1;
if(result3 < 0)
{
return calc_shift(x3, x2, cx, k, thresh);
}
else
{
return calc_shift(x1, x3, cx, k, thresh);
}
}
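// Radial (barrel) distortion model: after the scale/shift normalization, each
// coordinate is displaced by K * r^2 times its offset from the distortion
// center, where r is the distance to (centerX, centerY).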
__host__ __device__
inline float getRadialX(float x, float y, const struct Properties* prop)
{
x = (x * prop->xscale + prop->xshift);
y = (y * prop->yscale + prop->yshift);
float result = x + ((x - prop->centerX) * prop->K *
((x - prop->centerX) * (x - prop->centerX) + (y - prop->centerY) * (y - prop->centerY)));
return result;
}
__host__ __device__
inline float getRadialY(float x, float y, const struct Properties* prop)
{
x = (x * prop->xscale + prop->xshift);
y = (y * prop->yscale + prop->yshift);
float result = y + ((y - prop->centerY) * prop->K *
((x - prop->centerX) * (x - prop->centerX) + (y - prop->centerY) * (y - prop->centerY)));
return result;
}
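// Bilinearly interpolates the source image at fractional coordinates
// (idx0 = row, idx1 = column); out-of-bounds lookups return black.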
__host__ __device__
inline void sampleImageTest(const uchar3* src, float idx0, float idx1,
uchar3& result, const struct Properties* prop)
{
// if any index is out of bounds
if((idx0 < 0) || (idx1 < 0) || (idx0 > prop->height - 1) || (idx1 > prop->width - 1))
{
result.x = 0;
result.y = 0;
result.z = 0;
return;
}
int idx0_floor = (int)floorf(idx0);
int idx0_ceil = (int)ceilf(idx0);
int idx1_floor = (int)floorf(idx1);
int idx1_ceil = (int)ceilf(idx1);
uchar3 s1 = src[(idx0_floor * prop->width) + idx1_floor];
uchar3 s2 = src[(idx0_floor * prop->width) + idx1_ceil];
uchar3 s3 = src[(idx0_ceil * prop->width) + idx1_ceil];
uchar3 s4 = src[(idx0_ceil * prop->width) + idx1_floor];
float x = idx0 - idx0_floor;
float y = idx1 - idx1_floor;
result.x = s1.x * (1.f - x) * (1.f - y) + s2.x * (1.f - x) * y + s3.x * x * y + s4.x * x * (1.f - y);
result.y = s1.y * (1.f - x) * (1.f - y) + s2.y * (1.f - x) * y + s3.y * x * y + s4.y * x * (1.f - y);
result.z = s1.z * (1.f - x) * (1.f - y) + s2.z * (1.f - x) * y + s3.z * x * y + s4.z * x * (1.f - y);
}
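// One thread per destination pixel: map (w, h) through the radial distortion
// and bilinearly sample the source image at the resulting coordinates.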
__global__ void barrel_distort (
const uchar3 *__restrict__ src,
uchar3 *__restrict__ dst,
const struct Properties *__restrict__ prop)
{
int h = blockIdx.y * blockDim.y + threadIdx.y;
int w = blockIdx.x * blockDim.x + threadIdx.x;
if (w < prop->width && h < prop->height) {
float x = getRadialX((float)w, (float)h, prop);
float y = getRadialY((float)w, (float)h, prop);
uchar3 temp;
sampleImageTest(src, y, x, temp, prop);
dst[(h * prop->width) + w] = temp;
}
}
void reference (
const uchar3 *src,
uchar3 *dst,
const struct Properties *prop)
{
for (int h = 0; h < prop->height; h++) {
for (int w = 0; w < prop->width; w++) {
float x = getRadialX((float)w, (float)h, prop);
float y = getRadialY((float)w, (float)h, prop);
uchar3 temp;
sampleImageTest(src, y, x, temp, prop);
dst[(h * prop->width) + w] = temp;
}
}
}
|
aadae5c39048b64f6b833696c41a817cfd4d0d60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
} | aadae5c39048b64f6b833696c41a817cfd4d0d60.cu | #include "includes.h"
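// Swaps whole columns between two column-major matrices with 'height' rows:
// for each of the first 'cols' pairs, column indices1[j] of 'source' is
// exchanged element-wise with column indices2[j] of 'target'.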
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
} |
7b4514ddaa06ce38f2f63e13235f98f112cfd95f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 10000000
__global__ void c_code(void){
}
int main(void) {
hipLaunchKernelGGL(c_code, dim3(1), dim3(1), 0, 0);
int A[10000000], B[10000000], C[10000000];
int i;
for (i=0; i<N; i++) {
C[i] = A[i] + B[i];
};
return 0;
}
| 7b4514ddaa06ce38f2f63e13235f98f112cfd95f.cu | #include <stdio.h>
#define N 10000000
__global__ void c_code(void){
}
int main(void) {
c_code<<<1,1>>>();
int A[10000000], B[10000000], C[10000000];
int i;
for (i=0; i<N; i++) {
C[i] = A[i] + B[i];
};
return 0;
}
|
81a78adbda17b1e41b5a7ab335605637366ffc98.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
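// Forward pass with CSR-encoded block-sparse weights: each warp walks a block
// of nonzeros for one output neuron (row_id), accumulates window_size-wide dot
// products for OUTPUT_ELEM_COUNT_BLOCK_SIZE entries at once, reduces lanes via
// __shfl_xor, and lane 0 atomically adds the sums into output_neurons.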
__global__ void sparse_fully_connected_upd_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
int window_it_count = (window_size + 31) >> 5;
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
if (lane_id == 0)
{
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
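// Backward pass w.r.t. the input: each warp takes one nonzero (row_id,
// column_id), caches the output errors for a small block of entries in shared
// memory, and scatters the weighted errors into input_errors with atomicAdd.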
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
__global__ void sparse_fully_connected_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = thread_id_x >> 5;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int max_local_entry_count = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, entry_count - base_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
valid[i] = (i < max_local_entry_count);
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
int column_id = __load_nc(column_indices + base_nnz_index);
int window_it_count = (window_size + 31) >> 5;
int lane_id = thread_id_x & 31;
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * output_errors_sh = arr_sh + warp_id * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
if (lane_id < max_local_entry_count)
output_errors_sh[lane_id] = __load_nc(output_errors + (int)((base_entry_id + lane_id) * output_elem_count_per_entry + row_id));
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(base_nnz_index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++k)
{
if (valid[k])
{
float input_error = output_errors_sh[k] * w;
atomicAdd(input_errors + (int)(entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id), input_error);
}
}
}
local_weight_id += 32;
}
}
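// Weight gradient: for each nonzero, lanes accumulate the product of the
// cached output errors and the matching input window over an entry block,
// then atomically add the sums into gradient_weights.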
__global__ void sparse_fully_connected_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_block_size,
int entry_count,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = thread_id_x >> 5;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * entry_block_size;
if (base_entry_id >= entry_count)
return;
int local_entry_count = min(entry_block_size, entry_count - base_entry_id);
int column_id = __load_nc(column_indices + base_nnz_index);
int window_it_count = (window_size + 31) >> 5;
int lane_id = thread_id_x & 31;
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * output_errors_sh = arr_sh + warp_id * entry_block_size;
if (lane_id < local_entry_count)
output_errors_sh[lane_id] = __load_nc(output_errors + (int)((base_entry_id + lane_id) * output_elem_count_per_entry + row_id));
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float sum = 0.0F;
const float * current_input_neurons = input_neurons + base_entry_id * input_elem_count_per_entry + column_id * window_size + local_weight_id;
for(int k = 0; k < local_entry_count; ++k)
{
sum += output_errors_sh[k] * __load_nc(current_input_neurons);
current_input_neurons += input_elem_count_per_entry;
}
atomicAdd(gradient_weights + (int)(base_nnz_index * window_size + local_weight_id), sum);
}
local_weight_id += 32;
}
}
const int sparse_fully_connected_layer_updater_cuda::max_input_feature_map_block_size = 32;
const int sparse_fully_connected_layer_updater_cuda::absolute_min_update_entry_count_block_size = 4;
const int sparse_fully_connected_layer_updater_cuda::absolute_max_update_entry_count_block_size = 32;
sparse_fully_connected_layer_updater_cuda::sparse_fully_connected_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_layer_updater_cuda::~sparse_fully_connected_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
hipLaunchKernelGGL(sparse_fully_connected_upd_kernel, dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*output_neurons_buffer,
*input_neurons_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
1));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_neurons_buffer));
}
}
void sparse_fully_connected_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
if (!backprop_required)
throw neural_network_exception("sparse_fully_connected_layer_updater_cuda is not configured to do backprop but requested to");
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * max_column_index_count_per_row,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (threadblock_size / 32) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE * sizeof(float);
hipLaunchKernelGGL(sparse_fully_connected_backprop_upd_kernel, dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*output_errors_buffer,
*input_errors_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_count,
window_size);
}
void sparse_fully_connected_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
// Update weights
{
std::pair<int, int> entry_block_size_and_count = get_update_entry_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * max_column_index_count_per_row,
output_elem_count_per_entry,
entry_block_size_and_count.second,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (threadblock_size / 32) * entry_block_size_and_count.first * sizeof(float);
hipLaunchKernelGGL(sparse_fully_connected_update_weights_kernel, dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*output_errors_buffer,
*input_neurons_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_block_size_and_count.first,
entry_count,
window_size);
}
// Update biases
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
bool sparse_fully_connected_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
void sparse_fully_connected_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
window_size = 1;
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_size *= *it;
int input_data_single_backprop_entry_size = input_elem_count_per_entry * sizeof(float);
max_update_entry_count_block_size = ::min(::max(absolute_min_update_entry_count_block_size, cuda_config->l2_cache_size / 2 / input_data_single_backprop_entry_size), absolute_max_update_entry_count_block_size);
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
std::vector<size_t> sparse_fully_connected_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
return res;
}
void sparse_fully_connected_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = ::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
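// Splits a row's nonzeros into near-equal blocks capped at
// max_input_feature_map_block_size. Worked example: 70 nonzeros with a cap of
// 32 gives ceil(70/32) = 3 blocks of ceil(70/3) = 24 columns each.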
std::pair<int, int> sparse_fully_connected_layer_updater_cuda::get_input_feature_map_block_size_and_count() const
{
int candidate_block_size = max_column_index_count_per_row;
if (candidate_block_size <= max_input_feature_map_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
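// Same even-split strategy applied to the entry dimension of the weight
// update, capped at max_update_entry_count_block_size (sized from the L2
// cache in updater_configured).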
std::pair<int, int> sparse_fully_connected_layer_updater_cuda::get_update_entry_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = entry_count;
if (candidate_block_size <= max_update_entry_count_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_update_entry_count_block_size - 1) / max_update_entry_count_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
| 81a78adbda17b1e41b5a7ab335605637366ffc98.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
extern __shared__ float arr_sh[];
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
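/*
Function: sparse fully connected layer (forward pass)
One warp per input feature-map block of CSR row row_id: column indices are
staged in shared memory, each lane accumulates weight * input products over
the window, the partial sums are reduced across the warp with __shfl_xor, and
lane 0 adds the result into output_neurons for OUTPUT_ELEM_COUNT_BLOCK_SIZE
entries at once.
*/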
__global__ void sparse_fully_connected_upd_kernel(
float * __restrict output_neurons,
const float * __restrict input_neurons,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int input_feature_map_block_size,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * input_feature_map_block_size;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int it_count = min(input_feature_map_block_size, end_column_index - base_nnz_index);
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile int * column_indices_sh = (int *)arr_sh;
if (lane_id < it_count)
column_indices_sh[thread_id] = column_indices[base_nnz_index + lane_id];
int window_it_count = (window_size + 31) >> 5;
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
{
valid[i] = (i < (entry_count - base_entry_id));
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
for(int i = 0; i < it_count; ++i)
{
int index = base_nnz_index + i;
int column_id = column_indices_sh[warp_id * 32 + i];
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++k)
{
float inp = __load_nc(input_neurons + entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id);
sums[k] += w * inp;
}
}
local_weight_id += 32;
}
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
}
if (lane_id == 0)
{
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(output_neurons + (base_entry_id + i) * output_elem_count_per_entry + row_id, sums[i]);
}
}
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
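/*
Function: sparse fully connected layer (backward pass, error propagation)
Each warp owns one CSR nonzero of row row_id; a z-block covers
OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE entries whose output errors are staged in
shared memory, then each lane walks the weight window and scatters
output_error * weight into input_errors with atomicAdd.
Args:
  output_errors  : errors at the layer output, [entry_count, output_elem_count_per_entry]
  input_errors   : errors accumulated at the layer input (pre-zeroed by the caller)
  weights        : one dense window of window_size floats per CSR nonzero
  column_indices : CSR column indices (input feature-map ids)
  row_ptrs       : CSR row pointers, one row per output element
*/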
__global__ void sparse_fully_connected_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_count,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = thread_id_x >> 5;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
if (base_entry_id >= entry_count)
return;
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int entry_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int max_local_entry_count = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, entry_count - base_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
valid[i] = (i < max_local_entry_count);
entry_ids[i] = valid[i] ? (base_entry_id + i) : (entry_count - 1);
}
int column_id = __load_nc(column_indices + base_nnz_index);
int window_it_count = (window_size + 31) >> 5;
int lane_id = thread_id_x & 31;
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * output_errors_sh = arr_sh + warp_id * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
if (lane_id < max_local_entry_count)
output_errors_sh[lane_id] = __load_nc(output_errors + (int)((base_entry_id + lane_id) * output_elem_count_per_entry + row_id));
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float w = __load_nc(weights + (int)(base_nnz_index * window_size + local_weight_id));
#pragma unroll
for(int k = 0; k < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++k)
{
if (valid[k])
{
float input_error = output_errors_sh[k] * w;
atomicAdd(input_errors + (int)(entry_ids[k] * input_elem_count_per_entry + column_id * window_size + local_weight_id), input_error);
}
}
}
local_weight_id += 32;
}
}
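/*
Function: sparse fully connected layer (weight gradient)
Same thread mapping as the backprop kernel, but the reduction runs the other
way: each lane accumulates output_error * input_neuron over the
local_entry_count entries of its block before issuing a single atomicAdd into
gradient_weights.
*/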
__global__ void sparse_fully_connected_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
int entry_block_size,
int entry_count,
int window_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = thread_id_x >> 5;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int base_entry_id = (blockIdx.z * blockDim.z + threadIdx.z) * entry_block_size;
if (base_entry_id >= entry_count)
return;
int local_entry_count = min(entry_block_size, entry_count - base_entry_id);
int column_id = __load_nc(column_indices + base_nnz_index);
int window_it_count = (window_size + 31) >> 5;
int lane_id = thread_id_x & 31;
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * output_errors_sh = arr_sh + warp_id * entry_block_size;
if (lane_id < local_entry_count)
output_errors_sh[lane_id] = __load_nc(output_errors + (int)((base_entry_id + lane_id) * output_elem_count_per_entry + row_id));
int local_weight_id = lane_id;
for(int j = 0; j < window_it_count; ++j)
{
if (local_weight_id < window_size)
{
float sum = 0.0F;
const float * current_input_neurons = input_neurons + base_entry_id * input_elem_count_per_entry + column_id * window_size + local_weight_id;
for(int k = 0; k < local_entry_count; ++k)
{
sum += output_errors_sh[k] * __load_nc(current_input_neurons);
current_input_neurons += input_elem_count_per_entry;
}
atomicAdd(gradient_weights + (int)(base_nnz_index * window_size + local_weight_id), sum);
}
local_weight_id += 32;
}
}
const int sparse_fully_connected_layer_updater_cuda::max_input_feature_map_block_size = 32;
const int sparse_fully_connected_layer_updater_cuda::absolute_min_update_entry_count_block_size = 4;
const int sparse_fully_connected_layer_updater_cuda::absolute_max_update_entry_count_block_size = 32;
sparse_fully_connected_layer_updater_cuda::sparse_fully_connected_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_layer_updater_cuda::~sparse_fully_connected_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> input_feature_map_block_size_and_count = get_input_feature_map_block_size_and_count();
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * input_feature_map_block_size_and_count.second,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = threadblock_size * sizeof(float);
sparse_fully_connected_upd_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*output_neurons_buffer,
*input_neurons_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_count,
input_feature_map_block_size_and_count.first,
window_size);
// Add bias
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
1));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
CUDNN_ADD_SAME_C,
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_neurons_buffer));
}
}
void sparse_fully_connected_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
if (!backprop_required)
throw neural_network_exception("sparse_fully_connected_layer_updater_cuda is not configured to do backprop but requested to");
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * max_column_index_count_per_row,
output_elem_count_per_entry,
(entry_count + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (threadblock_size / 32) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE * sizeof(float);
sparse_fully_connected_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*output_errors_buffer,
*input_errors_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_count,
window_size);
}
void sparse_fully_connected_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count,
bool force_deterministic)
{
// Update weights
{
std::pair<int, int> entry_block_size_and_count = get_update_entry_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * max_column_index_count_per_row,
output_elem_count_per_entry,
entry_block_size_and_count.second,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (threadblock_size / 32) * entry_block_size_and_count.first * sizeof(float);
sparse_fully_connected_update_weights_kernel<<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*output_errors_buffer,
*input_neurons_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
input_elem_count_per_entry,
entry_block_size_and_count.first,
entry_count,
window_size);
}
// Update biases
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_safe_call(cudnnSetTensor4dDescriptor(
output_data_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
entry_count,
output_configuration_specific.feature_map_count,
1,
output_elem_count_per_feature_map));
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
bool sparse_fully_connected_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
void sparse_fully_connected_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
window_size = 1;
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_size *= *it;
int input_data_single_backprop_entry_size = input_elem_count_per_entry * sizeof(float);
max_update_entry_count_block_size = std::min(std::max(absolute_min_update_entry_count_block_size, cuda_config->l2_cache_size / 2 / input_data_single_backprop_entry_size), absolute_max_update_entry_count_block_size);
cudnn_safe_call(cudnnSetTensor4dDescriptor(
bias_desc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
1,
output_configuration_specific.feature_map_count,
1,
1));
}
std::vector<size_t> sparse_fully_connected_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
return res;
}
void sparse_fully_connected_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
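// Splits a row's nonzeros into near-equal blocks capped at
// max_input_feature_map_block_size. Worked example: 70 nonzeros with a cap of
// 32 gives ceil(70/32) = 3 blocks of ceil(70/3) = 24 columns each.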
std::pair<int, int> sparse_fully_connected_layer_updater_cuda::get_input_feature_map_block_size_and_count() const
{
int candidate_block_size = max_column_index_count_per_row;
if (candidate_block_size <= max_input_feature_map_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_input_feature_map_block_size - 1) / max_input_feature_map_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
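// Same even-split strategy applied to the entry dimension of the weight
// update, capped at max_update_entry_count_block_size (sized from the L2
// cache in updater_configured).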
std::pair<int, int> sparse_fully_connected_layer_updater_cuda::get_update_entry_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = entry_count;
if (candidate_block_size <= max_update_entry_count_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_update_entry_count_block_size - 1) / max_update_entry_count_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
|
57e8249745c131e4372d2270206e0c57cb154a3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <staggered_oprod.h>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
namespace quda {
namespace experimental {
#ifdef GPU_STAGGERED_DIRAC
namespace { // anonymous
#include <texture.h>
}
template<int N>
void createEventArray(hipEvent_t (&event)[N], unsigned int flags=hipEventDefault)
{
for(int i=0; i<N; ++i)
hipEventCreate(&event[i],flags);
return;
}
template<int N>
void destroyEventArray(hipEvent_t (&event)[N])
{
for(int i=0; i<N; ++i)
hipEventDestroy(event[i]);
}
static hipEvent_t packEnd;
static hipEvent_t gatherEnd[4];
static hipEvent_t scatterEnd[4];
static hipEvent_t oprodStart;
static hipEvent_t oprodEnd;
void createStaggeredOprodEvents(){
hipEventCreate(&packEnd, hipEventDisableTiming);
createEventArray(gatherEnd, hipEventDisableTiming);
createEventArray(scatterEnd, hipEventDisableTiming);
hipEventCreate(&oprodStart, hipEventDisableTiming);
hipEventCreate(&oprodEnd, hipEventDisableTiming);
return;
}
void destroyStaggeredOprodEvents(){
destroyEventArray(gatherEnd);
destroyEventArray(scatterEnd);
hipEventDestroy(packEnd);
hipEventDestroy(oprodStart);
hipEventDestroy(oprodEnd);
return;
}
enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL};
template<typename Float, typename Output, typename InputA, typename InputB>
struct StaggeredOprodArg {
unsigned int length;
int X[4];
unsigned int parity;
unsigned int dir;
unsigned int ghostOffset[4];
unsigned int displacement;
KernelType kernelType;
int nFace;
bool partitioned[4];
InputA inA;
InputB inB;
Output outA;
Output outB;
Float coeff[2];
StaggeredOprodArg(const unsigned int parity,
const unsigned int dir,
const unsigned int *ghostOffset,
const unsigned int displacement,
const KernelType& kernelType,
const int nFace,
const double coeff[2],
InputA& inA,
InputB& inB,
Output& outA,
Output& outB,
GaugeField& meta) :
length(meta.VolumeCB()), parity(parity), dir(dir),
displacement(displacement), kernelType(kernelType), nFace(nFace),
inA(inA), inB(inB), outA(outA), outB(outB)
{
this->coeff[0] = coeff[0];
this->coeff[1] = coeff[1];
for(int i=0; i<4; ++i) this->X[i] = meta.X()[i];
for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i];
for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false;
}
};
enum IndexType {
EVEN_X = 0,
EVEN_Y = 1,
EVEN_Z = 2,
EVEN_T = 3
};
template <IndexType idxType>
static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4],
const unsigned int cb_idx, const unsigned int parity, const int X[4])
{
const int &LX = X[0];
const int &LY = X[1];
const int &LZ = X[2];
const int XYZ = X[2]*X[1]*X[0];
const int XY = X[1]*X[0];
idx = 2*cb_idx;
int x, y, z, t;
if (idxType == EVEN_X /*!(LX & 1)*/) { // X even
// t = idx / XYZ;
// z = (idx / XY) % Z;
// y = (idx / X) % Y;
// idx += (parity + t + z + y) & 1;
// x = idx % X;
// equivalent to the above, but with fewer divisions/mods:
int aux1 = idx / LX;
x = idx - aux1 * LX;
int aux2 = aux1 / LY;
y = aux1 - aux2 * LY;
t = aux2 / LZ;
z = aux2 - t * LZ;
aux1 = (parity + t + z + y) & 1;
x += aux1;
idx += aux1;
} else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even
t = idx / XYZ;
z = (idx / XY) % LZ;
idx += (parity + t + z) & 1;
y = (idx / LX) % LY;
x = idx % LX;
} else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even
t = idx / XYZ;
idx += (parity + t) & 1;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
} else {
idx += parity;
t = idx / XYZ;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
}
c[0] = x;
c[1] = y;
c[2] = z;
c[3] = t;
}
// Get the coordinates for the exterior kernels
__device__ void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity)
{
int Xh[2] = {X[0]/2, X[1]/2};
switch(dir){
case 0:
x[2] = cb_idx/Xh[1] % X[2];
x[3] = cb_idx/(Xh[1]*X[2]) % X[3];
x[0] = cb_idx/(Xh[1]*X[2]*X[3]);
x[0] += (X[0] - displacement);
x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1);
break;
case 1:
x[2] = cb_idx/Xh[0] % X[2];
x[3] = cb_idx/(Xh[0]*X[2]) % X[3];
x[1] = cb_idx/(Xh[0]*X[2]*X[3]);
x[1] += (X[1] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 2:
x[1] = cb_idx/Xh[0] % X[1];
x[3] = cb_idx/(Xh[0]*X[1]) % X[3];
x[2] = cb_idx/(Xh[0]*X[1]*X[3]);
x[2] += (X[2] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 3:
x[1] = cb_idx/Xh[0] % X[1];
x[2] = cb_idx/(Xh[0]*X[1]) % X[2];
x[3] = cb_idx/(Xh[0]*X[1]*X[2]);
x[3] += (X[3] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
}
return;
}
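// Full-lattice neighbor lookup: returns the checkerboard index of the site at
// offset shift[] from cb_idx, or -1 when the neighbor would cross a
// partitioned boundary (those sites are covered by the exterior kernels).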
__device__ __forceinline__
int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){
int full_idx;
int x[4];
coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X);
for(int dim = 0; dim<4; ++dim){
if( partitioned[dim] )
if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1;
}
for(int dim=0; dim<4; ++dim){
x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim];
}
return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1;
}
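/*
Function: staggered outer product (interior)
For every bulk site, forms the outer product of the local spinor (inA) with
its first forward neighbor (inB) in each direction, and with the third
neighbor when nFace == 3, accumulating coeff[0]- and coeff[1]-scaled results
into outA and outB respectively.
*/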
template<typename real, typename Output, typename InputA, typename InputB>
__global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
typedef complex<real> Complex;
Complex x[3];
Complex y[3];
Complex z[3];
Matrix<Complex,3> result;
Matrix<Complex,3> tempA, tempB; // input
while(idx<arg.length){
arg.inA.load(x, idx);
#pragma unroll
for(int dim=0; dim<4; ++dim){
int shift[4] = {0,0,0,0};
shift[dim] = 1;
const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(first_nbr_idx >= 0){
arg.inB.load(y, first_nbr_idx);
outerProd(y,x,&result);
arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity);
result = tempA + result*arg.coeff[0];
arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity);
if (arg.nFace == 3) {
shift[dim] = 3;
const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(third_nbr_idx >= 0){
arg.inB.load(z, third_nbr_idx);
outerProd(z, x, &result);
arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity);
result = tempB + result*arg.coeff[1];
arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity);
}
}
}
} // dim
idx += gridSize;
}
return;
} // interiorOprodKernel
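/*
Function: staggered outer product (exterior)
Covers the boundary slab of depth displacement in direction dim: the local
spinor is read from the bulk, the neighbor spinor from the ghost buffer at
ghostOffset[dim], and the result is accumulated into outA (one hop) or outB
(three hops).
*/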
template<int dim, typename real, typename Output, typename InputA, typename InputB>
__global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg)
{
typedef complex<real> Complex;
unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
Complex a[3];
Complex b[3];
Matrix<Complex,3> result;
Matrix<Complex,3> inmatrix; // input
Output& out = (arg.displacement == 1) ? arg.outA : arg.outB;
real coeff = (arg.displacement == 1) ? arg.coeff[0] : arg.coeff[1];
int x[4];
while(cb_idx<arg.length){
coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity);
const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1);
out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity);
arg.inA.load(a, bulk_cb_idx);
const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx;
arg.inB.loadGhost(b, ghost_idx, arg.dir);
outerProd(b,a,&result);
result = inmatrix + result*coeff;
out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity);
cb_idx += gridSize;
}
return;
}
template<typename Float, typename Output, typename InputA, typename InputB>
class StaggeredOprodField : public Tunable {
private:
StaggeredOprodArg<Float,Output,InputA,InputB> &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.outA.volumeCB; }
bool tunedGridDim() const { return false; }
public:
StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta)
: arg(arg), meta(meta) {
writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride());
// this sets the communications pattern for the packing kernel
int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) };
setPackComms(comms);
}
virtual ~StaggeredOprodField() {}
void apply(const hipStream_t &stream){
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
// Disable tuning for the time being
TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity());
if (arg.kernelType == OPROD_INTERIOR_KERNEL) {
hipLaunchKernelGGL(interiorOprodKernel, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) {
if (arg.dir == 0) hipLaunchKernelGGL(exteriorOprodKernel<0>, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
else if (arg.dir == 1) hipLaunchKernelGGL(exteriorOprodKernel<1>, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
else if (arg.dir == 2) hipLaunchKernelGGL(exteriorOprodKernel<2>, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
else if (arg.dir == 3) hipLaunchKernelGGL(exteriorOprodKernel<3>, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
errorQuda("Kernel type not supported\n");
}
} else { // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
void preTune(){ this->arg.outA.save(); this->arg.outB.save(); }
void postTune(){ this->arg.outA.load(); this->arg.outB.load(); }
long long flops() const { return 0; } // FIXME
long long bytes() const { return 0; } // FIXME
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);}
}; // StaggeredOprodField
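// Packs, gathers, communicates and scatters the spinor's face buffers so that
// the exterior kernels can read forward neighbors owned by remote ranks.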
void exchangeGhost(cudaColorSpinorField &a, int parity, int dag) {
// need to enable packing in temporal direction to get spin-projector correct
bool pack_old = getKernelPackT();
setKernelPackT(true);
// first transfer src1
hipDeviceSynchronize();
a.pack(1, 1-parity, dag, Nstream-1, 0);
hipDeviceSynchronize();
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
// Initialize the host transfer from the source spinor
a.gather(1, dag, 2*i);
} // commDim(i)
} // i=3,..,0
hipDeviceSynchronize(); comm_barrier();
for (int i=3; i>=0; i--) {
if(commDimPartitioned(i)) {
a.commsStart(1, 2*i, dag);
}
}
for (int i=3; i>=0; i--) {
if(commDimPartitioned(i)) {
a.commsWait(1, 2*i, dag);
a.scatter(1, dag, 2*i);
}
}
hipDeviceSynchronize();
setKernelPackT(pack_old); // restore packing state
a.bufferIndex = (1 - a.bufferIndex);
comm_barrier();
}
template<typename Float, typename Output, typename InputA, typename InputB>
void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src,
const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace)
{
unsigned int ghostOffset[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one
// Create the arguments for the interior kernel
StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA);
StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA);
arg.kernelType = OPROD_INTERIOR_KERNEL;
arg.length = src.VolumeCB();
oprod.apply(streams[Nstream-1]);
for(int i=3; i>=0; i--){
if (commDimPartitioned(i)) {
// update parameters for this exterior kernel
arg.kernelType = OPROD_EXTERIOR_KERNEL;
arg.dir = i;
// First, do the one hop term
{
arg.displacement = 1;
arg.length = faceVolumeCB[i];
oprod.apply(streams[Nstream-1]);
}
// Now do the 3 hop term
if (nFace == 3) {
arg.displacement = 3;
arg.length = arg.displacement*faceVolumeCB[i];
oprod.apply(streams[Nstream-1]);
}
}
} // i=3,..,0
checkCudaError();
} // computeStaggeredOprodCuda
#endif // GPU_STAGGERED_DIRAC
void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd,
const unsigned int parity, const double coeff[2], int nFace)
{
#ifdef GPU_STAGGERED_DIRAC
if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outA.Order());
if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outB.Order());
if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision());
cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven);
cudaColorSpinorField &inB = (parity&1) ? static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd);
inA.allocateGhostBuffer(nFace);
inB.allocateGhostBuffer(nFace);
if (inEven.Precision() == QUDA_DOUBLE_PRECISION) {
Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace);
Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace);
exchangeGhost(static_cast<cudaColorSpinorField&>(inB), parity, 0);
computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB),
outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace);
} else if (inEven.Precision() == QUDA_SINGLE_PRECISION) {
Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace);
Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace);
exchangeGhost(static_cast<cudaColorSpinorField&>(inB), parity, 0);
computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB),
outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace);
} else {
errorQuda("Unsupported precision: %d\n", inEven.Precision());
}
#else // GPU_STAGGERED_DIRAC not defined
errorQuda("Staggered Outer Product has not been built!");
#endif
return;
} // computeStaggeredOprod
} // namespace experimental
} // namespace quda
| 57e8249745c131e4372d2270206e0c57cb154a3e.cu | #include <cstdio>
#include <cstdlib>
#include <staggered_oprod.h>
#include <tune_quda.h>
#include <quda_internal.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
namespace quda {
namespace experimental {
#ifdef GPU_STAGGERED_DIRAC
namespace { // anonymous
#include <texture.h>
}
template<int N>
void createEventArray(cudaEvent_t (&event)[N], unsigned int flags=cudaEventDefault)
{
for(int i=0; i<N; ++i)
cudaEventCreate(&event[i],flags);
return;
}
template<int N>
void destroyEventArray(cudaEvent_t (&event)[N])
{
for(int i=0; i<N; ++i)
cudaEventDestroy(event[i]);
}
static cudaEvent_t packEnd;
static cudaEvent_t gatherEnd[4];
static cudaEvent_t scatterEnd[4];
static cudaEvent_t oprodStart;
static cudaEvent_t oprodEnd;
void createStaggeredOprodEvents(){
cudaEventCreate(&packEnd, cudaEventDisableTiming);
createEventArray(gatherEnd, cudaEventDisableTiming);
createEventArray(scatterEnd, cudaEventDisableTiming);
cudaEventCreate(&oprodStart, cudaEventDisableTiming);
cudaEventCreate(&oprodEnd, cudaEventDisableTiming);
return;
}
void destroyStaggeredOprodEvents(){
destroyEventArray(gatherEnd);
destroyEventArray(scatterEnd);
cudaEventDestroy(packEnd);
cudaEventDestroy(oprodStart);
cudaEventDestroy(oprodEnd);
return;
}
enum KernelType {OPROD_INTERIOR_KERNEL, OPROD_EXTERIOR_KERNEL};
template<typename Float, typename Output, typename InputA, typename InputB>
struct StaggeredOprodArg {
unsigned int length;
int X[4];
unsigned int parity;
unsigned int dir;
unsigned int ghostOffset[4];
unsigned int displacement;
KernelType kernelType;
int nFace;
bool partitioned[4];
InputA inA;
InputB inB;
Output outA;
Output outB;
Float coeff[2];
StaggeredOprodArg(const unsigned int parity,
const unsigned int dir,
const unsigned int *ghostOffset,
const unsigned int displacement,
const KernelType& kernelType,
const int nFace,
const double coeff[2],
InputA& inA,
InputB& inB,
Output& outA,
Output& outB,
GaugeField& meta) :
length(meta.VolumeCB()), parity(parity), dir(dir),
displacement(displacement), kernelType(kernelType), nFace(nFace),
inA(inA), inB(inB), outA(outA), outB(outB)
{
this->coeff[0] = coeff[0];
this->coeff[1] = coeff[1];
for(int i=0; i<4; ++i) this->X[i] = meta.X()[i];
for(int i=0; i<4; ++i) this->ghostOffset[i] = ghostOffset[i];
for(int i=0; i<4; ++i) this->partitioned[i] = commDimPartitioned(i) ? true : false;
}
};
enum IndexType {
EVEN_X = 0,
EVEN_Y = 1,
EVEN_Z = 2,
EVEN_T = 3
};
template <IndexType idxType>
static __device__ __forceinline__ void coordsFromIndex(int& idx, int c[4],
const unsigned int cb_idx, const unsigned int parity, const int X[4])
{
const int &LX = X[0];
const int &LY = X[1];
const int &LZ = X[2];
const int XYZ = X[2]*X[1]*X[0];
const int XY = X[1]*X[0];
idx = 2*cb_idx;
int x, y, z, t;
if (idxType == EVEN_X /*!(LX & 1)*/) { // X even
// t = idx / XYZ;
// z = (idx / XY) % Z;
// y = (idx / X) % Y;
// idx += (parity + t + z + y) & 1;
// x = idx % X;
// equivalent to the above, but with fewer divisions/mods:
int aux1 = idx / LX;
x = idx - aux1 * LX;
int aux2 = aux1 / LY;
y = aux1 - aux2 * LY;
t = aux2 / LZ;
z = aux2 - t * LZ;
aux1 = (parity + t + z + y) & 1;
x += aux1;
idx += aux1;
} else if (idxType == EVEN_Y /*!(LY & 1)*/) { // Y even
t = idx / XYZ;
z = (idx / XY) % LZ;
idx += (parity + t + z) & 1;
y = (idx / LX) % LY;
x = idx % LX;
} else if (idxType == EVEN_Z /*!(LZ & 1)*/) { // Z even
t = idx / XYZ;
idx += (parity + t) & 1;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
} else {
idx += parity;
t = idx / XYZ;
z = (idx / XY) % LZ;
y = (idx / LX) % LY;
x = idx % LX;
}
c[0] = x;
c[1] = y;
c[2] = z;
c[3] = t;
}
// Get the coordinates for the exterior kernels
__device__ void coordsFromIndex(int x[4], const unsigned int cb_idx, const int X[4], const unsigned int dir, const int displacement, const unsigned int parity)
{
int Xh[2] = {X[0]/2, X[1]/2};
switch(dir){
case 0:
x[2] = cb_idx/Xh[1] % X[2];
x[3] = cb_idx/(Xh[1]*X[2]) % X[3];
x[0] = cb_idx/(Xh[1]*X[2]*X[3]);
x[0] += (X[0] - displacement);
x[1] = 2*(cb_idx % Xh[1]) + ((x[0]+x[2]+x[3]+parity)&1);
break;
case 1:
x[2] = cb_idx/Xh[0] % X[2];
x[3] = cb_idx/(Xh[0]*X[2]) % X[3];
x[1] = cb_idx/(Xh[0]*X[2]*X[3]);
x[1] += (X[1] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 2:
x[1] = cb_idx/Xh[0] % X[1];
x[3] = cb_idx/(Xh[0]*X[1]) % X[3];
x[2] = cb_idx/(Xh[0]*X[1]*X[3]);
x[2] += (X[2] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
case 3:
x[1] = cb_idx/Xh[0] % X[1];
x[2] = cb_idx/(Xh[0]*X[1]) % X[2];
x[3] = cb_idx/(Xh[0]*X[1]*X[2]);
x[3] += (X[3] - displacement);
x[0] = 2*(cb_idx % Xh[0]) + ((x[1]+x[2]+x[3]+parity)&1);
break;
}
return;
}
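// Full-lattice neighbor lookup: returns the checkerboard index of the site at
// offset shift[] from cb_idx, or -1 when the neighbor would cross a
// partitioned boundary (those sites are covered by the exterior kernels).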
__device__ __forceinline__
int neighborIndex(const unsigned int cb_idx, const int shift[4], const bool partitioned[4], const unsigned int parity, const int X[4]){
int full_idx;
int x[4];
coordsFromIndex<EVEN_X>(full_idx, x, cb_idx, parity, X);
for(int dim = 0; dim<4; ++dim){
if( partitioned[dim] )
if( (x[dim]+shift[dim])<0 || (x[dim]+shift[dim])>=X[dim]) return -1;
}
for(int dim=0; dim<4; ++dim){
x[dim] = shift[dim] ? (x[dim]+shift[dim] + X[dim]) % X[dim] : x[dim];
}
return (((x[3]*X[2] + x[2])*X[1] + x[1])*X[0] + x[0]) >> 1;
}
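/*
Function: staggered outer product (interior)
For every bulk site, forms the outer product of the local spinor (inA) with
its first forward neighbor (inB) in each direction, and with the third
neighbor when nFace == 3, accumulating coeff[0]- and coeff[1]-scaled results
into outA and outB respectively.
*/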
template<typename real, typename Output, typename InputA, typename InputB>
__global__ void interiorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg)
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
typedef complex<real> Complex;
Complex x[3];
Complex y[3];
Complex z[3];
Matrix<Complex,3> result;
Matrix<Complex,3> tempA, tempB; // input
while(idx<arg.length){
arg.inA.load(x, idx);
#pragma unroll
for(int dim=0; dim<4; ++dim){
int shift[4] = {0,0,0,0};
shift[dim] = 1;
const int first_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(first_nbr_idx >= 0){
arg.inB.load(y, first_nbr_idx);
outerProd(y,x,&result);
arg.outA.load(reinterpret_cast<real*>(tempA.data), idx, dim, arg.parity);
result = tempA + result*arg.coeff[0];
arg.outA.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity);
if (arg.nFace == 3) {
shift[dim] = 3;
const int third_nbr_idx = neighborIndex(idx, shift, arg.partitioned, arg.parity, arg.X);
if(third_nbr_idx >= 0){
arg.inB.load(z, third_nbr_idx);
outerProd(z, x, &result);
arg.outB.load(reinterpret_cast<real*>(tempB.data), idx, dim, arg.parity);
result = tempB + result*arg.coeff[1];
arg.outB.save(reinterpret_cast<real*>(result.data), idx, dim, arg.parity);
}
}
}
} // dim
idx += gridSize;
}
return;
} // interiorOprodKernel
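/*
Function: staggered outer product (exterior)
Covers the boundary slab of depth displacement in direction dim: the local
spinor is read from the bulk, the neighbor spinor from the ghost buffer at
ghostOffset[dim], and the result is accumulated into outA (one hop) or outB
(three hops).
*/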
template<int dim, typename real, typename Output, typename InputA, typename InputB>
__global__ void exteriorOprodKernel(StaggeredOprodArg<real, Output, InputA, InputB> arg)
{
typedef complex<real> Complex;
unsigned int cb_idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int gridSize = gridDim.x*blockDim.x;
Complex a[3];
Complex b[3];
Matrix<Complex,3> result;
Matrix<Complex,3> inmatrix; // input
Output& out = (arg.displacement == 1) ? arg.outA : arg.outB;
real coeff = (arg.displacement == 1) ? arg.coeff[0] : arg.coeff[1];
int x[4];
while(cb_idx<arg.length){
coordsFromIndex(x, cb_idx, arg.X, arg.dir, arg.displacement, arg.parity);
const unsigned int bulk_cb_idx = ((((x[3]*arg.X[2] + x[2])*arg.X[1] + x[1])*arg.X[0] + x[0]) >> 1);
out.load(reinterpret_cast<real*>(inmatrix.data), bulk_cb_idx, arg.dir, arg.parity);
arg.inA.load(a, bulk_cb_idx);
const unsigned int ghost_idx = arg.ghostOffset[dim] + cb_idx;
arg.inB.loadGhost(b, ghost_idx, arg.dir);
outerProd(b,a,&result);
result = inmatrix + result*coeff;
out.save(reinterpret_cast<real*>(result.data), bulk_cb_idx, arg.dir, arg.parity);
cb_idx += gridSize;
}
return;
}
template<typename Float, typename Output, typename InputA, typename InputB>
class StaggeredOprodField : public Tunable {
private:
StaggeredOprodArg<Float,Output,InputA,InputB> &arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return arg.outA.volumeCB; }
bool tunedGridDim() const { return false; }
public:
StaggeredOprodField(StaggeredOprodArg<Float,Output,InputA,InputB> &arg, const GaugeField &meta)
: arg(arg), meta(meta) {
writeAuxString("threads=%d,prec=%lu,stride=%d",arg.length,sizeof(Complex)/2,arg.inA.Stride());
// this sets the communications pattern for the packing kernel
int comms[QUDA_MAX_DIM] = { commDimPartitioned(0), commDimPartitioned(1), commDimPartitioned(2), commDimPartitioned(3) };
setPackComms(comms);
}
virtual ~StaggeredOprodField() {}
void apply(const cudaStream_t &stream){
if (meta.Location() == QUDA_CUDA_FIELD_LOCATION) {
// Disable tuning for the time being
TuneParam tp = tuneLaunch(*this, QUDA_TUNE_NO, getVerbosity());
if (arg.kernelType == OPROD_INTERIOR_KERNEL) {
interiorOprodKernel<<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
} else if (arg.kernelType == OPROD_EXTERIOR_KERNEL) {
if (arg.dir == 0) exteriorOprodKernel<0><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
else if (arg.dir == 1) exteriorOprodKernel<1><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
else if (arg.dir == 2) exteriorOprodKernel<2><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
else if (arg.dir == 3) exteriorOprodKernel<3><<<tp.grid,tp.block,tp.shared_bytes, stream>>>(arg);
} else {
errorQuda("Kernel type not supported\n");
}
} else { // run the CPU code
errorQuda("No CPU support for staggered outer-product calculation\n");
}
} // apply
void preTune(){ this->arg.outA.save(); this->arg.outB.save(); }
void postTune(){ this->arg.outA.load(); this->arg.outB.load(); }
long long flops() const { return 0; } // FIXME
long long bytes() const { return 0; } // FIXME
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux);}
}; // StaggeredOprodField
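// Packs, gathers, communicates and scatters the spinor's face buffers so that
// the exterior kernels can read forward neighbors owned by remote ranks.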
void exchangeGhost(cudaColorSpinorField &a, int parity, int dag) {
// need to enable packing in temporal direction to get spin-projector correct
bool pack_old = getKernelPackT();
setKernelPackT(true);
// first transfer src1
cudaDeviceSynchronize();
a.pack(1, 1-parity, dag, Nstream-1, 0);
cudaDeviceSynchronize();
for(int i=3; i>=0; i--){
if(commDimPartitioned(i)){
// Initialize the host transfer from the source spinor
a.gather(1, dag, 2*i);
} // commDim(i)
} // i=3,..,0
cudaDeviceSynchronize(); comm_barrier();
for (int i=3; i>=0; i--) {
if(commDimPartitioned(i)) {
a.commsStart(1, 2*i, dag);
}
}
for (int i=3; i>=0; i--) {
if(commDimPartitioned(i)) {
a.commsWait(1, 2*i, dag);
a.scatter(1, dag, 2*i);
}
}
cudaDeviceSynchronize();
setKernelPackT(pack_old); // restore packing state
a.bufferIndex = (1 - a.bufferIndex);
comm_barrier();
}
template<typename Float, typename Output, typename InputA, typename InputB>
void computeStaggeredOprodCuda(Output outA, Output outB, GaugeField& outFieldA, GaugeField& outFieldB, InputA& inA, InputB& inB, cudaColorSpinorField& src,
const unsigned int parity, const int faceVolumeCB[4], const double coeff[2], int nFace)
{
unsigned int ghostOffset[4] = {0,0,0,0};
for(int dir=0; dir<4; ++dir) ghostOffset[dir] = src.GhostOffset(dir,1)/src.FieldOrder(); // offset we want is the forwards one
// Create the arguments for the interior kernel
StaggeredOprodArg<Float,Output,InputA,InputB> arg(parity, 0, ghostOffset, 1, OPROD_INTERIOR_KERNEL, nFace, coeff, inA, inB, outA, outB, outFieldA);
StaggeredOprodField<Float,Output,InputA,InputB> oprod(arg, outFieldA);
arg.kernelType = OPROD_INTERIOR_KERNEL;
arg.length = src.VolumeCB();
oprod.apply(streams[Nstream-1]);
for(int i=3; i>=0; i--){
if (commDimPartitioned(i)) {
// update parameters for this exterior kernel
arg.kernelType = OPROD_EXTERIOR_KERNEL;
arg.dir = i;
// First, do the one hop term
{
arg.displacement = 1;
arg.length = faceVolumeCB[i];
oprod.apply(streams[Nstream-1]);
}
// Now do the 3 hop term
if (nFace == 3) {
arg.displacement = 3;
arg.length = arg.displacement*faceVolumeCB[i];
oprod.apply(streams[Nstream-1]);
}
}
} // i=3,..,0
checkCudaError();
} // computeStaggeredOprodCuda
#endif // GPU_STAGGERED_DIRAC
void computeStaggeredOprod(GaugeField& outA, GaugeField& outB, ColorSpinorField& inEven, ColorSpinorField& inOdd,
const unsigned int parity, const double coeff[2], int nFace)
{
#ifdef GPU_STAGGERED_DIRAC
if(outA.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outA.Order());
if(outB.Order() != QUDA_FLOAT2_GAUGE_ORDER)
errorQuda("Unsupported output ordering: %d\n", outB.Order());
if(inEven.Precision() != outA.Precision()) errorQuda("Mixed precision not supported: %d %d\n", inEven.Precision(), outA.Precision());
cudaColorSpinorField &inA = (parity&1) ? static_cast<cudaColorSpinorField&>(inOdd) : static_cast<cudaColorSpinorField&>(inEven);
cudaColorSpinorField &inB = (parity&1) ? static_cast<cudaColorSpinorField&>(inEven) : static_cast<cudaColorSpinorField&>(inOdd);
inA.allocateGhostBuffer(nFace);
inB.allocateGhostBuffer(nFace);
if (inEven.Precision() == QUDA_DOUBLE_PRECISION) {
Spinor<double2, double2, 3, 0, 0> spinorA(inA, nFace);
Spinor<double2, double2, 3, 0, 1> spinorB(inB, nFace);
exchangeGhost(static_cast<cudaColorSpinorField&>(inB), parity, 0);
computeStaggeredOprodCuda<double>(gauge::FloatNOrder<double, 18, 2, 18>(outA), gauge::FloatNOrder<double, 18, 2, 18>(outB),
outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace);
} else if (inEven.Precision() == QUDA_SINGLE_PRECISION) {
Spinor<float2, float2, 3, 0, 0> spinorA(inA, nFace);
Spinor<float2, float2, 3, 0, 1> spinorB(inB, nFace);
exchangeGhost(static_cast<cudaColorSpinorField&>(inB), parity, 0);
computeStaggeredOprodCuda<float>(gauge::FloatNOrder<float, 18, 2, 18>(outA), gauge::FloatNOrder<float, 18, 2, 18>(outB),
outA, outB, spinorA, spinorB, inB, parity, inB.GhostFace(), coeff, nFace);
} else {
errorQuda("Unsupported precision: %d\n", inEven.Precision());
}
#else // GPU_STAGGERED_DIRAC not defined
errorQuda("Staggered Outer Product has not been built!");
#endif
return;
} // computeStaggeredOprod
} // namespace experimental
} // namespace quda
|
c8b4697c27c628ba90929502b9c48c066c52ba39.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BCECriterion.cu"
#else
#include "THHApply.cuh"
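// Binary cross entropy, elementwise:
//   loss(x, y) = -w * ( y * log(x) + (1 - y) * log(1 - x) )
// summed over all elements; with sizeAverage the sum is divided by the
// element count. Without reduce, the per-element losses are returned instead.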
void THNN_(BCECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
bool reduce)
{
THCUNN_check_nElement(state, input, target);
THCUNN_check_nElement(state, input, weights);
THCUNN_assertSameGPU(state, 3, input, target, weights);
if (!reduce) {
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3(state, input, target, output,
bce_updateOutput_no_reduce_functor<real, accreal>());
if (weights) {
THCTensor_(cmul)(state, output, output, weights);
}
return;
}
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum;
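  // Reduce the per-element BCE terms over the flattened tensors. Each term is
  // presumably the standard binary cross-entropy
  //   -(t * log(x) + (1 - t) * log(1 - x)),
  // optionally scaled by a per-element weight and stabilized with a small eps
  // inside the functors.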
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
thrust::device_ptr<real> weights_data(THCTensor_(data)(state, weights));
sum = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)),
bce_functor_weights<real, accreal>(),
(accreal) 0,
thrust::plus<accreal>()
);
THCTensor_(free)(state, weights);
} else {
sum = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)),
bce_functor<real, accreal>(),
(accreal) 0,
thrust::plus<accreal>()
);
}
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
}
void THNN_(BCECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
bool reduce)
{
THCUNN_check_nElement(state, input, target);
THCUNN_check_nElement(state, input, weights);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights);
THCTensor_(resizeAs)(state, gradInput, input);
if (!reduce) {
THCUNN_check_nElement(state, gradOutput, input);
THC_pointwiseApply3(state, input, target, gradInput,
bce_updateGradInput_no_reduce_functor<real, accreal>());
THCTensor_(cmul)(state, gradInput, gradInput, gradOutput);
if (weights) {
THCTensor_(cmul)(state, gradInput, gradInput, weights);
}
return;
}
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
real norm = ScalarConvert<accreal, real>::to((sizeAverage ? accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0));
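  // norm folds the upstream gradient and the optional 1/size averaging into
  // one scalar; the functors below then presumably evaluate the pointwise
  // derivative norm * (x - t) / (x * (1 - x)), weighted when weights are given.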
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
thrust::device_ptr<real> weights_data(THCTensor_(data)(state, weights));
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)),
gradInput_data,
bce_updateGradInput_functor_weights<real, accreal>(norm)
);
THCTensor_(free)(state, weights);
} else {
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)),
gradInput_data,
bce_updateGradInput_functor<real, accreal>(norm)
);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
| c8b4697c27c628ba90929502b9c48c066c52ba39.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/BCECriterion.cu"
#else
#include "THCApply.cuh"
void THNN_(BCECriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *output,
bool sizeAverage,
THCTensor *weights,
bool reduce)
{
THCUNN_check_nElement(state, input, target);
THCUNN_check_nElement(state, input, weights);
THCUNN_assertSameGPU(state, 3, input, target, weights);
if (!reduce) {
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply3(state, input, target, output,
bce_updateOutput_no_reduce_functor<real, accreal>());
if (weights) {
THCTensor_(cmul)(state, output, output, weights);
}
return;
}
THCTensor_(resize1d)(state, output, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
accreal sum;
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
thrust::device_ptr<real> weights_data(THCTensor_(data)(state, weights));
sum = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)),
bce_functor_weights<real, accreal>(),
(accreal) 0,
thrust::plus<accreal>()
);
THCTensor_(free)(state, weights);
} else {
sum = thrust::transform_reduce(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)),
bce_functor<real, accreal>(),
(accreal) 0,
thrust::plus<accreal>()
);
}
if (sizeAverage)
sum /= size;
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
}
void THNN_(BCECriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
bool sizeAverage,
THCTensor *weights,
bool reduce)
{
THCUNN_check_nElement(state, input, target);
THCUNN_check_nElement(state, input, weights);
THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights);
THCTensor_(resizeAs)(state, gradInput, input);
if (!reduce) {
THCUNN_check_nElement(state, gradOutput, input);
THC_pointwiseApply3(state, input, target, gradInput,
bce_updateGradInput_no_reduce_functor<real, accreal>());
THCTensor_(cmul)(state, gradInput, gradInput, gradOutput);
if (weights) {
THCTensor_(cmul)(state, gradInput, gradInput, weights);
}
return;
}
THCUNN_check_dim_size(state, gradOutput, 1, 0, 1);
ptrdiff_t size = THCTensor_(nElement)(state, input);
real norm = ScalarConvert<accreal, real>::to((sizeAverage ? accreal(1)/size : accreal(1)) * THCTensor_(get1d)(state, gradOutput, 0));
input = THCTensor_(newContiguous)(state, input);
target = THCTensor_(newContiguous)(state, target);
thrust::device_ptr<real> input_data(THCTensor_(data)(state, input));
thrust::device_ptr<real> target_data(THCTensor_(data)(state, target));
thrust::device_ptr<real> gradInput_data(THCTensor_(data)(state, gradInput));
if (weights) {
weights = THCTensor_(newContiguous)(state, weights);
thrust::device_ptr<real> weights_data(THCTensor_(data)(state, weights));
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)),
gradInput_data,
bce_updateGradInput_functor_weights<real, accreal>(norm)
);
THCTensor_(free)(state, weights);
} else {
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)),
thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)),
gradInput_data,
bce_updateGradInput_functor<real, accreal>(norm)
);
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, target);
}
#endif
|
5b9299082d8691dd24a4edf43e2e57961d689227.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lrn.hpp"
namespace Shadow {
namespace Vision {
__global__ void KernelLRNFillScale(const float* in_data, int count, int in_c,
int in_h, int in_w, int size,
float alpha_over_size, float k,
float* scale_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / in_w;
int w = globalid % in_w;
int h = temp % in_h;
int b = temp / in_h;
int offset = (b * in_c * in_h + h) * in_w + w, head = 0;
const auto* in_off = in_data + offset;
auto* scale_off = scale_data + offset;
auto accum_scale = 0.f;
int step = in_h * in_w;
int pre_pad = (size - 1) / 2, post_pad = size - pre_pad - 1;
while (head < post_pad && head < in_c) {
accum_scale += in_off[head * step] * in_off[head * step];
head++;
}
while (head < in_c) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -=
in_off[(head - size) * step] * in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
head++;
}
while (head < in_c + post_pad) {
if (head - size >= 0) {
accum_scale -=
in_off[(head - size) * step] * in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
head++;
}
}
}
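// KernelLRN applies the cross-channel normalization using the scale computed
// above: out = in * scale^(-beta), where scale = k + (alpha / size) * (sum of
// squared activations over a window of `size` channels).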
__global__ void KernelLRN(const float* in_data, int count,
const float* scale_data, float negative_beta,
float* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
out_data[globalid] =
in_data[globalid] * pow(scale_data[globalid], negative_beta);
}
}
template <>
void LRN<DeviceType::kGPU, float>(const float* in_data, const VecInt& in_shape,
int size, float alpha, float beta, float k,
float* scale_data, float* out_data,
Context* context) {
int batch = in_shape[0], in_c = in_shape[1];
int in_h = in_shape[2], in_w = in_shape[3];
int count = batch * in_h * in_w;
hipLaunchKernelGGL(( KernelLRNFillScale), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->stream()),
in_data, count, in_c, in_h, in_w, size, alpha / size, k, scale_data);
CUDA_CHECK(hipPeekAtLastError());
count *= in_c;
hipLaunchKernelGGL(( KernelLRN), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->stream()), in_data, count, scale_data,
-beta, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
} // namespace Vision
} // namespace Shadow
namespace Shadow {
REGISTER_OP_KERNEL_DEFAULT(LRNGPU, LRNKernelDefault<DeviceType::kGPU>);
} // namespace Shadow
| 5b9299082d8691dd24a4edf43e2e57961d689227.cu | #include "lrn.hpp"
namespace Shadow {
namespace Vision {
__global__ void KernelLRNFillScale(const float* in_data, int count, int in_c,
int in_h, int in_w, int size,
float alpha_over_size, float k,
float* scale_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int temp = globalid / in_w;
int w = globalid % in_w;
int h = temp % in_h;
int b = temp / in_h;
int offset = (b * in_c * in_h + h) * in_w + w, head = 0;
const auto* in_off = in_data + offset;
auto* scale_off = scale_data + offset;
auto accum_scale = 0.f;
int step = in_h * in_w;
int pre_pad = (size - 1) / 2, post_pad = size - pre_pad - 1;
while (head < post_pad && head < in_c) {
accum_scale += in_off[head * step] * in_off[head * step];
head++;
}
while (head < in_c) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -=
in_off[(head - size) * step] * in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
head++;
}
while (head < in_c + post_pad) {
if (head - size >= 0) {
accum_scale -=
in_off[(head - size) * step] * in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
head++;
}
}
}
__global__ void KernelLRN(const float* in_data, int count,
const float* scale_data, float negative_beta,
float* out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
out_data[globalid] =
in_data[globalid] * pow(scale_data[globalid], negative_beta);
}
}
template <>
void LRN<DeviceType::kGPU, float>(const float* in_data, const VecInt& in_shape,
int size, float alpha, float beta, float k,
float* scale_data, float* out_data,
Context* context) {
int batch = in_shape[0], in_c = in_shape[1];
int in_h = in_shape[2], in_w = in_shape[3];
int count = batch * in_h * in_w;
KernelLRNFillScale<<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->stream())>>>(
in_data, count, in_c, in_h, in_w, size, alpha / size, k, scale_data);
CUDA_CHECK(cudaPeekAtLastError());
count *= in_c;
KernelLRN<<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->stream())>>>(in_data, count, scale_data,
-beta, out_data);
CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace Vision
} // namespace Shadow
namespace Shadow {
REGISTER_OP_KERNEL_DEFAULT(LRNGPU, LRNKernelDefault<DeviceType::kGPU>);
} // namespace Shadow
|
e3c6d903c42f43cb7f621a2eb52ccf524c589348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "solve_cnm.cuh"
//#include "device_launch_parameters.h"
#include <hipblas.h> // the hipblas* calls below need hipblas.h, not rocblas.h
#include <hipsparse.h>
//#include <math.h>
__global__ static
void reset_vec ( neuron_solve_t *d_solve, const int nc )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < nc )
{
d_solve -> b [ id ] = 0.0;
d_solve -> vec [ cn_gamma2 ] [ id ] = 0.0;
d_solve -> vec [ cn_ommega2 ] [ id ] = 0.0;
}
}
__host__
static void cg_cusparse_crs(const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row, double *d_x, double *d_b)
{
static double *d_r, *d_p, *d_ap;
static double *_buffer;
static int size = 0;
if ( size < ngc ) {
    if ( size != 0 ) { // free the previous, smaller buffers before reallocating
hipFree ( d_r );
hipFree ( d_p );
hipFree ( d_ap );
hipFree ( _buffer );
}
hipMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &_buffer, ngc * sizeof ( double ) );
size = ngc;
}
//hipMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
//hipMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
//hipMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
double bnorm, rnorm_k, rnorm_k1;
double alpha, beta, pap;
double epsilon = 1.0e-15;
double cp1 = 1.0;
double c0 = 0.0;
double cm1 = -1.0;
hipblasStatus_t stat1;
hipblasHandle_t handle1;
hipsparseStatus_t stat2;
hipsparseHandle_t handle2;
hipsparseMatDescr_t descrA;
stat1 = hipblasCreate(&handle1);
stat2 = hipsparseCreate(&handle2);
hipsparseCreateMatDescr(&descrA);
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO);
stat1 = hipblasDscal(handle1, ngc, &c0, d_x, 1); // x = 0
stat1 = hipblasDcopy(handle1, ngc, d_b, 1, d_r, 1); // r = b
stat1 = hipblasDcopy(handle1, ngc, d_r, 1, d_p, 1); // p = r
stat1 = hipblasDdot(handle1, ngc, d_b, 1, d_b, 1, &bnorm); // ||b||
/**/
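  // Plain (unpreconditioned) conjugate gradient on the compartment matrix,
  // which is assumed symmetric positive definite:
  //   alpha = (r,r)/(p,Ap); x += alpha*p; r -= alpha*Ap;
  //   beta = (r_{k+1},r_{k+1})/(r_k,r_k); p = r + beta*p.
  // Stops after 100 iterations or once ||r|| <= epsilon * ||b||.
  // (hipify translated the type tags to HIP_R_64F but left cusparseCsrmvEx
  // itself untranslated; it appears to have no direct hipsparse counterpart.)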
for (int k = 0; k < 100; k++) {
//stat2 = hipsparseDcsrmv(handle2, HIPSPARSE_OPERATION_NON_TRANSPOSE,
//ngc, ngc, nnz, &cp1, descrA, d_val, d_row, d_col, d_p, &c0, d_ap); // Ap
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, HIPSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, HIP_R_64F, descrA, d_val, HIP_R_64F, d_row, d_col, d_p, HIP_R_64F,
&c0, HIP_R_64F, d_ap, HIP_R_64F, HIP_R_64F, _buffer ); // Ap
stat1 = hipblasDdot(handle1, ngc, d_r, 1, d_r, 1, &rnorm_k); // ||r_k||^2
stat1 = hipblasDdot(handle1, ngc, d_p, 1, d_ap, 1, &pap); // pAp
alpha = rnorm_k / pap; // alpha
stat1 = hipblasDaxpy(handle1, ngc, &alpha, d_p, 1, d_x, 1); // x += alpha * p
alpha = -1.0 * alpha;
stat1 = hipblasDaxpy(handle1, ngc, &alpha, d_ap, 1, d_r, 1); // r -= alpha * ap
stat1 = hipblasDdot(handle1, ngc, d_r, 1, d_r, 1, &rnorm_k1); // ||r_k+1||^2
if (sqrt(rnorm_k1) <= epsilon * sqrt(bnorm)) { break; }
// p = r + beta * p
beta = rnorm_k1 / rnorm_k;
stat1 = hipblasDscal(handle1, ngc, &beta, d_p, 1);
stat1 = hipblasDaxpy(handle1, ngc, &cp1, d_r, 1, d_p, 1);
}
hipsparseDestroyMatDescr(descrA);
hipblasDestroy(handle1);
hipsparseDestroy(handle2);//
//hipFree ( d_r );
//hipFree ( d_p );
//hipFree ( d_ap );
}
//////////////////////////////// GR /////////////////////////////////
__global__ static
void add_mfgr_val ( neuron_solve_t *d_gr_solve, int *mfgr_comp, double *mfgr_elem, const int num_mfgr )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_mfgr )
{
int post_num = mfgr_comp [ post_comp * num_mfgr + id ];
double l_val = mfgr_elem [ mfgr_val * num_mfgr + id ];
atomicAdd ( & ( d_gr_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val );
atomicAdd ( & ( d_gr_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_MFGR );
}
}
__global__ static
void add_gogr_val ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
int *gogr_comp, double *gogr_elem, const int num_gogr )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_gogr )
{
int post_num = gogr_comp [ post_comp * num_gogr + id ];
double l_val = gogr_elem [ gogr_val * num_gogr + id ];
atomicAdd ( & ( d_gr_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_gr_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GOGR );
}
}
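// Each add_*_val kernel folds synaptic conductances into the Crank-Nicolson
// accumulators: cn_gamma2 gains g/2 (conductance term) and cn_ommega2 gains
// g*E/2 (drive toward the synaptic reversal potential E), matching the
// factor-1/2 convention used for the membrane currents elsewhere.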
__global__
void gr_cnm_vec_initialize ( neuron_t *d_gr, neuron_solve_t *d_gr_solve )
{
double **elem = d_gr -> elem;
double **cond = d_gr -> cond;
double **ion = d_gr -> ion;
double **vec = d_gr_solve -> vec;
//double *val = d_gr_solve -> val;
//double *val_ori = d_gr_solve -> val_ori;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_gr -> nc)
{
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak1 ] [ id ] + cond [ g_leak2 ] [ id ] + cond [ g_leak3 ] [ id ]
+ cond [ g_Na ] [ id ] * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak1 ] [ id ] * V_LEAK1_GR + cond [ g_leak2 ] [ id ] * V_LEAK2_GR + cond [ g_leak3 ] [ id ] * V_LEAK3_GR + elem [ i_ext ] [ id ]
+ cond [ g_Na ] [ id ] * V_Na_GR * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * V_Ca_GR * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * V_K_GR * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * V_K_GR * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * V_K_GR * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * V_K_GR * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * V_K_GR * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < gr_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to gr_solve.cu
}
__global__
static void gr_update_matrix ( neuron_t *d_gr, neuron_solve_t *d_gr_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_gr -> elem;
double **cond = d_gr -> cond;
double **ion = d_gr -> ion;
double **vec = d_gr_solve -> vec;
double *val = d_gr_solve -> val;
double *val_ori = d_gr_solve -> val_ori;
double *b = d_gr_solve -> b;
int *col = d_gr_solve -> col;
int *row = d_gr_solve -> row;
double DT = d_gr -> DT;
if ( id < d_gr -> nc)
{
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak1 ] [ id ] + cond [ g_leak2 ] [ id ] + cond [ g_leak3 ] [ id ]
+ cond [ g_Na ] [ id ] * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak1 ] [ id ] * V_LEAK1_GR + cond [ g_leak2 ] [ id ] * V_LEAK2_GR + cond [ g_leak3 ] [ id ] * V_LEAK3_GR + elem [ i_ext ] [ id ]
+ cond [ g_Na ] [ id ] * V_Na_GR * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * V_Ca_GR * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * V_K_GR * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * V_K_GR * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * V_K_GR * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * V_K_GR * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * V_K_GR * ion [ s_KM ] [ id ] ) / 2.0;
int d = d_gr_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ] + vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
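/* Sketch of the Crank-Nicolson step assembled above (the GO, PKJ and IO
   updates below follow the same pattern): with A the halved axial-coupling
   matrix stored in val_ori, gamma the halved membrane conductance and
   ommega the halved driving current, each step solves
     (Cm/DT + gamma_{n+1} + A) v_{n+1}
         = (Cm/DT - gamma_n) v_n - A v_n + ommega_n + ommega_{n+1}.  */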
__host__
void gr_solve_by_cnm ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
neuron_t *p_gr, neuron_solve_t *p_gr_solve,
synapse_t *d_mfgr, synapse_t *d_gogr )
{
// global
double **ion = p_gr -> ion;
double **elem = p_gr -> elem;
int nc = p_gr -> nc;
static int numThreadsPerBlock = p_gr_solve -> numThreadsPerBlock;
static int numBlocks = p_gr_solve -> numBlocks;
// update ion
hipLaunchKernelGGL(( gr_Na_update_2order) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0,
nc, elem [ v ], p_gr_solve -> vec [ cn_v_old ], CN_DT, elem [ compart ],
ion [ o_Na ], ion [ c1_Na ], ion [ c2_Na ], ion [ c3_Na ], ion [ c4_Na ], ion [ c5_Na ],
ion [ i1_Na ], ion [ i2_Na ], ion [ i3_Na ], ion [ i4_Na ], ion [ i5_Na ], ion [ i6_Na ] );
hipLaunchKernelGGL(( gr_update_ion_exp_imp) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_gr, d_gr_solve, CN_DT );
// reset val and b
hipMemcpy ( p_gr_solve -> val, p_gr_solve -> val_ori, p_gr_solve -> nnz * sizeof ( double ), hipMemcpyDeviceToDevice );
hipLaunchKernelGGL(( reset_vec) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_gr_solve, nc );
// update val, b and v
  hipLaunchKernelGGL(( add_mfgr_val) , dim3(( d_mfgr -> n + numThreadsPerBlock - 1 ) / numThreadsPerBlock), dim3(numThreadsPerBlock) , 0, 0, // ceil-divide by the actual block size
d_gr_solve, d_mfgr -> comp, d_mfgr -> elem, d_mfgr -> n );
hipLaunchKernelGGL(( add_gogr_val) , dim3(( d_gogr -> n + 127 ) / 128), dim3(128) , 0, 0,
d_gr, d_gr_solve, d_gogr -> comp, d_gogr -> elem, d_gogr -> n ); //hipDeviceSynchronize();
hipLaunchKernelGGL(( gr_update_matrix) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_gr, d_gr_solve);
cg_cusparse_crs ( nc, p_gr_solve -> nnz, p_gr_solve -> val, p_gr_solve -> col, p_gr_solve -> row, p_gr -> elem [ v ], p_gr_solve -> b );
}
//////////////////////////////// GO /////////////////////////////////
__global__ static
void add_grgo_val ( neuron_t *d_go, neuron_solve_t *d_go_solve,
int *grgo_comp, double *grgo_elem, const int num_grgo )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_grgo )
{
int post_num = grgo_comp [ post_comp * num_grgo + id ];
double l_val = grgo_elem [ grgo_val * num_grgo + id ];
atomicAdd ( & ( d_go_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_go_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GRGO );
}
}
__global__
void go_cnm_vec_initialize ( neuron_t *d_go, neuron_solve_t *d_go_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_go -> nc)
{
double **elem = d_go -> elem;
double **cond = d_go -> cond;
double **ion = d_go -> ion;
double **vec = d_go_solve -> vec;
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak_go ] [ id ]
+ cond [ g_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak_go ] [ id ] * V_LEAK_GO + elem [ i_ext ] [ id ]
+ cond [ g_NaT_go ] [ id ] * V_Na_GO * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * V_Na_GO * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * V_Na_GO * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * V_Ca_GO * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ( d_go -> rev_ca2 [ id ] ) * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * V_K_GO * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * V_K_GO * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * V_K_GO * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * V_K_GO * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * V_K_GO * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * V_H_GO * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * V_H_GO * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < go_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to go_solve.cu
}
__global__
static void go_update_matrix ( neuron_t *d_go, neuron_solve_t *d_go_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_go -> nc)
{
double **elem = d_go -> elem;
double **cond = d_go -> cond;
double **ion = d_go -> ion;
double **vec = d_go_solve -> vec;
double *val = d_go_solve -> val;
double *val_ori = d_go_solve -> val_ori;
double *b = d_go_solve -> b;
int *col = d_go_solve -> col;
int *row = d_go_solve -> row;
double DT = d_go -> DT;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_go ] [ id ]
+ cond [ g_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_go ] [ id ] * V_LEAK_GO + elem [ i_ext ] [ id ]
+ cond [ g_NaT_go ] [ id ] * V_Na_GO * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * V_Na_GO * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * V_Na_GO * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * V_Ca_GO * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ( d_go -> rev_ca2 [ id ] ) * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * V_K_GO * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * V_K_GO * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * V_K_GO * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * V_K_GO * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * V_K_GO * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * V_H_GO * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * V_H_GO * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
int d = d_go_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ]
+ vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
__host__
void go_solve_by_cnm ( neuron_t *d_go, neuron_solve_t *d_go_solve,
neuron_t *p_go, neuron_solve_t *p_go_solve, synapse_t *d_grgo )
{
// global
double **ion = p_go -> ion;
int nc = p_go -> nc;
static int numThreadsPerBlock = p_go_solve -> numThreadsPerBlock;
static int numBlocks = p_go_solve -> numBlocks;
// update ion
hipLaunchKernelGGL(( go_update_ion_exp_imp) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_go, d_go_solve, CN_DT );
hipLaunchKernelGGL(( go_KAHP_update_2order) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0,
p_go -> n, p_go -> elem [ Ca ], p_go -> ca_old, ion [ o1_KAHP_go ], ion [ o2_KAHP_go ],
ion [ c1_KAHP_go ], ion [ c2_KAHP_go ], ion [ c3_KAHP_go ], ion [ c4_KAHP_go ], CN_DT );
// reset val and b
hipMemcpy ( p_go_solve -> val, p_go_solve -> val_ori, p_go_solve -> nnz * sizeof ( double ), hipMemcpyDeviceToDevice );
hipLaunchKernelGGL(( reset_vec) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_go_solve, nc );
// update val, b and v
hipLaunchKernelGGL(( add_grgo_val) , dim3(( d_grgo -> n + 127 ) / 128), dim3(128) , 0, 0,
d_go, d_go_solve, d_grgo -> comp, d_grgo -> elem, d_grgo -> n ); //hipDeviceSynchronize();
hipLaunchKernelGGL(( go_update_matrix) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_go, d_go_solve);
cg_cusparse_crs( p_go -> nc, p_go_solve -> nnz, p_go_solve -> val, p_go_solve -> col, p_go_solve -> row, p_go -> elem [ v ], p_go_solve -> b );
}
//////////////////////////////// PKJ /////////////////////////////////
__global__ static
void add_mlipkj_val ( neuron_solve_t *d_pkj_solve, int *mlipkj_comp, double *mlipkj_elem, const int num_mlipkj )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_mlipkj )
{
int post_num = mlipkj_comp [ post_comp * num_mlipkj + id ];
double l_val = mlipkj_elem [ mlipkj_val * num_mlipkj + id ];
atomicAdd ( & ( d_pkj_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val );
atomicAdd ( & ( d_pkj_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_MLIPKJ );
}
}
__global__ static
void add_grpkj_val ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve,
int *grpkj_comp, double *grpkj_elem, const int num_grpkj )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_grpkj )
{
int post_num = grpkj_comp [ post_comp * num_grpkj + id ];
double l_val = grpkj_elem [ grpkj_val * num_grpkj + id ];
atomicAdd ( & ( d_pkj_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_pkj_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GRPKJ );
}
}
__global__
void pkj_cnm_vec_initialize ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_pkj -> nc )
{
double **elem = d_pkj -> elem;
double **cond = d_pkj -> cond;
double **ion = d_pkj -> ion;
double **vec = d_pkj_solve -> vec;
double l_v_Ca = d_pkj -> rev_ca2 [ id ];
vec [ cn_gamma1 ] [ id ] =
( + cond [ g_leak_pkj ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ]
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ]
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ]
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ]
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ]
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ]
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ]
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ]
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ]
) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;//= vec [ cn_gamma1 ] [ id ];//
vec [ cn_ommega1 ] [ id ] =
( + cond [ g_leak_pkj ] [ id ] * ( V_LEAK_PKJ ) + elem [ i_ext ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ] * ( l_v_Ca )
      + cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ] * ( V_KH_PKJ ) // Kh component 1
      + cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ] * ( V_KH_PKJ ) // Kh component 2
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ( V_K_PKJ )
) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;//vec [ cn_ommega1 ] [ id ];//
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < pkj_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to pkj_solve.cu
}
__global__
static void pkj_update_matrix ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_pkj -> nc)
{
double **elem = d_pkj -> elem;
double **cond = d_pkj -> cond;
double **ion = d_pkj -> ion;
double **vec = d_pkj_solve -> vec;
double *val = d_pkj_solve -> val;
double *val_ori = d_pkj_solve -> val_ori;
double *b = d_pkj_solve -> b;
int *col = d_pkj_solve -> col;
int *row = d_pkj_solve -> row;
double DT = d_pkj -> DT;
double l_v_Ca = d_pkj -> rev_ca2 [ id ] ;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_pkj ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ]
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ]
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ]
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ]
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ]
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ]
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ]
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ]
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ]
) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_pkj ] [ id ] * ( V_LEAK_PKJ ) + elem [ i_ext ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ] * ( V_KH_PKJ )
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ] * ( V_KH_PKJ )
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ( V_K_PKJ )
) / 2.0;
int d = d_pkj_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ] [ id ]
+ vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
//d_pkj_solve -> vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
}
__host__
void pkj_solve_by_cnm ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve,
neuron_t *p_pkj, neuron_solve_t *p_pkj_solve,
synapse_t *d_grpkj, synapse_t *d_mlipkj )
{
// global
int nc = p_pkj -> nc;
static int numThreadsPerBlock = p_pkj_solve -> numThreadsPerBlock;
static int numBlocks = p_pkj_solve -> numBlocks;
// update ion
hipLaunchKernelGGL(( pkj_update_ion_2nd) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_pkj, d_pkj_solve, CN_DT );
//pkj_update_ion_RK2 <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve, CN_DT );
//pkj_update_ion <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve, CN_DT );
// reset val and b
hipMemcpy ( p_pkj_solve -> val, p_pkj_solve -> val_ori, p_pkj_solve -> nnz * sizeof ( double ), hipMemcpyDeviceToDevice );
hipLaunchKernelGGL(( reset_vec) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_pkj_solve, nc );
// update val, b and v
hipLaunchKernelGGL(( add_grpkj_val) , dim3(( d_grpkj -> n + 127 ) / 128), dim3(128) , 0, 0,
d_pkj, d_pkj_solve, d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n );
// add_mlipkj_val <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>>
// ( d_pkj_solve, d_mlipkj -> comp, d_mlipkj -> elem, d_mlipkj -> n );
hipLaunchKernelGGL(( pkj_update_matrix) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_pkj, d_pkj_solve );
cg_cusparse_crs ( p_pkj -> nc, p_pkj_solve -> nnz, p_pkj_solve -> val, p_pkj_solve -> col, p_pkj_solve -> row, p_pkj -> elem [ v ], p_pkj_solve -> b );
}
//////////////////////////////// IO /////////////////////////////////
__global__
static void eazy_transposed_matrix ( const int nnz, const double *d_val, double *val_h, int *order )
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
if( i < nnz ) { val_h [ i ] = d_val [ order [ i ] ]; }
}
__global__
static void create_transposed_matrix ( const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row,
double *val_h, int *col_h, int *row_h, int *order )
{
row_h [ 0 ] = 0;
int count_row = 0;
int n_v = 0;
int n_r = 0;
for ( int i = 0; i < nnz; i++ ) { order [ i ] = i; }
for ( int i = 0; i < ngc; i++ ) {
for ( int j = 0; j < ngc; j++ ) {
for ( int k = d_row [ j ]; k < d_row [ j + 1 ]; k++ ) {
if ( d_col [ k ] == i ) {
val_h [ n_v ] = d_val [ k ];
order [ n_v ] = k;
col_h [ n_v ] = j;
n_v++;
}
}
}
row_h [ n_r + 1 ] = n_v;
n_r++;
}
// Debug
for ( int i = 0; i < nnz; i++ )
{
int j = order [ i ];
if ( d_val [ j ] != val_h [ i ] )
{
printf ("val cpy order error in solve_cnm.cu\n");
}
}
// Debug
/*
for ( int i = 0; i < nnz; i++ )
printf ( "val [ %d ] = %f\n", i, d_val [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "val_h [ %d ] = %f\n", i, val_h [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "col [ %d ] = %d\n", i, d_col [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "col_h [ %d ] = %d\n", i, col_h [ i ] );
for ( int i = 0; i < ngc + 1; i++ )
printf ( "row [ %d ] = %d\n", i, d_row [ i ] );
for ( int i = 0; i < ngc + 1; i++ )
printf ( "row_h [ %d ] = %d\n", i, row_h [ i ] );
*/
}
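// BiCG needs matrix-vector products with both A and A^T. The CSR transpose
// (val_h / col_h / row_h) is built once by the single-threaded kernel above
// (an O(ngc * nnz) scan), which also records the value permutation `order`;
// eazy_transposed_matrix then merely refreshes val_h from the current d_val
// on every solve.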
__host__
static void bicg_cusparse_crs( const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row, double *d_x, double *d_b)
{
static double *d_r, *d_p, *d_ap;
static double *d_rs, *d_ps, *d_atps;
static double *_buffer;
static double *val_h;
static int *col_h, *row_h, *order;
static int size = 0;
if ( size < ngc ) {
    if ( size != 0 ) { // free the previous, smaller buffers before reallocating
hipFree ( d_r );
hipFree ( d_p );
hipFree ( d_ap );
hipFree ( d_rs );
hipFree ( d_ps );
hipFree ( d_atps );
hipFree ( _buffer );
hipFree ( val_h );
hipFree ( col_h );
hipFree ( row_h );
hipFree ( order );
}
hipMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_rs, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_ps, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &d_atps, ngc * sizeof ( double ) );
hipMalloc ( ( double ** ) &val_h, nnz * sizeof ( double ) );
hipMalloc ( ( int ** ) &col_h, nnz * sizeof ( int ) );
hipMalloc ( ( int ** ) &row_h, ( ngc + 1 ) * sizeof ( int ) );
hipMalloc ( ( int ** ) &order, nnz * sizeof ( int ) );
hipMalloc ( ( double ** ) &_buffer, ngc * sizeof ( double ) );
hipLaunchKernelGGL(( create_transposed_matrix) , dim3(1), dim3(1) , 0, 0,
ngc, nnz, d_val, d_col, d_row, val_h, col_h, row_h, order );
size = ngc;
}
hipLaunchKernelGGL(( eazy_transposed_matrix) , dim3((nnz + 127)/128), dim3(128) , 0, 0, nnz, d_val, val_h, order );
//hipDeviceSynchronize();
//hipMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
//hipMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
//hipMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
double bnorm, rnorm_k, rnorm_k1;
double alpha, beta, pap;
double epsilon = 1.0e-15;
double cp1 = 1.0;
double c0 = 0.0;
double cm1 = -1.0;
hipblasStatus_t stat1;
hipblasHandle_t handle1;
hipsparseStatus_t stat2;
hipsparseHandle_t handle2;
hipsparseMatDescr_t descrA;
stat1 = hipblasCreate(&handle1);
stat2 = hipsparseCreate(&handle2);
hipsparseCreateMatDescr(&descrA);
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO);
stat1 = hipblasDscal(handle1, ngc, &c0, d_x, 1); // x = 0
stat1 = hipblasDcopy(handle1, ngc, d_b, 1, d_r, 1); // r = b
stat1 = hipblasDcopy(handle1, ngc, d_r, 1, d_rs, 1); // rs = r
stat1 = hipblasDcopy(handle1, ngc, d_r, 1, d_p, 1); // p = r
stat1 = hipblasDcopy(handle1, ngc, d_rs, 1, d_ps, 1); // ps = rs
stat1 = hipblasDdot(handle1, ngc, d_b, 1, d_b, 1, &bnorm); // ||b||
/**/
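  // Biconjugate gradient: alongside (r, p) it carries shadow vectors
  // (rs, ps) updated through A^T, with
  //   alpha = (rs,r)/(ps,Ap) and beta = (rs_{k+1},r_{k+1})/(rs_k,r_k),
  // stopping after 100 iterations or once sqrt((r,rs)) <= epsilon * ||b||.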
for (int k = 0; k < 100; k++) {
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, HIPSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, HIP_R_64F, descrA, d_val, HIP_R_64F, d_row, d_col, d_p, HIP_R_64F,
&c0, HIP_R_64F, d_ap, HIP_R_64F, HIP_R_64F, _buffer ); // Ap
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, HIPSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, HIP_R_64F, descrA, val_h, HIP_R_64F, row_h, col_h, d_ps, HIP_R_64F,
&c0, HIP_R_64F, d_atps, HIP_R_64F, HIP_R_64F, _buffer ); // Atps
stat1 = hipblasDdot(handle1, ngc, d_r, 1, d_rs, 1, &rnorm_k); // ( rs, r )
stat1 = hipblasDdot(handle1, ngc, d_ps, 1, d_ap, 1, &pap); // psAp
alpha = rnorm_k / pap; // alpha
stat1 = hipblasDaxpy(handle1, ngc, &alpha, d_p, 1, d_x, 1); // x += alpha * p
alpha = -1.0 * alpha;
stat1 = hipblasDaxpy(handle1, ngc, &alpha, d_ap, 1, d_r, 1); // r -= alpha * ap
stat1 = hipblasDaxpy(handle1, ngc, &alpha, d_atps, 1, d_rs, 1); // rs -= alpha * atps
stat1 = hipblasDdot(handle1, ngc, d_r, 1, d_rs, 1, &rnorm_k1); // ( r_k+1, rs_k+1 )
if (sqrt(rnorm_k1) <= epsilon * sqrt(bnorm)) { break; }
// beta
beta = rnorm_k1 / rnorm_k;
// p = r + beta * p
stat1 = hipblasDscal(handle1, ngc, &beta, d_p, 1);
stat1 = hipblasDaxpy(handle1, ngc, &cp1, d_r, 1, d_p, 1);
// ps = rs + beta * ps
stat1 = hipblasDscal(handle1, ngc, &beta, d_ps, 1);
stat1 = hipblasDaxpy(handle1, ngc, &cp1, d_rs, 1, d_ps, 1);
}
hipsparseDestroyMatDescr(descrA);
hipblasDestroy(handle1);
hipsparseDestroy(handle2);//
//hipFree ( d_r );
//hipFree ( d_p );
//hipFree ( d_ap );
}
__global__ static
void add_io_gap_val ( neuron_solve_t *d_io_solve, int *io_gap_comp, double *io_gap_elem, const int num_io_gap )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_io_gap )
{
double l_val = -1.0 * io_gap_elem [ gap_current * num_io_gap + id ];//-0.5
int post_num = io_gap_comp [ post_comp_gap * num_io_gap + id ];
atomicAdd ( & ( d_io_solve -> vec [ cn_ommega2 ] [ post_num ] ), ( l_val ) );
}
}
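// Gap-junction currents between IO compartments enter only the right-hand
// side (cn_ommega2) as an explicit current; the matrix is untouched, so the
// electrical coupling is handled explicitly inside the otherwise implicit
// Crank-Nicolson step.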
__global__
void io_cnm_vec_initialize ( neuron_t *d_io, neuron_solve_t *d_io_solve )
{
double **elem = d_io -> elem;
double **cond = d_io -> cond;
double **ion = d_io -> ion;
double **vec = d_io_solve -> vec;
//double *val = d_io_solve -> val;
//double *val_ori = d_io_solve -> val_ori;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_io -> nc)
{
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak_io ] [ id ]
+ cond [ g_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak_io ] [ id ] * V_LEAK_IO + elem [ i_ext ] [ id ]
+ cond [ g_CaL_io ] [ id ] * V_Ca_IO * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * V_Na_IO * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * V_K_IO * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * V_K_IO * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * V_Ca_IO * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * V_K_IO * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * V_H_IO * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < io_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to io_solve.cu
}
__global__
static void io_update_matrix ( neuron_t *d_io, neuron_solve_t *d_io_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_io -> nc)
{
double **elem = d_io -> elem;
double **cond = d_io -> cond;
double **ion = d_io -> ion;
double **vec = d_io_solve -> vec;
double *val = d_io_solve -> val;
double *val_ori = d_io_solve -> val_ori;
double *b = d_io_solve -> b;
int *col = d_io_solve -> col;
int *row = d_io_solve -> row;
double DT = d_io -> DT;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_io ] [ id ]
+ cond [ g_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_io ] [ id ] * V_LEAK_IO + elem [ i_ext ] [ id ]
+ cond [ g_CaL_io ] [ id ] * V_Ca_IO * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * V_Na_IO * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * V_K_IO * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * V_K_IO * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * V_Ca_IO * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * V_K_IO * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * V_H_IO * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
int d = d_io_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ] + vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
__host__
void io_solve_by_cnm ( neuron_t *d_io, neuron_solve_t *d_io_solve,
neuron_t *p_io, neuron_solve_t *p_io_solve, gap_t* d_io_gap )
{
// global
double **ion = p_io -> ion;
double **elem = p_io -> elem;
int nc = p_io -> nc;
static int numThreadsPerBlock = p_io_solve -> numThreadsPerBlock;
static int numBlocks = p_io_solve -> numBlocks;
// update ion
hipLaunchKernelGGL(( io_update_ion_2nd) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_io, d_io_solve, CN_DT );
// reset val and b
hipMemcpy ( p_io_solve -> val, p_io_solve -> val_ori, p_io_solve -> nnz * sizeof ( double ), hipMemcpyDeviceToDevice );
hipLaunchKernelGGL(( reset_vec) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_io_solve, nc );
// update val, b and v
if ( p_io -> n > 1 )
{
hipLaunchKernelGGL(( io_gap_update) , dim3(( d_io_gap -> n + 127 ) / 128), dim3(128) , 0, 0,
d_io, d_io_gap -> comp, d_io_gap -> elem, d_io_gap -> n );
hipLaunchKernelGGL(( add_io_gap_val) , dim3(( d_io_gap -> n + 127 ) / 128), dim3(128) , 0, 0,
d_io_solve, d_io_gap -> comp, d_io_gap -> elem, d_io_gap -> n );
}
hipLaunchKernelGGL(( io_update_matrix) , dim3(numBlocks), dim3(numThreadsPerBlock) , 0, 0, d_io, d_io_solve );
bicg_cusparse_crs ( nc, p_io_solve -> nnz, p_io_solve -> val,
p_io_solve -> col, p_io_solve -> row, p_io -> elem [ v ], p_io_solve -> b );
}
| e3c6d903c42f43cb7f621a2eb52ccf524c589348.cu | #include "solve_cnm.cuh"
//#include "device_launch_parameters.h"
#include <cublas_v2.h>
#include <cusparse.h>
//#include <math.h>
__global__ static
void reset_vec ( neuron_solve_t *d_solve, const int nc )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < nc )
{
d_solve -> b [ id ] = 0.0;
d_solve -> vec [ cn_gamma2 ] [ id ] = 0.0;
d_solve -> vec [ cn_ommega2 ] [ id ] = 0.0;
}
}
__host__
static void cg_cusparse_crs(const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row, double *d_x, double *d_b)
{
static double *d_r, *d_p, *d_ap;
static double *_buffer;
static int size = 0;
if ( size < ngc ) {
    if ( size != 0 ) { // free the previous, smaller buffers before reallocating
cudaFree ( d_r );
cudaFree ( d_p );
cudaFree ( d_ap );
cudaFree ( _buffer );
}
cudaMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &_buffer, ngc * sizeof ( double ) );
size = ngc;
}
//cudaMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
//cudaMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
//cudaMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
double bnorm, rnorm_k, rnorm_k1;
double alpha, beta, pap;
double epsilon = 1.0e-15;
double cp1 = 1.0;
double c0 = 0.0;
double cm1 = -1.0;
cublasStatus_t stat1;
cublasHandle_t handle1;
cusparseStatus_t stat2;
cusparseHandle_t handle2;
cusparseMatDescr_t descrA;
stat1 = cublasCreate_v2(&handle1);
stat2 = cusparseCreate(&handle2);
cusparseCreateMatDescr(&descrA);
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
stat1 = cublasDscal(handle1, ngc, &c0, d_x, 1); // x = 0
stat1 = cublasDcopy(handle1, ngc, d_b, 1, d_r, 1); // r = b
stat1 = cublasDcopy(handle1, ngc, d_r, 1, d_p, 1); // p = r
stat1 = cublasDdot(handle1, ngc, d_b, 1, d_b, 1, &bnorm); // ||b||
/**/
for (int k = 0; k < 100; k++) {
//stat2 = cusparseDcsrmv(handle2, CUSPARSE_OPERATION_NON_TRANSPOSE,
//ngc, ngc, nnz, &cp1, descrA, d_val, d_row, d_col, d_p, &c0, d_ap); // Ap
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, CUSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, CUDA_R_64F, descrA, d_val, CUDA_R_64F, d_row, d_col, d_p, CUDA_R_64F,
&c0, CUDA_R_64F, d_ap, CUDA_R_64F, CUDA_R_64F, _buffer ); // Ap
stat1 = cublasDdot(handle1, ngc, d_r, 1, d_r, 1, &rnorm_k); // ||r_k||^2
stat1 = cublasDdot(handle1, ngc, d_p, 1, d_ap, 1, &pap); // pAp
alpha = rnorm_k / pap; // alpha
stat1 = cublasDaxpy(handle1, ngc, &alpha, d_p, 1, d_x, 1); // x += alpha * p
alpha = -1.0 * alpha;
stat1 = cublasDaxpy(handle1, ngc, &alpha, d_ap, 1, d_r, 1); // r -= alpha * ap
stat1 = cublasDdot(handle1, ngc, d_r, 1, d_r, 1, &rnorm_k1); // ||r_k+1||^2
if (sqrt(rnorm_k1) <= epsilon * sqrt(bnorm)) { break; }
// p = r + beta * p
beta = rnorm_k1 / rnorm_k;
stat1 = cublasDscal(handle1, ngc, &beta, d_p, 1);
stat1 = cublasDaxpy(handle1, ngc, &cp1, d_r, 1, d_p, 1);
}
cusparseDestroyMatDescr(descrA);
cublasDestroy_v2(handle1);
cusparseDestroy(handle2);//
//cudaFree ( d_r );
//cudaFree ( d_p );
//cudaFree ( d_ap );
}
//////////////////////////////// GR /////////////////////////////////
__global__ static
void add_mfgr_val ( neuron_solve_t *d_gr_solve, int *mfgr_comp, double *mfgr_elem, const int num_mfgr )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_mfgr )
{
int post_num = mfgr_comp [ post_comp * num_mfgr + id ];
double l_val = mfgr_elem [ mfgr_val * num_mfgr + id ];
atomicAdd ( & ( d_gr_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val );
atomicAdd ( & ( d_gr_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_MFGR );
}
}
__global__ static
void add_gogr_val ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
int *gogr_comp, double *gogr_elem, const int num_gogr )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_gogr )
{
int post_num = gogr_comp [ post_comp * num_gogr + id ];
double l_val = gogr_elem [ gogr_val * num_gogr + id ];
atomicAdd ( & ( d_gr_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_gr_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GOGR );
}
}
__global__
void gr_cnm_vec_initialize ( neuron_t *d_gr, neuron_solve_t *d_gr_solve )
{
double **elem = d_gr -> elem;
double **cond = d_gr -> cond;
double **ion = d_gr -> ion;
double **vec = d_gr_solve -> vec;
//double *val = d_gr_solve -> val;
//double *val_ori = d_gr_solve -> val_ori;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_gr -> nc)
{
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak1 ] [ id ] + cond [ g_leak2 ] [ id ] + cond [ g_leak3 ] [ id ]
+ cond [ g_Na ] [ id ] * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak1 ] [ id ] * V_LEAK1_GR + cond [ g_leak2 ] [ id ] * V_LEAK2_GR + cond [ g_leak3 ] [ id ] * V_LEAK3_GR + elem [ i_ext ] [ id ]
+ cond [ g_Na ] [ id ] * V_Na_GR * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * V_Ca_GR * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * V_K_GR * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * V_K_GR * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * V_K_GR * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * V_K_GR * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * V_K_GR * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < gr_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to gr_solve.cu
}
__global__
static void gr_update_matrix ( neuron_t *d_gr, neuron_solve_t *d_gr_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
double **elem = d_gr -> elem;
double **cond = d_gr -> cond;
double **ion = d_gr -> ion;
double **vec = d_gr_solve -> vec;
double *val = d_gr_solve -> val;
double *val_ori = d_gr_solve -> val_ori;
double *b = d_gr_solve -> b;
int *col = d_gr_solve -> col;
int *row = d_gr_solve -> row;
double DT = d_gr -> DT;
if ( id < d_gr -> nc)
{
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak1 ] [ id ] + cond [ g_leak2 ] [ id ] + cond [ g_leak3 ] [ id ]
+ cond [ g_Na ] [ id ] * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * ion [ s_KM ] [ id ] ) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak1 ] [ id ] * V_LEAK1_GR + cond [ g_leak2 ] [ id ] * V_LEAK2_GR + cond [ g_leak3 ] [ id ] * V_LEAK3_GR + elem [ i_ext ] [ id ]
+ cond [ g_Na ] [ id ] * V_Na_GR * ion [ o_Na ] [ id ]
+ cond [ g_Ca ] [ id ] * V_Ca_GR * ion [ ch_Ca ] [ id ] * ion [ ch_Ca ] [ id ] * ion [ ci_Ca ] [ id ]
+ cond [ g_KV ] [ id ] * V_K_GR * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ] * ion [ n_KV ] [ id ]
+ cond [ g_KIR ] [ id ] * V_K_GR * ion [ ir_KIR ] [ id ]
+ cond [ g_KA ] [ id ] * V_K_GR * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ a_KA ] [ id ] * ion [ b_KA ] [ id ]
+ cond [ g_KCa ] [ id ] * V_K_GR * ion [ c_KCa ] [ id ]
+ cond [ g_KM ] [ id ] * V_K_GR * ion [ s_KM ] [ id ] ) / 2.0;
int d = d_gr_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ] + vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
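/*
  Sketch of the Crank-Nicolson step assembled above, assuming the coupling
  matrix A was pre-halved into val_ori as the "val /= 2.0" comments in the
  *_cnm_vec_initialize kernels suggest:
      ( Cm/DT + gamma2 ) v_new + (A/2) v_new
          = ( Cm/DT - gamma1 ) v_old - (A/2) v_old + ommega1 + ommega2
  cn_gamma* hold the half-weighted channel conductances and cn_ommega* the
  half-weighted driving terms at the old (1) and new (2) time levels; after
  assembly the new values are rotated into the old slots for the next step.
*/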
__host__
void gr_solve_by_cnm ( neuron_t *d_gr, neuron_solve_t *d_gr_solve,
neuron_t *p_gr, neuron_solve_t *p_gr_solve,
synapse_t *d_mfgr, synapse_t *d_gogr )
{
// global
double **ion = p_gr -> ion;
double **elem = p_gr -> elem;
int nc = p_gr -> nc;
static int numThreadsPerBlock = p_gr_solve -> numThreadsPerBlock;
static int numBlocks = p_gr_solve -> numBlocks;
// update ion
gr_Na_update_2order <<< numBlocks, numThreadsPerBlock >>>
( nc, elem [ v ], p_gr_solve -> vec [ cn_v_old ], CN_DT, elem [ compart ],
ion [ o_Na ], ion [ c1_Na ], ion [ c2_Na ], ion [ c3_Na ], ion [ c4_Na ], ion [ c5_Na ],
ion [ i1_Na ], ion [ i2_Na ], ion [ i3_Na ], ion [ i4_Na ], ion [ i5_Na ], ion [ i6_Na ] );
gr_update_ion_exp_imp <<< numBlocks, numThreadsPerBlock >>> ( d_gr, d_gr_solve, CN_DT );
// reset val and b
cudaMemcpy ( p_gr_solve -> val, p_gr_solve -> val_ori, p_gr_solve -> nnz * sizeof ( double ), cudaMemcpyDeviceToDevice );
reset_vec <<< numBlocks, numThreadsPerBlock >>> ( d_gr_solve, nc );
// update val, b and v
  add_mfgr_val <<< ( d_mfgr -> n + numThreadsPerBlock - 1 ) / numThreadsPerBlock, numThreadsPerBlock >>> // round the grid up so every synapse gets a thread
( d_gr_solve, d_mfgr -> comp, d_mfgr -> elem, d_mfgr -> n );
add_gogr_val <<< ( d_gogr -> n + 127 ) / 128, 128 >>>
( d_gr, d_gr_solve, d_gogr -> comp, d_gogr -> elem, d_gogr -> n ); //cudaDeviceSynchronize();
gr_update_matrix <<< numBlocks, numThreadsPerBlock >>> ( d_gr, d_gr_solve);
cg_cusparse_crs ( nc, p_gr_solve -> nnz, p_gr_solve -> val, p_gr_solve -> col, p_gr_solve -> row, p_gr -> elem [ v ], p_gr_solve -> b );
}
//////////////////////////////// GO /////////////////////////////////
__global__ static
void add_grgo_val ( neuron_t *d_go, neuron_solve_t *d_go_solve,
int *grgo_comp, double *grgo_elem, const int num_grgo )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_grgo )
{
int post_num = grgo_comp [ post_comp * num_grgo + id ];
double l_val = grgo_elem [ grgo_val * num_grgo + id ];
atomicAdd ( & ( d_go_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_go_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GRGO );
}
}
__global__
void go_cnm_vec_initialize ( neuron_t *d_go, neuron_solve_t *d_go_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_go -> nc)
{
double **elem = d_go -> elem;
double **cond = d_go -> cond;
double **ion = d_go -> ion;
double **vec = d_go_solve -> vec;
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak_go ] [ id ]
+ cond [ g_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak_go ] [ id ] * V_LEAK_GO + elem [ i_ext ] [ id ]
+ cond [ g_NaT_go ] [ id ] * V_Na_GO * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * V_Na_GO * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * V_Na_GO * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * V_Ca_GO * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ( d_go -> rev_ca2 [ id ] ) * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * V_K_GO * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * V_K_GO * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * V_K_GO * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * V_K_GO * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * V_K_GO * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * V_H_GO * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * V_H_GO * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < go_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to go_solve.cu
}
__global__
static void go_update_matrix ( neuron_t *d_go, neuron_solve_t *d_go_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_go -> nc)
{
double **elem = d_go -> elem;
double **cond = d_go -> cond;
double **ion = d_go -> ion;
double **vec = d_go_solve -> vec;
double *val = d_go_solve -> val;
double *val_ori = d_go_solve -> val_ori;
double *b = d_go_solve -> b;
int *col = d_go_solve -> col;
int *row = d_go_solve -> row;
double DT = d_go -> DT;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_go ] [ id ]
+ cond [ g_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_go ] [ id ] * V_LEAK_GO + elem [ i_ext ] [ id ]
+ cond [ g_NaT_go ] [ id ] * V_Na_GO * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ m_NaT_go ] [ id ] * ion [ h_NaT_go ] [ id ]
+ cond [ g_NaR_go ] [ id ] * V_Na_GO * ion [ r_NaR_go ] [ id ] * ion [ s_NaR_go ] [ id ]
+ cond [ g_NaP_go ] [ id ] * V_Na_GO * ion [ p_NaP_go ] [ id ]
+ cond [ g_CaHVA_go ] [ id ] * V_Ca_GO * ion [ ch_CaHVA_go ] [ id ] * ion [ ch_CaHVA_go ] [ id ] * ion [ ci_CaHVA_go ] [ id ]
+ cond [ g_CaLVA_go ] [ id ] * ( d_go -> rev_ca2 [ id ] ) * ion [ cl_CaLVA_go ] [ id ] * ion [ cl_CaLVA_go ] [ id ] * ion [ cm_CaLVA_go ] [ id ]
+ cond [ g_KAHP_go ] [ id ] * V_K_GO * ( ion [ o1_KAHP_go ] [ id ] + ion [ o2_KAHP_go ] [ id ] )
+ cond [ g_KV_go ] [ id ] * V_K_GO * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ] * ion [ n_KV_go ] [ id ]
+ cond [ g_KA_go ] [ id ] * V_K_GO * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ a_KA_go ] [ id ] * ion [ b_KA_go ] [ id ]
+ cond [ g_KC_go ] [ id ] * V_K_GO * ion [ c_KC_go ] [ id ]
+ cond [ g_Kslow_go ] [ id ] * V_K_GO * ion [ sl_Kslow_go ] [ id ]
+ cond [ g_HCN1_go ] [ id ] * V_H_GO * ( ion [ hf_HCN1_go ] [ id ] + ion [ hs_HCN1_go ] [ id ] )
+ cond [ g_HCN2_go ] [ id ] * V_H_GO * ( ion [ hf_HCN2_go ] [ id ] + ion [ hs_HCN2_go ] [ id ] )
) / 2.0;
int d = d_go_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ]
+ vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
__host__
void go_solve_by_cnm ( neuron_t *d_go, neuron_solve_t *d_go_solve,
neuron_t *p_go, neuron_solve_t *p_go_solve, synapse_t *d_grgo )
{
// global
double **ion = p_go -> ion;
int nc = p_go -> nc;
static int numThreadsPerBlock = p_go_solve -> numThreadsPerBlock;
static int numBlocks = p_go_solve -> numBlocks;
// update ion
go_update_ion_exp_imp <<< numBlocks, numThreadsPerBlock >>> ( d_go, d_go_solve, CN_DT );
go_KAHP_update_2order <<< numBlocks, numThreadsPerBlock >>>
( p_go -> n, p_go -> elem [ Ca ], p_go -> ca_old, ion [ o1_KAHP_go ], ion [ o2_KAHP_go ],
ion [ c1_KAHP_go ], ion [ c2_KAHP_go ], ion [ c3_KAHP_go ], ion [ c4_KAHP_go ], CN_DT );
// reset val and b
cudaMemcpy ( p_go_solve -> val, p_go_solve -> val_ori, p_go_solve -> nnz * sizeof ( double ), cudaMemcpyDeviceToDevice );
reset_vec <<< numBlocks, numThreadsPerBlock >>> ( d_go_solve, nc );
// update val, b and v
add_grgo_val <<< ( d_grgo -> n + 127 ) / 128, 128 >>>
( d_go, d_go_solve, d_grgo -> comp, d_grgo -> elem, d_grgo -> n ); //cudaDeviceSynchronize();
go_update_matrix <<< numBlocks, numThreadsPerBlock >>> ( d_go, d_go_solve);
cg_cusparse_crs( p_go -> nc, p_go_solve -> nnz, p_go_solve -> val, p_go_solve -> col, p_go_solve -> row, p_go -> elem [ v ], p_go_solve -> b );
}
//////////////////////////////// PKJ /////////////////////////////////
__global__ static
void add_mlipkj_val ( neuron_solve_t *d_pkj_solve, int *mlipkj_comp, double *mlipkj_elem, const int num_mlipkj )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_mlipkj )
{
int post_num = mlipkj_comp [ post_comp * num_mlipkj + id ];
double l_val = mlipkj_elem [ mlipkj_val * num_mlipkj + id ];
atomicAdd ( & ( d_pkj_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val );
atomicAdd ( & ( d_pkj_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_MLIPKJ );
}
}
__global__ static
void add_grpkj_val ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve,
int *grpkj_comp, double *grpkj_elem, const int num_grpkj )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_grpkj )
{
int post_num = grpkj_comp [ post_comp * num_grpkj + id ];
double l_val = grpkj_elem [ grpkj_val * num_grpkj + id ];
atomicAdd ( & ( d_pkj_solve -> vec [ cn_gamma2 ] [ post_num ] ), 0.5 * l_val ); // no need atomicAdd ?
atomicAdd ( & ( d_pkj_solve -> vec [ cn_ommega2 ] [ post_num ] ), 0.5 * l_val * E_GRPKJ );
}
}
__global__
void pkj_cnm_vec_initialize ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_pkj -> nc )
{
double **elem = d_pkj -> elem;
double **cond = d_pkj -> cond;
double **ion = d_pkj -> ion;
double **vec = d_pkj_solve -> vec;
double l_v_Ca = d_pkj -> rev_ca2 [ id ];
vec [ cn_gamma1 ] [ id ] =
( + cond [ g_leak_pkj ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ]
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ]
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ]
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ]
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ]
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ]
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ]
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ]
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ]
) / 2.0;
vec [ cn_gamma2 ] [ id ] = 0.0;//= vec [ cn_gamma1 ] [ id ];//
vec [ cn_ommega1 ] [ id ] =
( + cond [ g_leak_pkj ] [ id ] * ( V_LEAK_PKJ ) + elem [ i_ext ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ] * ( V_KH_PKJ ) //KH????
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ] * ( V_KH_PKJ ) //KH???
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ( V_K_PKJ )
) / 2.0;
vec [ cn_ommega2 ] [ id ] = 0.0;//vec [ cn_ommega1 ] [ id ];//
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < pkj_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to pkj_solve.cu
}
__global__
static void pkj_update_matrix ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_pkj -> nc)
{
double **elem = d_pkj -> elem;
double **cond = d_pkj -> cond;
double **ion = d_pkj -> ion;
double **vec = d_pkj_solve -> vec;
double *val = d_pkj_solve -> val;
double *val_ori = d_pkj_solve -> val_ori;
double *b = d_pkj_solve -> b;
int *col = d_pkj_solve -> col;
int *row = d_pkj_solve -> row;
double DT = d_pkj -> DT;
double l_v_Ca = d_pkj -> rev_ca2 [ id ] ;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_pkj ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ]
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ]
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ]
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ]
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ]
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ]
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ]
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ]
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ]
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ]
) / 2.0;
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_pkj ] [ id ] * ( V_LEAK_PKJ ) + elem [ i_ext ] [ id ]
+ cond [ g_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ m_NaF_pkj ] [ id ] * ion [ h_NaF_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ion [ m_NaP_pkj ] [ id ] * ( V_Na_PKJ )
+ cond [ g_CaP_pkj ] [ id ] * ion [ m_CaP_pkj ] [ id ] * ion [ h_CaP_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_CaT_pkj ] [ id ] * ion [ m_CaT_pkj ] [ id ] * ion [ h_CaT_pkj ] [ id ] * ( l_v_Ca )
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh1_pkj ] [ id ] * ( V_KH_PKJ )
+ cond [ g_Kh_pkj ] [ id ] * ion [ m_Kh2_pkj ] [ id ] * ( V_KH_PKJ )
+ cond [ g_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ m_Kdr_pkj ] [ id ] * ion [ h_Kdr_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KM_pkj ] [ id ] * ion [ m_KM_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ m_KA_pkj ] [ id ] * ion [ h_KA_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_KC_pkj ] [ id ] * ion [ m_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ion [ z_KC_pkj ] [ id ] * ( V_K_PKJ )
+ cond [ g_K2_pkj ] [ id ] * ion [ m_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ion [ z_K2_pkj ] [ id ] * ( V_K_PKJ )
) / 2.0;
int d = d_pkj_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ] [ id ]
+ vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
//d_pkj_solve -> vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
}
__host__
void pkj_solve_by_cnm ( neuron_t *d_pkj, neuron_solve_t *d_pkj_solve,
neuron_t *p_pkj, neuron_solve_t *p_pkj_solve,
synapse_t *d_grpkj, synapse_t *d_mlipkj )
{
// global
int nc = p_pkj -> nc;
static int numThreadsPerBlock = p_pkj_solve -> numThreadsPerBlock;
static int numBlocks = p_pkj_solve -> numBlocks;
// update ion
pkj_update_ion_2nd <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve, CN_DT );
//pkj_update_ion_RK2 <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve, CN_DT );
//pkj_update_ion <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve, CN_DT );
// reset val and b
cudaMemcpy ( p_pkj_solve -> val, p_pkj_solve -> val_ori, p_pkj_solve -> nnz * sizeof ( double ), cudaMemcpyDeviceToDevice );
reset_vec <<< numBlocks, numThreadsPerBlock >>> ( d_pkj_solve, nc );
// update val, b and v
add_grpkj_val <<< ( d_grpkj -> n + 127 ) / 128, 128 >>>
( d_pkj, d_pkj_solve, d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n );
// add_mlipkj_val <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>>
// ( d_pkj_solve, d_mlipkj -> comp, d_mlipkj -> elem, d_mlipkj -> n );
pkj_update_matrix <<< numBlocks, numThreadsPerBlock >>> ( d_pkj, d_pkj_solve );
cg_cusparse_crs ( p_pkj -> nc, p_pkj_solve -> nnz, p_pkj_solve -> val, p_pkj_solve -> col, p_pkj_solve -> row, p_pkj -> elem [ v ], p_pkj_solve -> b );
}
//////////////////////////////// IO /////////////////////////////////
__global__
static void eazy_transposed_matrix ( const int nnz, const double *d_val, double *val_h, int *order )
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
if( i < nnz ) { val_h [ i ] = d_val [ order [ i ] ]; }
}
__global__
static void create_transposed_matrix ( const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row,
double *val_h, int *col_h, int *row_h, int *order )
{
row_h [ 0 ] = 0;
int count_row = 0;
int n_v = 0;
int n_r = 0;
for ( int i = 0; i < nnz; i++ ) { order [ i ] = i; }
for ( int i = 0; i < ngc; i++ ) {
for ( int j = 0; j < ngc; j++ ) {
for ( int k = d_row [ j ]; k < d_row [ j + 1 ]; k++ ) {
if ( d_col [ k ] == i ) {
val_h [ n_v ] = d_val [ k ];
order [ n_v ] = k;
col_h [ n_v ] = j;
n_v++;
}
}
}
row_h [ n_r + 1 ] = n_v;
n_r++;
}
// Debug
for ( int i = 0; i < nnz; i++ )
{
int j = order [ i ];
if ( d_val [ j ] != val_h [ i ] )
{
printf ("val cpy order error in solve_cnm.cu\n");
}
}
// Debug
/*
for ( int i = 0; i < nnz; i++ )
printf ( "val [ %d ] = %f\n", i, d_val [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "val_h [ %d ] = %f\n", i, val_h [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "col [ %d ] = %d\n", i, d_col [ i ] );
for ( int i = 0; i < nnz; i++ )
printf ( "col_h [ %d ] = %d\n", i, col_h [ i ] );
for ( int i = 0; i < ngc + 1; i++ )
printf ( "row [ %d ] = %d\n", i, d_row [ i ] );
for ( int i = 0; i < ngc + 1; i++ )
printf ( "row_h [ %d ] = %d\n", i, row_h [ i ] );
*/
}
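/*
  Strategy note: create_transposed_matrix is an O(ngc * nnz) single-thread
  kernel, but it runs only once per matrix size. Besides building the
  transposed CSR arrays it records, in `order`, where each transposed value
  lives in the original val array; since the sparsity pattern is fixed and
  only the values change each time step, later calls can refresh the
  transpose with the cheap parallel gather eazy_transposed_matrix. A library
  routine such as cusparseCsr2cscEx2 could also build the transpose, but it
  would not expose this value-only fast path as directly.
*/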
__host__
static void bicg_cusparse_crs( const int ngc, const int nnz,
const double *d_val, const int *d_col, const int *d_row, double *d_x, double *d_b)
{
static double *d_r, *d_p, *d_ap;
static double *d_rs, *d_ps, *d_atps;
static double *_buffer;
static double *val_h;
static int *col_h, *row_h, *order;
static int size = 0;
if ( size < ngc ) {
  if ( size != 0 ) { // free the previous, smaller buffers before reallocating
cudaFree ( d_r );
cudaFree ( d_p );
cudaFree ( d_ap );
cudaFree ( d_rs );
cudaFree ( d_ps );
cudaFree ( d_atps );
cudaFree ( _buffer );
cudaFree ( val_h );
cudaFree ( col_h );
cudaFree ( row_h );
cudaFree ( order );
}
cudaMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_rs, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_ps, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &d_atps, ngc * sizeof ( double ) );
cudaMalloc ( ( double ** ) &val_h, nnz * sizeof ( double ) );
cudaMalloc ( ( int ** ) &col_h, nnz * sizeof ( int ) );
cudaMalloc ( ( int ** ) &row_h, ( ngc + 1 ) * sizeof ( int ) );
cudaMalloc ( ( int ** ) &order, nnz * sizeof ( int ) );
cudaMalloc ( ( double ** ) &_buffer, ngc * sizeof ( double ) );
create_transposed_matrix <<< 1, 1 >>>
( ngc, nnz, d_val, d_col, d_row, val_h, col_h, row_h, order );
size = ngc;
}
eazy_transposed_matrix <<< (nnz + 127)/128, 128 >>> ( nnz, d_val, val_h, order );
//cudaDeviceSynchronize();
//cudaMalloc ( ( double ** ) &d_r, ngc * sizeof ( double ) );
//cudaMalloc ( ( double ** ) &d_p, ngc * sizeof ( double ) );
//cudaMalloc ( ( double ** ) &d_ap, ngc * sizeof ( double ) );
double bnorm, rnorm_k, rnorm_k1;
double alpha, beta, pap;
double epsilon = 1.0e-15;
double cp1 = 1.0;
double c0 = 0.0;
double cm1 = -1.0;
cublasStatus_t stat1;
cublasHandle_t handle1;
cusparseStatus_t stat2;
cusparseHandle_t handle2;
cusparseMatDescr_t descrA;
stat1 = cublasCreate_v2(&handle1);
stat2 = cusparseCreate(&handle2);
cusparseCreateMatDescr(&descrA);
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
stat1 = cublasDscal(handle1, ngc, &c0, d_x, 1); // x = 0
stat1 = cublasDcopy(handle1, ngc, d_b, 1, d_r, 1); // r = b
stat1 = cublasDcopy(handle1, ngc, d_r, 1, d_rs, 1); // rs = r
stat1 = cublasDcopy(handle1, ngc, d_r, 1, d_p, 1); // p = r
stat1 = cublasDcopy(handle1, ngc, d_rs, 1, d_ps, 1); // ps = rs
stat1 = cublasDdot(handle1, ngc, d_b, 1, d_b, 1, &bnorm); // ||b||
/**/
for (int k = 0; k < 100; k++) {
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, CUSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, CUDA_R_64F, descrA, d_val, CUDA_R_64F, d_row, d_col, d_p, CUDA_R_64F,
&c0, CUDA_R_64F, d_ap, CUDA_R_64F, CUDA_R_64F, _buffer ); // Ap
stat2 = cusparseCsrmvEx(handle2,CUSPARSE_ALG_MERGE_PATH, CUSPARSE_OPERATION_NON_TRANSPOSE,
ngc, ngc, nnz, &cp1, CUDA_R_64F, descrA, val_h, CUDA_R_64F, row_h, col_h, d_ps, CUDA_R_64F,
&c0, CUDA_R_64F, d_atps, CUDA_R_64F, CUDA_R_64F, _buffer ); // Atps
stat1 = cublasDdot(handle1, ngc, d_r, 1, d_rs, 1, &rnorm_k); // ( rs, r )
stat1 = cublasDdot(handle1, ngc, d_ps, 1, d_ap, 1, &pap); // psAp
alpha = rnorm_k / pap; // alpha
stat1 = cublasDaxpy(handle1, ngc, &alpha, d_p, 1, d_x, 1); // x += alpha * p
alpha = -1.0 * alpha;
stat1 = cublasDaxpy(handle1, ngc, &alpha, d_ap, 1, d_r, 1); // r -= alpha * ap
stat1 = cublasDaxpy(handle1, ngc, &alpha, d_atps, 1, d_rs, 1); // rs -= alpha * atps
stat1 = cublasDdot(handle1, ngc, d_r, 1, d_rs, 1, &rnorm_k1); // ( r_k+1, rs_k+1 )
if (sqrt(rnorm_k1) <= epsilon * sqrt(bnorm)) { break; }
// beta
beta = rnorm_k1 / rnorm_k;
// p = r + beta * p
stat1 = cublasDscal(handle1, ngc, &beta, d_p, 1);
stat1 = cublasDaxpy(handle1, ngc, &cp1, d_r, 1, d_p, 1);
// ps = rs + beta * ps
stat1 = cublasDscal(handle1, ngc, &beta, d_ps, 1);
stat1 = cublasDaxpy(handle1, ngc, &cp1, d_rs, 1, d_ps, 1);
}
cusparseDestroyMatDescr(descrA);
cublasDestroy_v2(handle1);
cusparseDestroy(handle2);//
//cudaFree ( d_r );
//cudaFree ( d_p );
//cudaFree ( d_ap );
}
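/*
  The loop above follows the classical biconjugate-gradient recurrence, with
  the explicitly transposed CSR matrix (val_h / col_h / row_h) standing in
  for the A^T products -- a sketch:
      alpha_k = (rs_k, r_k) / (ps_k, A p_k)
      x  += alpha * p          r  -= alpha * A p
      rs -= alpha * A^T ps
      beta_k  = (rs_{k+1}, r_{k+1}) / (rs_k, r_k)
      p  = r + beta * p        ps = rs + beta * ps
  One caveat: (r, rs) is an inner product of two different vectors and can be
  negative, in which case sqrt(rnorm_k1) is NaN, the break test is false, and
  the loop simply runs all 100 iterations; testing fabs(rnorm_k1), or ||r||
  itself, would be the more robust stopping rule.
*/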
__global__ static
void add_io_gap_val ( neuron_solve_t *d_io_solve, int *io_gap_comp, double *io_gap_elem, const int num_io_gap )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < num_io_gap )
{
double l_val = -1.0 * io_gap_elem [ gap_current * num_io_gap + id ];//-0.5
int post_num = io_gap_comp [ post_comp_gap * num_io_gap + id ];
atomicAdd ( & ( d_io_solve -> vec [ cn_ommega2 ] [ post_num ] ), ( l_val ) );
}
}
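/*
  Note: unlike the chemical-synapse kernels, the gap-junction contribution
  enters only the source term cn_ommega2 and not the conductance term
  cn_gamma2 -- the gap current is evidently handled explicitly, precomputed
  by io_gap_update from the previous voltages rather than folded into the
  implicit matrix.
*/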
__global__
void io_cnm_vec_initialize ( neuron_t *d_io, neuron_solve_t *d_io_solve )
{
double **elem = d_io -> elem;
double **cond = d_io -> cond;
double **ion = d_io -> ion;
double **vec = d_io_solve -> vec;
//double *val = d_io_solve -> val;
//double *val_ori = d_io_solve -> val_ori;
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_io -> nc)
{
vec [ cn_gamma1 ] [ id ] =
( cond [ g_leak_io ] [ id ]
+ cond [ g_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_gamma2 ] [ id ] = 0.0;
vec [ cn_ommega1 ] [ id ] =
( cond [ g_leak_io ] [ id ] * V_LEAK_IO + elem [ i_ext ] [ id ]
+ cond [ g_CaL_io ] [ id ] * V_Ca_IO * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * V_Na_IO * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * V_K_IO * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * V_K_IO * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * V_Ca_IO * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * V_K_IO * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * V_H_IO * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_ommega2 ] [ id ] = 0.0;
vec [ cn_v_old ] [ id ] = elem [ v ] [ id ];
}
//for ( int i = 0; i < io_solve -> nnz; i++ ) { val [ id ] /= 2.0; val_ori [ id ] = val [ id ]; } // to io_solve.cu
}
__global__
static void io_update_matrix ( neuron_t *d_io, neuron_solve_t *d_io_solve )
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if ( id < d_io -> nc)
{
double **elem = d_io -> elem;
double **cond = d_io -> cond;
double **ion = d_io -> ion;
double **vec = d_io_solve -> vec;
double *val = d_io_solve -> val;
double *val_ori = d_io_solve -> val_ori;
double *b = d_io_solve -> b;
int *col = d_io_solve -> col;
int *row = d_io_solve -> row;
double DT = d_io -> DT;
vec [ cn_gamma2 ] [ id ] +=
( cond [ g_leak_io ] [ id ]
+ cond [ g_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
vec [ cn_ommega2 ] [ id ] +=
( cond [ g_leak_io ] [ id ] * V_LEAK_IO + elem [ i_ext ] [ id ]
+ cond [ g_CaL_io ] [ id ] * V_Ca_IO * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ k_CaL_io ] [ id ] * ion [ l_CaL_io ] [ id ]
+ cond [ g_Na_io ] [ id ] * V_Na_IO * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ m_Na_io ] [ id ] * ion [ h_Na_io ] [ id ]
+ cond [ g_Kdr_io ] [ id ] * V_K_IO * ion [ n_Kdr_io ] [ id ] * ion [ p_Kdr_io ] [ id ]
+ cond [ g_K_io ] [ id ] * V_K_IO * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ] * ion [ x_K_io ] [ id ]
+ cond [ g_CaH_io ] [ id ] * V_Ca_IO * ion [ r_CaH_io ] [ id ] * ion [ r_CaH_io ] [ id ]
+ cond [ g_KCa_io ] [ id ] * V_K_IO * ion [ s_KCa_io ] [ id ]
+ cond [ g_H_io ] [ id ] * V_H_IO * ion [ q_H_io ] [ id ] ) / 2.0;//*0.5
int d = d_io_solve -> dig [ id ];
val [ d ] += ( elem [ Cm ] [ id ] / DT) + vec [ cn_gamma2 ] [ id ];
b [ id ] = 0.0;
for (int j = row [ id ]; j < row [ id + 1 ]; j++) {
b [ id ] -= elem [ v ] [ col [ j ] ] * val_ori [ j ];
}
b [ id ] += (elem [ Cm ] [ id ] / DT - vec [ cn_gamma1 ] [ id ]) * elem [ v ][ id ] + vec [ cn_ommega1 ] [ id ] + vec [ cn_ommega2 ] [ id ];
vec [ cn_ommega1 ] [ id ] = vec [ cn_ommega2 ] [ id ];
vec [ cn_gamma1 ] [ id ] = vec [ cn_gamma2 ] [ id ];
}
}
__host__
void io_solve_by_cnm ( neuron_t *d_io, neuron_solve_t *d_io_solve,
neuron_t *p_io, neuron_solve_t *p_io_solve, gap_t* d_io_gap )
{
// global
double **ion = p_io -> ion;
double **elem = p_io -> elem;
int nc = p_io -> nc;
static int numThreadsPerBlock = p_io_solve -> numThreadsPerBlock;
static int numBlocks = p_io_solve -> numBlocks;
// update ion
io_update_ion_2nd <<< numBlocks, numThreadsPerBlock >>> ( d_io, d_io_solve, CN_DT );
// reset val and b
cudaMemcpy ( p_io_solve -> val, p_io_solve -> val_ori, p_io_solve -> nnz * sizeof ( double ), cudaMemcpyDeviceToDevice );
reset_vec <<< numBlocks, numThreadsPerBlock >>> ( d_io_solve, nc );
// update val, b and v
if ( p_io -> n > 1 )
{
io_gap_update <<< ( d_io_gap -> n + 127 ) / 128, 128 >>>
( d_io, d_io_gap -> comp, d_io_gap -> elem, d_io_gap -> n );
add_io_gap_val <<< ( d_io_gap -> n + 127 ) / 128, 128 >>>
( d_io_solve, d_io_gap -> comp, d_io_gap -> elem, d_io_gap -> n );
}
io_update_matrix <<< numBlocks, numThreadsPerBlock >>> ( d_io, d_io_solve );
bicg_cusparse_crs ( nc, p_io_solve -> nnz, p_io_solve -> val,
p_io_solve -> col, p_io_solve -> row, p_io -> elem [ v ], p_io_solve -> b );
}
|
cb44bfa4093fcba7c84349a4ea53cb4bdb7249c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/Repeat.h>
__global__ static void compute_cuda_kernel(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
int64_t repeat = repeat_ptr[i];
int64_t start = end - repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
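/*
  Worked example (illustrative values only): for repeats = [1, 2, 3] the
  caller passes cumsum_ptr = [1, 3, 6] and size = 3; index i fills
  result[cumsum[i] - repeat[i] .. cumsum[i]) with i, yielding
  result = [0, 1, 1, 2, 2, 2] -- the index output of
  torch.repeat_interleave.
*/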
static void compute_cuda(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
hipLaunchKernelGGL(( compute_cuda_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), repeat_ptr, cumsum_ptr, result_ptr, size);
}
namespace at { namespace native {
Tensor repeat_interleave_cuda(const Tensor &repeat) {
return repeat_interleave_common<compute_cuda>(repeat);
}
}}
| cb44bfa4093fcba7c84349a4ea53cb4bdb7249c6.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/Repeat.h>
__global__ static void compute_cuda_kernel(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
int64_t repeat = repeat_ptr[i];
int64_t start = end - repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
static void compute_cuda(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
compute_cuda_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(repeat_ptr, cumsum_ptr, result_ptr, size);
}
namespace at { namespace native {
Tensor repeat_interleave_cuda(const Tensor &repeat) {
return repeat_interleave_common<compute_cuda>(repeat);
}
}}
|
0e682dca45a45c18fc60ce7920469a3c074d15ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void decat(float* output1, float* output2, float* output3, float* output4, size_t num1, size_t num2, size_t num3, size_t num4, size_t maxNum, float* input, const int numPerBatch)
{
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
for(;i < maxNum; i += size_t(blockDim.x * gridDim.x)){
size_t batchIdx = i / numPerBatch; // which batch this thread is working in
const int batchOffset = i - batchIdx * numPerBatch; // offset of current thread in current batch
if(batchOffset < num1){ // first output
output1[batchOffset + batchIdx * num1] = input[i];
}
else if(batchOffset < (num1 + num2)){ // second output
output2[(batchOffset - num1) + batchIdx * num2] = input[i];
}
        else if(batchOffset < (num1 + num2 + num3)){ // third output
            output3[(batchOffset - (num1 + num2)) + batchIdx * num3] = input[i];
        }
        else{ // fourth output
output4[(batchOffset - (num1 + num2 + num3)) + batchIdx * num4] = input[i];
}
}
} | 0e682dca45a45c18fc60ce7920469a3c074d15ea.cu | #include "includes.h"
__global__ void decat(float* output1, float* output2, float* output3, float* output4, size_t num1, size_t num2, size_t num3, size_t num4, size_t maxNum, float* input, const int numPerBatch)
{
size_t i = blockDim.x * blockIdx.x + threadIdx.x;
for(;i < maxNum; i += size_t(blockDim.x * gridDim.x)){
size_t batchIdx = i / numPerBatch; // which batch this thread is working in
const int batchOffset = i - batchIdx * numPerBatch; // offset of current thread in current batch
if(batchOffset < num1){ // first output
output1[batchOffset + batchIdx * num1] = input[i];
}
else if(batchOffset < (num1 + num2)){ // second output
output2[(batchOffset - num1) + batchIdx * num2] = input[i];
}
        else if(batchOffset < (num1 + num2 + num3)){ // third output
            output3[(batchOffset - (num1 + num2)) + batchIdx * num3] = input[i];
        }
        else{ // fourth output
output4[(batchOffset - (num1 + num2 + num3)) + batchIdx * num4] = input[i];
}
}
} |
1e186a5994929bb999770548e7a948b9913592e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgecsrmv.cu normal z -> d, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
dgecsrmv_kernel(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
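// Worked CSR example (illustrative only): the 3x3 matrix
// [ 4 0 1 ; 0 3 0 ; 2 0 5 ] is stored as dval = {4, 1, 3, 2, 5},
// dcolind = {0, 2, 1, 0, 2}, drowptr = {0, 2, 3, 5}; row i of the kernel
// then accumulates dot = sum over j in [drowptr[i], drowptr[i+1]) of
// dval[j] * dx[dcolind[j]].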
// shifted CSR-SpMV kernel
__global__ void
dgecsrmv_kernel_shift(
int num_rows,
int num_cols,
double alpha,
double lambda,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
lambda double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
double lambda,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
| 1e186a5994929bb999770548e7a948b9913592e0.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgecsrmv.cu normal z -> d, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
dgecsrmv_kernel(
int num_rows,
int num_cols,
double alpha,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
dgecsrmv_kernel_shift(
int num_rows,
int num_cols,
double alpha,
double lambda,
double * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
double * dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
double dot = MAGMA_D_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
dgecsrmv_kernel<<< grid, threads, 0, queue >>>
(m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha double
scalar multiplier
@param[in]
lambda double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDouble_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
double alpha,
double lambda,
magmaDouble_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
dgecsrmv_kernel_shift<<< grid, threads, 0, queue >>>
(m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
|
568db3b337f143d51b1710c6c1ed709b524b0d39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdio>
#include "oblique_path_aggregation.hpp"
#include "path_aggregation_common.hpp"
namespace sgm {
namespace path_aggregation {
static constexpr unsigned int DP_BLOCK_SIZE = 16u;
static constexpr unsigned int BLOCK_SIZE = WARP_SIZE * 8u;
template <int X_DIRECTION, int Y_DIRECTION, unsigned int MAX_DISPARITY>
__global__ void aggregate_oblique_path_kernel(
uint8_t *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_WARP = WARP_SIZE / SUBGROUP_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK;
static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE;
static_assert(X_DIRECTION == 1 || X_DIRECTION == -1, "");
static_assert(Y_DIRECTION == 1 || Y_DIRECTION == -1, "");
if(width == 0 || height == 0){
return;
}
__shared__ feature_type right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS];
DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE> dp;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int group_id = threadIdx.x % WARP_SIZE / SUBGROUP_SIZE;
const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE;
const unsigned int shfl_mask =
((1u << SUBGROUP_SIZE) - 1u) << (group_id * SUBGROUP_SIZE);
const int x0 =
blockIdx.x * PATHS_PER_BLOCK +
warp_id * PATHS_PER_WARP +
group_id +
(X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0);
const int right_x00 =
blockIdx.x * PATHS_PER_BLOCK +
(X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0);
const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE;
const unsigned int right0_addr =
static_cast<unsigned int>(right_x00 + PATHS_PER_BLOCK - 1 - x0) + dp_offset;
const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE;
const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE;
for(unsigned int iter = 0; iter < height; ++iter){
const int y = static_cast<int>(Y_DIRECTION > 0 ? iter : height - 1 - iter);
const int x = x0 + static_cast<int>(iter) * X_DIRECTION;
const int right_x0 = right_x00 + static_cast<int>(iter) * X_DIRECTION;
// Load right to smem
for(unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE){
const unsigned int i = i0 + threadIdx.x;
if(i < RIGHT_BUFFER_SIZE){
const int x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i);
feature_type right_value = 0;
if(0 <= x && x < static_cast<int>(width)){
right_value = right[x + y * width];
}
const unsigned int lo = i % DP_BLOCK_SIZE;
const unsigned int hi = i / DP_BLOCK_SIZE;
right_buffer[lo][hi] = right_value;
if(hi > 0){
right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value;
}
}
}
__syncthreads();
// Compute
if(0 <= x && x < static_cast<int>(width)){
const feature_type left_value = __ldg(&left[x + y * width]);
feature_type right_values[DP_BLOCK_SIZE];
for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){
right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi];
}
uint32_t local_costs[DP_BLOCK_SIZE];
for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){
local_costs[j] = __popc(left_value ^ right_values[j]);
}
dp.update(local_costs, p1, p2, shfl_mask);
store_uint8_vector<DP_BLOCK_SIZE>(
&dest[dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width],
dp.dp);
}
__syncthreads();
}
}
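/*
  The dp.update call above evaluates the standard SGM cost recurrence along
  the oblique scanline r (a sketch -- the DP internals live in
  path_aggregation_common.hpp):
      L_r(p, d) = C(p, d) + min( L_r(p-r, d),
                                 L_r(p-r, d-1) + P1,
                                 L_r(p-r, d+1) + P1,
                                 min_k L_r(p-r, k) + P2 )
                          - min_k L_r(p-r, k)
  with the matching cost C(p, d) taken here as the Hamming distance
  __popc(left ^ right) between census-transform features.
*/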
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_upleft2downright_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
hipLaunchKernelGGL(( aggregate_oblique_path_kernel<1, 1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream,
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_upright2downleft_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
hipLaunchKernelGGL(( aggregate_oblique_path_kernel<-1, 1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream,
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_downright2upleft_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
hipLaunchKernelGGL(( aggregate_oblique_path_kernel<-1, -1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream,
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_downleft2upright_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
hipLaunchKernelGGL(( aggregate_oblique_path_kernel<1, -1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream,
dest, left, right, width, height, p1, p2);
}
template void enqueue_aggregate_upleft2downright_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_upleft2downright_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_upright2downleft_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_upright2downleft_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_downright2upleft_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_downright2upleft_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_downleft2upright_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
template void enqueue_aggregate_downleft2upright_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
hipStream_t stream);
}
}
| 568db3b337f143d51b1710c6c1ed709b524b0d39.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdio>
#include "oblique_path_aggregation.hpp"
#include "path_aggregation_common.hpp"
namespace sgm {
namespace path_aggregation {
static constexpr unsigned int DP_BLOCK_SIZE = 16u;
static constexpr unsigned int BLOCK_SIZE = WARP_SIZE * 8u;
template <int X_DIRECTION, int Y_DIRECTION, unsigned int MAX_DISPARITY>
__global__ void aggregate_oblique_path_kernel(
uint8_t *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_WARP = WARP_SIZE / SUBGROUP_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK;
static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE;
static_assert(X_DIRECTION == 1 || X_DIRECTION == -1, "");
static_assert(Y_DIRECTION == 1 || Y_DIRECTION == -1, "");
if(width == 0 || height == 0){
return;
}
__shared__ feature_type right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS];
DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE> dp;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int group_id = threadIdx.x % WARP_SIZE / SUBGROUP_SIZE;
const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE;
const unsigned int shfl_mask =
((1u << SUBGROUP_SIZE) - 1u) << (group_id * SUBGROUP_SIZE);
const int x0 =
blockIdx.x * PATHS_PER_BLOCK +
warp_id * PATHS_PER_WARP +
group_id +
(X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0);
const int right_x00 =
blockIdx.x * PATHS_PER_BLOCK +
(X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0);
const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE;
const unsigned int right0_addr =
static_cast<unsigned int>(right_x00 + PATHS_PER_BLOCK - 1 - x0) + dp_offset;
const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE;
const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE;
for(unsigned int iter = 0; iter < height; ++iter){
const int y = static_cast<int>(Y_DIRECTION > 0 ? iter : height - 1 - iter);
const int x = x0 + static_cast<int>(iter) * X_DIRECTION;
const int right_x0 = right_x00 + static_cast<int>(iter) * X_DIRECTION;
// Load right to smem
for(unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE){
const unsigned int i = i0 + threadIdx.x;
if(i < RIGHT_BUFFER_SIZE){
const int x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i);
feature_type right_value = 0;
if(0 <= x && x < static_cast<int>(width)){
right_value = right[x + y * width];
}
const unsigned int lo = i % DP_BLOCK_SIZE;
const unsigned int hi = i / DP_BLOCK_SIZE;
right_buffer[lo][hi] = right_value;
if(hi > 0){
right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value;
}
}
}
__syncthreads();
// Compute
if(0 <= x && x < static_cast<int>(width)){
const feature_type left_value = __ldg(&left[x + y * width]);
feature_type right_values[DP_BLOCK_SIZE];
for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){
right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi];
}
uint32_t local_costs[DP_BLOCK_SIZE];
for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){
local_costs[j] = __popc(left_value ^ right_values[j]);
}
dp.update(local_costs, p1, p2, shfl_mask);
store_uint8_vector<DP_BLOCK_SIZE>(
&dest[dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width],
dp.dp);
}
__syncthreads();
}
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_upleft2downright_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
aggregate_oblique_path_kernel<1, 1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>(
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_upright2downleft_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
aggregate_oblique_path_kernel<-1, 1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>(
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_downright2upleft_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
aggregate_oblique_path_kernel<-1, -1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>(
dest, left, right, width, height, p1, p2);
}
template <unsigned int MAX_DISPARITY>
void enqueue_aggregate_downleft2upright_path(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream)
{
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE;
static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE;
const int gdim = (width + height + PATHS_PER_BLOCK - 2) / PATHS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
aggregate_oblique_path_kernel<1, -1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>(
dest, left, right, width, height, p1, p2);
}
template void enqueue_aggregate_upleft2downright_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_upleft2downright_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_upright2downleft_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_upright2downleft_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_downright2upleft_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_downright2upleft_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_downleft2upright_path<64u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
template void enqueue_aggregate_downleft2upright_path<128u>(
cost_type *dest,
const feature_type *left,
const feature_type *right,
int width,
int height,
unsigned int p1,
unsigned int p2,
cudaStream_t stream);
}
}
|
cba62106722b3e1d62cbcd957c2294983f1cdb3a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
* @copyright Copyright 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
int max_degree, bool is_static) :
StaticAlgorithm(hornet),
load_balancing(hornet),
is_static(is_static) {
if (max_iteration <= 0)
ERROR("Number of max iterations should be greater than zero")
hd_katzdata().nV = hornet.nV();
hd_katzdata().K = K;
hd_katzdata().max_degree = max_degree;
hd_katzdata().alpha = 1.0 / (static_cast<double>(max_degree) + 1.0);
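    // Katz series: KC(v) = sum_i alpha^i * numPaths_i(v). Choosing
    // alpha = 1 / (max_degree + 1) keeps alpha strictly below 1 / lambda_max
    // (the adjacency spectral radius is at most max_degree), so the series
    // converges.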
hd_katzdata().max_iteration = max_iteration;
auto nV = hornet.nV();
if (is_static) {
gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
hd_katzdata().num_paths = nullptr;
h_paths_ptr = nullptr;
}
else {
gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
gpu::allocate(hd_katzdata().num_paths, max_iteration);
host::allocate(h_paths_ptr, max_iteration);
for(int i = 0; i < max_iteration; i++)
h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
}
gpu::allocate(hd_katzdata().KC, nV);
gpu::allocate(hd_katzdata().lower_bound, nV);
gpu::allocate(hd_katzdata().upper_bound, nV);
gpu::allocate(hd_katzdata().is_active, nV);
gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
reset();
}
KatzCentrality::~KatzCentrality() {
release();
}
void KatzCentrality::reset() {
hd_katzdata().iteration = 1;
if (is_static) {
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data +
hornet.nV();
}
else {
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
}
}
void KatzCentrality::release(){
gpu::free(hd_katzdata().num_paths_data);
gpu::free(hd_katzdata().num_paths);
gpu::free(hd_katzdata().KC);
gpu::free(hd_katzdata().lower_bound);
gpu::free(hd_katzdata().upper_bound);
gpu::free(hd_katzdata().vertex_array_sorted);
gpu::free(hd_katzdata().vertex_array_unsorted);
gpu::free(hd_katzdata().lower_bound_sorted);
gpu::free(hd_katzdata().lower_bound_unsorted);
host::free(h_paths_ptr);
}
void KatzCentrality::run() {
forAllnumV(hornet, Init { hd_katzdata });
hd_katzdata().iteration = 1;
hd_katzdata().num_active = hornet.nV();
while (hd_katzdata().num_active > hd_katzdata().K &&
hd_katzdata().iteration < hd_katzdata().max_iteration) {
hd_katzdata().alphaI = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
hd_katzdata().lower_bound_const = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha);
hd_katzdata().upper_bound_const = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha *
static_cast<double>(hd_katzdata().max_degree));
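        // Tail bounds on the truncated Katz sum: the two constants above bound
        // the contribution of all iterations beyond i via geometric series --
        // one with ratio alpha, one with ratio alpha*max_degree (every path
        // branching the maximal number of ways). They are applied per vertex
        // in UpdateKatzAndBounds.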
hd_katzdata().num_active = 0; // Each iteration the number of active
// vertices is set to zero.
forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
forAllEdges(hornet, UpdatePathCount { hd_katzdata },
load_balancing);
forAllnumV (hornet, UpdateKatzAndBounds { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration++;
if(is_static) {
std::swap(hd_katzdata().num_paths_curr,
hd_katzdata().num_paths_prev);
}
else {
auto iter = hd_katzdata().iteration;
hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
}
auto old_active_count = hd_katzdata().num_active;
hd_katzdata().num_prev_active = hd_katzdata().num_active;
hd_katzdata().num_active = 0; // Resetting active vertices for
// sorting
// Notice that this sorts the vertices in increasing order of their lower
// bounds, while the algorithm requires them in decreasing order.
// As such, we use the num_prev_active variable to store the number of
// previously active vertices, which lets us locate the K-th vertex from
// the end (essentially walking in from the tail of the sorted array).
xlib::CubSortByKey<double, vid_t>::srun
(hd_katzdata().lower_bound_unsorted,
hd_katzdata().vertex_array_unsorted,
old_active_count, hd_katzdata().lower_bound_sorted,
hd_katzdata().vertex_array_sorted);
forAllnumV(hornet, CountActive { hd_katzdata } );
hd_katzdata.sync();
}
}
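// Minimal usage sketch (illustrative, not part of the original file). It
// assumes `hornet` is an already-built HornetGraph and `max_degree` its
// maximum out-degree:
#if 0
KatzCentrality katz(hornet, /*max_iteration=*/100, /*K=*/100, max_degree,
                    /*is_static=*/true);
katz.run();
int iters = katz.get_iteration_count(); // iterations until the top-K separate
#endif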
// This function should only be used directly within run() and is currently
// commented out due to large execution overhead.
void KatzCentrality::printKMostImportant() {
ulong_t* num_paths_curr;
ulong_t* num_paths_prev;
int* vertex_array;
int* vertex_array_unsorted;
double* KC;
double* lower_bound;
double* upper_bound;
auto nV = hornet.nV();
host::allocate(num_paths_curr, nV);
host::allocate(num_paths_prev, nV);
host::allocate(vertex_array, nV);
host::allocate(vertex_array_unsorted, nV);
host::allocate(KC, nV);
host::allocate(lower_bound, nV);
host::allocate(upper_bound, nV);
gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
gpu::copyToHost(hd_katzdata().KC, nV, KC);
gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
gpu::copyToHost(hd_katzdata().vertex_array_unsorted, nV,
vertex_array_unsorted);
if (hd_katzdata().num_prev_active > hd_katzdata().K) {
for (int i = hd_katzdata().num_prev_active - 1;
i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
vid_t j = vertex_array[i];
std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j] << "\t\t"
          << upper_bound[j] - lower_bound[j] << "\n";
}
}
std::cout << std::endl;
host::free(num_paths_curr);
host::free(num_paths_prev);
host::free(vertex_array);
host::free(vertex_array_unsorted);
host::free(KC);
host::free(lower_bound);
host::free(upper_bound);
}
int KatzCentrality::get_iteration_count() {
return hd_katzdata().iteration;
}
bool KatzCentrality::validate() {
return true;
}
} // namespace hornets_nest
| cba62106722b3e1d62cbcd957c2294983f1cdb3a.cu | /**
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 Hornet. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*/
#include "Static/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
using length_t = int;
namespace hornets_nest {
/// TODO - changed hostKatzdata to pointer so that I can try to inherit it in
// the streaming case.
KatzCentrality::KatzCentrality(HornetGraph& hornet, int max_iteration, int K,
int max_degree, bool is_static) :
StaticAlgorithm(hornet),
load_balancing(hornet),
is_static(is_static) {
if (max_iteration <= 0)
ERROR("Number of max iterations should be greater than zero")
hd_katzdata().nV = hornet.nV();
hd_katzdata().K = K;
hd_katzdata().max_degree = max_degree;
hd_katzdata().alpha = 1.0 / (static_cast<double>(max_degree) + 1.0);
hd_katzdata().max_iteration = max_iteration;
auto nV = hornet.nV();
if (is_static) {
gpu::allocate(hd_katzdata().num_paths_data, nV * 2);
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data + nV;
hd_katzdata().num_paths = nullptr;
h_paths_ptr = nullptr;
}
else {
gpu::allocate(hd_katzdata().num_paths_data, nV * max_iteration);
gpu::allocate(hd_katzdata().num_paths, max_iteration);
host::allocate(h_paths_ptr, max_iteration);
for(int i = 0; i < max_iteration; i++)
h_paths_ptr[i] = hd_katzdata().num_paths_data + nV * i;
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
host::copyToDevice(h_paths_ptr, max_iteration, hd_katzdata().num_paths);
}
gpu::allocate(hd_katzdata().KC, nV);
gpu::allocate(hd_katzdata().lower_bound, nV);
gpu::allocate(hd_katzdata().upper_bound, nV);
gpu::allocate(hd_katzdata().is_active, nV);
gpu::allocate(hd_katzdata().vertex_array_sorted, nV);
gpu::allocate(hd_katzdata().vertex_array_unsorted, nV);
gpu::allocate(hd_katzdata().lower_bound_sorted, nV);
gpu::allocate(hd_katzdata().lower_bound_unsorted, nV);
reset();
}
KatzCentrality::~KatzCentrality() {
release();
}
void KatzCentrality::reset() {
hd_katzdata().iteration = 1;
if (is_static) {
hd_katzdata().num_paths_prev = hd_katzdata().num_paths_data;
hd_katzdata().num_paths_curr = hd_katzdata().num_paths_data +
hornet.nV();
}
else {
hd_katzdata().num_paths_prev = h_paths_ptr[0];
hd_katzdata().num_paths_curr = h_paths_ptr[1];
}
}
void KatzCentrality::release(){
gpu::free(hd_katzdata().num_paths_data);
gpu::free(hd_katzdata().num_paths);
gpu::free(hd_katzdata().KC);
gpu::free(hd_katzdata().lower_bound);
gpu::free(hd_katzdata().upper_bound);
gpu::free(hd_katzdata().vertex_array_sorted);
gpu::free(hd_katzdata().vertex_array_unsorted);
gpu::free(hd_katzdata().lower_bound_sorted);
gpu::free(hd_katzdata().lower_bound_unsorted);
host::free(h_paths_ptr);
}
void KatzCentrality::run() {
forAllnumV(hornet, Init { hd_katzdata });
hd_katzdata().iteration = 1;
hd_katzdata().num_active = hornet.nV();
while (hd_katzdata().num_active > hd_katzdata().K &&
hd_katzdata().iteration < hd_katzdata().max_iteration) {
hd_katzdata().alphaI = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
hd_katzdata().lower_bound_const = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha);
hd_katzdata().upper_bound_const = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration + 1) /
(1.0 - hd_katzdata().alpha *
static_cast<double>(hd_katzdata().max_degree));
hd_katzdata().num_active = 0; // Each iteration the number of active
// vertices is set to zero.
forAllnumV (hornet, InitNumPathsPerIteration { hd_katzdata } );
forAllEdges(hornet, UpdatePathCount { hd_katzdata },
load_balancing);
forAllnumV (hornet, UpdateKatzAndBounds { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration++;
if(is_static) {
std::swap(hd_katzdata().num_paths_curr,
hd_katzdata().num_paths_prev);
}
else {
auto iter = hd_katzdata().iteration;
hd_katzdata().num_paths_prev = h_paths_ptr[iter - 1];
hd_katzdata().num_paths_curr = h_paths_ptr[iter - 0];
}
auto old_active_count = hd_katzdata().num_active;
hd_katzdata().num_prev_active = hd_katzdata().num_active;
hd_katzdata().num_active = 0; // Resetting active vertices for
// sorting
// Notice that this sorts the vertices in increasing order of their lower
// bounds, while the algorithm requires them in decreasing order.
// As such, we use the num_prev_active variable to store the number of
// previously active vertices, which lets us locate the K-th vertex from
// the end (essentially walking in from the tail of the sorted array).
xlib::CubSortByKey<double, vid_t>::srun
(hd_katzdata().lower_bound_unsorted,
hd_katzdata().vertex_array_unsorted,
old_active_count, hd_katzdata().lower_bound_sorted,
hd_katzdata().vertex_array_sorted);
forAllnumV(hornet, CountActive { hd_katzdata } );
hd_katzdata.sync();
}
}
// This function should only be used directly within run() and is currently
// commented out due to large execution overhead.
void KatzCentrality::printKMostImportant() {
ulong_t* num_paths_curr;
ulong_t* num_paths_prev;
int* vertex_array;
int* vertex_array_unsorted;
double* KC;
double* lower_bound;
double* upper_bound;
auto nV = hornet.nV();
host::allocate(num_paths_curr, nV);
host::allocate(num_paths_prev, nV);
host::allocate(vertex_array, nV);
host::allocate(vertex_array_unsorted, nV);
host::allocate(KC, nV);
host::allocate(lower_bound, nV);
host::allocate(upper_bound, nV);
gpu::copyToHost(hd_katzdata().lower_bound, nV, lower_bound);
gpu::copyToHost(hd_katzdata().upper_bound, nV, upper_bound);
gpu::copyToHost(hd_katzdata().KC, nV, KC);
gpu::copyToHost(hd_katzdata().vertex_array_sorted, nV, vertex_array);
gpu::copyToHost(hd_katzdata().vertex_array_unsorted, nV,
vertex_array_unsorted);
if (hd_katzdata().num_prev_active > hd_katzdata().K) {
for (int i = hd_katzdata().num_prev_active - 1;
i >= hd_katzdata().num_prev_active - hd_katzdata().K; i--) {
vid_t j = vertex_array[i];
std::cout << j << "\t\t" << KC[j] << "\t\t" << upper_bound[j] << "\t\t"
          << upper_bound[j] - lower_bound[j] << "\n";
}
}
std::cout << std::endl;
host::free(num_paths_curr);
host::free(num_paths_prev);
host::free(vertex_array);
host::free(vertex_array_unsorted);
host::free(KC);
host::free(lower_bound);
host::free(upper_bound);
}
int KatzCentrality::get_iteration_count() {
return hd_katzdata().iteration;
}
bool KatzCentrality::validate() {
return true;
}
} // namespace hornets_nest
|
6c592b24bf83f598e729837693013b8d7a1bfc4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2020-2021 by XGBoost Contributors
*/
#include <limits>
#include "evaluate_splits.cuh"
#include "../../common/categorical.h"
namespace xgboost {
namespace tree {
// With constraints
XGBOOST_DEVICE float LossChangeMissing(const GradientPairPrecise &scan,
const GradientPairPrecise &missing,
const GradientPairPrecise &parent_sum,
const GPUTrainingParam ¶m, bst_node_t nidx,
bst_feature_t fidx,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
bool &missing_left_out) { // NOLINT
float parent_gain = CalcGain(param, parent_sum);
float missing_left_gain =
evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = evaluator.CalcSplitGain(
param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
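// Worked example of the decision above (hypothetical numbers): with
// parent_gain = 0.5, if placing the missing-value mass left scores 0.9 and
// placing it right scores 0.7, missing_left_out becomes true and
// 0.9 - 0.5 = 0.4 is returned; ties go left because the comparison is >=.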
/*!
 * \brief Sums one feature's histogram bins across the thread block.
 *
 * \tparam ReduceT      cub BlockReduce type.
 * \tparam TempStorageT cub shared-memory temporary storage type.
 *
 * \param feature_histogram Histogram bins belonging to one feature.
 * \param temp_storage      Shared memory for the intermediate reduction result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT,
typename GradientSumT>
__device__ GradientSumT
ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
cub::CTA_SYNC();
return shared_sum;
}
template <typename GradientSumT, typename TempStorageT> struct OneHotBin {
GradientSumT __device__ operator()(bool thread_active, uint32_t scan_begin,
SumCallbackOp<GradientSumT> *,
GradientPairPrecise const &missing,
EvaluateSplitInputs<GradientSumT> const &inputs,
TempStorageT *) {
GradientSumT bin = thread_active
? inputs.gradient_histogram[scan_begin + threadIdx.x]
: GradientSumT();
auto rest = inputs.parent_sum - GradientPairPrecise(bin) - missing;
return GradientSumT{rest};
}
};
template <typename GradientSumT>
struct UpdateOneHot {
void __device__ operator()(bool missing_left, uint32_t scan_begin, float gain,
bst_feature_t fidx, GradientPairPrecise const &missing,
GradientSumT const &bin,
EvaluateSplitInputs<GradientSumT> const &inputs,
DeviceSplitCandidate *best_split) {
int split_gidx = (scan_begin + threadIdx.x);
float fvalue = inputs.feature_values[split_gidx];
GradientPairPrecise left =
missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin};
GradientPairPrecise right = inputs.parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, true,
inputs.param);
}
};
template <typename GradientSumT, typename TempStorageT, typename ScanT>
struct NumericBin {
GradientSumT __device__ operator()(bool thread_active, uint32_t scan_begin,
SumCallbackOp<GradientSumT> *prefix_callback,
GradientPairPrecise const &missing,
EvaluateSplitInputs<GradientSumT> inputs,
TempStorageT *temp_storage) {
GradientSumT bin = thread_active
? inputs.gradient_histogram[scan_begin + threadIdx.x]
: GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), *prefix_callback);
return bin;
}
};
template <typename GradientSumT>
struct UpdateNumeric {
void __device__ operator()(bool missing_left, uint32_t scan_begin, float gain,
bst_feature_t fidx, GradientPairPrecise const &missing,
GradientSumT const &bin,
EvaluateSplitInputs<GradientSumT> const &inputs,
DeviceSplitCandidate *best_split) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = inputs.min_fvalue[fidx];
} else {
fvalue = inputs.feature_values[split_gidx];
}
GradientPairPrecise left =
missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin};
GradientPairPrecise right = inputs.parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, false,
inputs.param);
}
};
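// The Bin/Update pairs above feed EvaluateFeature below with its two split
// families: OneHotBin/UpdateOneHot score one-hot partitions for categorical
// features (one category versus all the rest, no scan needed), while
// NumericBin/UpdateNumeric prefix-scan the histogram so each thread holds the
// cumulative left-child sum for one threshold candidate.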
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT,
typename BinFn, typename UpdateFn>
__device__ void EvaluateFeature(
int fidx, EvaluateSplitInputs<GradientSumT> inputs,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
DeviceSplitCandidate* best_split, // shared memory storing best split
TempStorageT* temp_storage // temp memory for cub operations
) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin
uint32_t gidx_end =
inputs.feature_segments[fidx + 1]; // end bin for i^th feature
auto feature_hist = inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin);
auto bin_fn = BinFn();
auto update_fn = UpdateFn();
// Sum histogram bins for current feature
GradientSumT const feature_sum =
ReduceFeature<BLOCK_THREADS, ReduceT, TempStorageT, GradientSumT>(
feature_hist, temp_storage);
GradientPairPrecise const missing = inputs.parent_sum - GradientPairPrecise{feature_sum};
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
auto bin = bin_fn(thread_active, scan_begin, &prefix_op, missing, inputs, temp_storage);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(GradientPairPrecise{bin}, missing, inputs.parent_sum, inputs.param,
inputs.nidx, fidx, evaluator, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
cub::CTA_SYNC();
// Best thread updates split
if (threadIdx.x == block_max.key) {
update_fn(missing_left, scan_begin, gain, fidx, missing, bin, inputs,
best_split);
}
cub::CTA_SYNC();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitsKernel(
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
common::Span<DeviceSplitCandidate> out_candidates) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = hipcub::KeyValuePair<int, float>;
using BlockScanT =
hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// If this block is working on the left or right node
bool is_left = blockIdx.x < left.feature_set.size();
EvaluateSplitInputs<GradientSumT>& inputs = is_left ? left : right;
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = inputs.feature_set[is_left ? blockIdx.x
: blockIdx.x - left.feature_set.size()];
if (common::IsCat(inputs.feature_types, fidx)) {
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT,
TempStorage, GradientSumT,
OneHotBin<GradientSumT, TempStorage>,
UpdateOneHot<GradientSumT>>(fidx, inputs, evaluator, &best_split,
&temp_storage);
} else {
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT,
TempStorage, GradientSumT,
NumericBin<GradientSumT, TempStorage, BlockScanT>,
UpdateNumeric<GradientSumT>>(fidx, inputs, evaluator, &best_split,
&temp_storage);
}
cub::CTA_SYNC();
if (threadIdx.x == 0) {
// Record best loss for each feature
out_candidates[blockIdx.x] = best_split;
}
}
__device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a,
const DeviceSplitCandidate& b) {
return b.loss_chg > a.loss_chg ? b : a;
}
template <typename GradientSumT>
void EvaluateSplits(common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right) {
size_t combined_num_features =
left.feature_set.size() + right.feature_set.size();
dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits(
combined_num_features);
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(combined_num_features), kBlockThreads, 0}(
EvaluateSplitsKernel<kBlockThreads, GradientSumT>, left, right, evaluator,
dh::ToSpan(feature_best_splits));
// Reduce to get best candidate for left and right child over all features
auto reduce_offset =
dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) -> size_t {
if (idx == 0) {
return 0;
}
if (idx == 1) {
return left.feature_set.size();
}
if (idx == 2) {
return combined_num_features;
}
return 0;
});
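  // The transform iterator yields the segment offsets {0, |left features|,
  // total}, so the segmented "Sum" below -- whose operator+ (defined above)
  // is really an argmax on loss_chg -- reduces the left node's per-feature
  // candidates into out_splits[0] and the right node's into out_splits[1].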
size_t temp_storage_bytes = 0;
auto num_segments = out_splits.size();
hipcub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
dh::TemporaryArray<int8_t> temp(temp_storage_bytes);
hipcub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
}
template <typename GradientSumT>
void EvaluateSingleSplit(common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientSumT> input) {
EvaluateSplits(out_split, evaluator, input, {});
}
template void EvaluateSplits<GradientPair>(
common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPair> left,
EvaluateSplitInputs<GradientPair> right);
template void EvaluateSplits<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPairPrecise> left,
EvaluateSplitInputs<GradientPairPrecise> right);
template void EvaluateSingleSplit<GradientPair>(
common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPair> input);
template void EvaluateSingleSplit<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPairPrecise> input);
} // namespace tree
} // namespace xgboost
| 6c592b24bf83f598e729837693013b8d7a1bfc4c.cu | /*!
* Copyright 2020-2021 by XGBoost Contributors
*/
#include <limits>
#include "evaluate_splits.cuh"
#include "../../common/categorical.h"
namespace xgboost {
namespace tree {
// With constraints
XGBOOST_DEVICE float LossChangeMissing(const GradientPairPrecise &scan,
const GradientPairPrecise &missing,
const GradientPairPrecise &parent_sum,
const GPUTrainingParam ¶m, bst_node_t nidx,
bst_feature_t fidx,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
bool &missing_left_out) { // NOLINT
float parent_gain = CalcGain(param, parent_sum);
float missing_left_gain =
evaluator.CalcSplitGain(param, nidx, fidx, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = evaluator.CalcSplitGain(
param, nidx, fidx, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sums one feature's histogram bins across the thread block.
 *
 * \tparam ReduceT      cub BlockReduce type.
 * \tparam TempStorageT cub shared-memory temporary storage type.
 *
 * \param feature_histogram Histogram bins belonging to one feature.
 * \param temp_storage      Shared memory for the intermediate reduction result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT,
typename GradientSumT>
__device__ GradientSumT
ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
cub::CTA_SYNC();
return shared_sum;
}
template <typename GradientSumT, typename TempStorageT> struct OneHotBin {
GradientSumT __device__ operator()(bool thread_active, uint32_t scan_begin,
SumCallbackOp<GradientSumT> *,
GradientPairPrecise const &missing,
EvaluateSplitInputs<GradientSumT> const &inputs,
TempStorageT *) {
GradientSumT bin = thread_active
? inputs.gradient_histogram[scan_begin + threadIdx.x]
: GradientSumT();
auto rest = inputs.parent_sum - GradientPairPrecise(bin) - missing;
return GradientSumT{rest};
}
};
template <typename GradientSumT>
struct UpdateOneHot {
void __device__ operator()(bool missing_left, uint32_t scan_begin, float gain,
bst_feature_t fidx, GradientPairPrecise const &missing,
GradientSumT const &bin,
EvaluateSplitInputs<GradientSumT> const &inputs,
DeviceSplitCandidate *best_split) {
int split_gidx = (scan_begin + threadIdx.x);
float fvalue = inputs.feature_values[split_gidx];
GradientPairPrecise left =
missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin};
GradientPairPrecise right = inputs.parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, true,
inputs.param);
}
};
template <typename GradientSumT, typename TempStorageT, typename ScanT>
struct NumericBin {
GradientSumT __device__ operator()(bool thread_active, uint32_t scan_begin,
SumCallbackOp<GradientSumT> *prefix_callback,
GradientPairPrecise const &missing,
EvaluateSplitInputs<GradientSumT> inputs,
TempStorageT *temp_storage) {
GradientSumT bin = thread_active
? inputs.gradient_histogram[scan_begin + threadIdx.x]
: GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), *prefix_callback);
return bin;
}
};
template <typename GradientSumT>
struct UpdateNumeric {
void __device__ operator()(bool missing_left, uint32_t scan_begin, float gain,
bst_feature_t fidx, GradientPairPrecise const &missing,
GradientSumT const &bin,
EvaluateSplitInputs<GradientSumT> const &inputs,
DeviceSplitCandidate *best_split) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = inputs.min_fvalue[fidx];
} else {
fvalue = inputs.feature_values[split_gidx];
}
GradientPairPrecise left =
missing_left ? GradientPairPrecise{bin} + missing : GradientPairPrecise{bin};
GradientPairPrecise right = inputs.parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx, left, right, false,
inputs.param);
}
};
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT,
typename BinFn, typename UpdateFn>
__device__ void EvaluateFeature(
int fidx, EvaluateSplitInputs<GradientSumT> inputs,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
DeviceSplitCandidate* best_split, // shared memory storing best split
TempStorageT* temp_storage // temp memory for cub operations
) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = inputs.feature_segments[fidx]; // beginning bin
uint32_t gidx_end =
inputs.feature_segments[fidx + 1]; // end bin for i^th feature
auto feature_hist = inputs.gradient_histogram.subspan(gidx_begin, gidx_end - gidx_begin);
auto bin_fn = BinFn();
auto update_fn = UpdateFn();
// Sum histogram bins for current feature
GradientSumT const feature_sum =
ReduceFeature<BLOCK_THREADS, ReduceT, TempStorageT, GradientSumT>(
feature_hist, temp_storage);
GradientPairPrecise const missing = inputs.parent_sum - GradientPairPrecise{feature_sum};
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op = SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
auto bin = bin_fn(thread_active, scan_begin, &prefix_op, missing, inputs, temp_storage);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(GradientPairPrecise{bin}, missing, inputs.parent_sum, inputs.param,
inputs.nidx, fidx, evaluator, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
cub::CTA_SYNC();
// Best thread updates split
if (threadIdx.x == block_max.key) {
update_fn(missing_left, scan_begin, gain, fidx, missing, bin, inputs,
best_split);
}
cub::CTA_SYNC();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitsKernel(
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
common::Span<DeviceSplitCandidate> out_candidates) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// If this block is working on the left or right node
bool is_left = blockIdx.x < left.feature_set.size();
EvaluateSplitInputs<GradientSumT>& inputs = is_left ? left : right;
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = inputs.feature_set[is_left ? blockIdx.x
: blockIdx.x - left.feature_set.size()];
if (common::IsCat(inputs.feature_types, fidx)) {
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT,
TempStorage, GradientSumT,
OneHotBin<GradientSumT, TempStorage>,
UpdateOneHot<GradientSumT>>(fidx, inputs, evaluator, &best_split,
&temp_storage);
} else {
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT,
TempStorage, GradientSumT,
NumericBin<GradientSumT, TempStorage, BlockScanT>,
UpdateNumeric<GradientSumT>>(fidx, inputs, evaluator, &best_split,
&temp_storage);
}
cub::CTA_SYNC();
if (threadIdx.x == 0) {
// Record best loss for each feature
out_candidates[blockIdx.x] = best_split;
}
}
__device__ DeviceSplitCandidate operator+(const DeviceSplitCandidate& a,
const DeviceSplitCandidate& b) {
return b.loss_chg > a.loss_chg ? b : a;
}
template <typename GradientSumT>
void EvaluateSplits(common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientSumT> left,
EvaluateSplitInputs<GradientSumT> right) {
size_t combined_num_features =
left.feature_set.size() + right.feature_set.size();
dh::TemporaryArray<DeviceSplitCandidate> feature_best_splits(
combined_num_features);
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(combined_num_features), kBlockThreads, 0}(
EvaluateSplitsKernel<kBlockThreads, GradientSumT>, left, right, evaluator,
dh::ToSpan(feature_best_splits));
// Reduce to get best candidate for left and right child over all features
auto reduce_offset =
dh::MakeTransformIterator<size_t>(thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) -> size_t {
if (idx == 0) {
return 0;
}
if (idx == 1) {
return left.feature_set.size();
}
if (idx == 2) {
return combined_num_features;
}
return 0;
});
size_t temp_storage_bytes = 0;
auto num_segments = out_splits.size();
cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
dh::TemporaryArray<int8_t> temp(temp_storage_bytes);
cub::DeviceSegmentedReduce::Sum(temp.data().get(), temp_storage_bytes,
feature_best_splits.data(), out_splits.data(),
num_segments, reduce_offset, reduce_offset + 1);
}
template <typename GradientSumT>
void EvaluateSingleSplit(common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientSumT> input) {
EvaluateSplits(out_split, evaluator, input, {});
}
template void EvaluateSplits<GradientPair>(
common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPair> left,
EvaluateSplitInputs<GradientPair> right);
template void EvaluateSplits<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_splits,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPairPrecise> left,
EvaluateSplitInputs<GradientPairPrecise> right);
template void EvaluateSingleSplit<GradientPair>(
common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPair> input);
template void EvaluateSingleSplit<GradientPairPrecise>(
common::Span<DeviceSplitCandidate> out_split,
TreeEvaluator::SplitEvaluator<GPUTrainingParam> evaluator,
EvaluateSplitInputs<GradientPairPrecise> input);
} // namespace tree
} // namespace xgboost
|
2330daad5cb070ead457b155e589e0319fadc9e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <vector>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
// Model a sphere with a simple data structure
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
/* __device__: function type qualifier indicating that the function executes
   on the device and can only be called from device code, i.e. from other
   __device__ functions or from __global__ functions; __device__ functions do
   not support recursion, cannot declare static variables in their body, must
   have a fixed number of parameters, and their address cannot be taken */
__device__ float hit(float ox, float oy, float *n)
{
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
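// Geometry of hit(): the camera ray travels along +z through pixel (ox, oy).
// It intersects this sphere when dx*dx + dy*dy < radius*radius; the visible
// depth is z + dz with dz = sqrtf(radius^2 - dx^2 - dy^2), and n = dz/radius
// (in [0, 1]) doubles as a cheap shading factor.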
// method2: use constant memory
/* __constant__: variable type qualifier, optionally combined with the
   __device__ qualifier. A variable declared this way resides in constant
   memory space, has the same lifetime as the application, can be accessed
   from the host through the runtime library, and is visible to all device
   threads. __constant__ variables have static storage by default; they cannot
   be declared extern, may only be declared at file scope (never inside a
   function body), and cannot be assigned from device code -- only from the
   host via host runtime functions. __constant__ restricts access to
   read-only. Compared with reads from global memory, reading the same data
   from constant memory can save memory bandwidth. Constant memory holds data
   that does not change during kernel execution: NVIDIA hardware provides 64KB
   of it and treats it differently from standard global memory, so in some
   cases replacing global memory with constant memory effectively reduces
   memory bandwidth use and can improve application performance */
__constant__ Sphere dev_spheres[20]; // constant memory; holds sphere_num entries
/* __global__: function type qualifier; the function runs on the device and is
   called from the host (callable from the device as well on compute
   capability 3.2 and above); it must return void; calls to it are
   asynchronous, i.e. they return before the device has finished executing;
   every call must specify an execution configuration -- the grid and block
   dimensions used to execute the function on the device, plus an associated
   stream -- by inserting the <<< >>> operator;
   "a kernel" marks this as a kernel function (a CUDA parallel function that
   runs on the GPU is called a kernel and must be defined with the __global__
   qualifier) */
__global__ static void ray_tracking(unsigned char* ptr_image, Sphere* ptr_sphere, int width, int height, int sphere_num)
{
	/* gridDim: built-in variable describing the dimensions of the thread grid;
	   it is the same constant for every block and stores the size of each grid
	   dimension, i.e. how many blocks the grid has per dimension. A grid is
	   three-dimensional and gridDim is of type dim3;
	   blockDim: built-in variable giving the dimensions of each block, of type
	   dim3; the same constant for every block, holding the number of threads
	   per block dimension;
	   blockIdx: built-in variable holding the index, within the grid, of the
	   block currently executing this device code; blockIdx.x ranges over
	   [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3;
	   threadIdx: built-in variable holding the index of the current thread
	   within its block; threadIdx.x (and .y/.z for 2-D/3-D blocks) give its
	   position; of type uint3 */
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox{ (x - width / 2.f) };
float oy{ (y - height / 2.f) };
float r{ 0 }, g{ 0 }, b{ 0 };
float maxz{ -INF };
for (int i = 0; i < sphere_num; ++i) {
float n;
float t = ptr_sphere[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = ptr_sphere[i].r * fscale;
g = ptr_sphere[i].g * fscale;
b = ptr_sphere[i].b * fscale;
maxz = t;
}
}
ptr_image[offset * 4 + 0] = static_cast<unsigned char>(r * 255);
ptr_image[offset * 4 + 1] = static_cast<unsigned char>(g * 255);
ptr_image[offset * 4 + 2] = static_cast<unsigned char>(b * 255);
ptr_image[offset * 4 + 3] = 255;
}
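// The overload below is identical except that it reads the spheres from the
// __constant__ array dev_spheres (method2) instead of taking a global-memory
// pointer argument (method1).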
__global__ static void ray_tracking(unsigned char* ptr_image, int width, int height, int sphere_num)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox{ (x - width / 2.f) };
float oy{ (y - height / 2.f) };
float r{ 0 }, g{ 0 }, b{ 0 };
float maxz{ -INF };
for (int i = 0; i < sphere_num; ++i) {
float n;
float t = dev_spheres[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = dev_spheres[i].r * fscale;
g = dev_spheres[i].g * fscale;
b = dev_spheres[i].b * fscale;
maxz = t;
}
}
ptr_image[offset * 4 + 0] = static_cast<unsigned char>(r * 255);
ptr_image[offset * 4 + 1] = static_cast<unsigned char>(g * 255);
ptr_image[offset * 4 + 2] = static_cast<unsigned char>(b * 255);
ptr_image[offset * 4 + 3] = 255;
}
int ray_tracking_gpu(const float* a, const float* b, const float* c, int sphere_num, unsigned char* ptr, int width, int height, float* elapsed_time)
{
	/* hipEvent_t: CUDA/HIP event type (a struct). An event measures the time
	   the GPU spends on a task; it is essentially a GPU timestamp. Because
	   events are implemented on the GPU, they are not suited to timing mixed
	   code that contains both device and host parts */
	hipEvent_t start, stop;
	// hipEventCreate: create an event object; asynchronous
	hipEventCreate(&start);
	hipEventCreate(&stop);
	// hipEventRecord: record an event; asynchronous; start marks the begin time
hipEventRecord(start, 0);
const size_t length{ width * height * 4 * sizeof(unsigned char) };
unsigned char* dev_image{ nullptr };
std::unique_ptr<Sphere[]> spheres(new Sphere[sphere_num]);
for (int i = 0, t = 0; i < sphere_num; ++i, t += 3) {
spheres[i].r = a[t];
spheres[i].g = a[t + 1];
spheres[i].b = a[t + 2];
spheres[i].x = b[t];
spheres[i].y = b[t + 1];
spheres[i].z = b[t + 2];
spheres[i].radius = c[i];
}
	// hipMalloc: allocate memory on the device
	hipMalloc(&dev_image, length);
	// method1: without constant memory
//Sphere* dev_spheres{ nullptr };
//hipMalloc(&dev_spheres, sizeof(Sphere) * sphere_num);
	/* hipMemcpy: copies data between host and device; the fourth argument must
	   be one of:
	   (1). hipMemcpyHostToHost: host to host
	   (2). hipMemcpyHostToDevice: host to device
	   (3). hipMemcpyDeviceToHost: device to host
	   (4). hipMemcpyDeviceToDevice: device to device
	   (5). hipMemcpyDefault: infer the direction from the pointer values;
	        requires unified virtual addressing (CUDA 6.0 and above)
	   hipMemcpy is synchronous with respect to the host */
	//hipMemcpy(dev_spheres, spheres.get(), sizeof(Sphere) * sphere_num, hipMemcpyHostToDevice);
	// method2: use constant memory
	/* hipMemcpyToSymbol: the only difference from hipMemcpy with
	   hipMemcpyHostToDevice is that hipMemcpyToSymbol copies to constant
	   memory, whereas hipMemcpy copies to global memory */
hipMemcpyToSymbol(dev_spheres, spheres.get(), sizeof(Sphere)* sphere_num);
const int threads_block{ 16 };
	/* dim3: built-in vector type based on uint3, equivalent to a struct of
	   three unsigned ints, usable as a three-dimensional extent; any component
	   of a dim3 left unassigned defaults to 1 */
dim3 blocks(width / threads_block, height / threads_block);
dim3 threads(threads_block, threads_block);
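	// Example (hypothetical sizes): a 1024x1024 image with threads_block = 16
	// gives blocks = (64, 64) and threads = (16, 16): 4096 blocks of 256
	// threads, one thread per pixel. This assumes width and height are
	// multiples of threads_block.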
	/* <<< >>>: operator introduced by CUDA to pass the execution configuration
	   -- grid and block dimensions and related parameters -- to the compiler
	   and runtime, describing how many threads the kernel uses and how they
	   are organized. These values are not arguments of the device code; they
	   tell the runtime how to launch it, while the kernel's own arguments are
	   passed in parentheses like a normal function call. Devices of different
	   compute capability impose different limits on the total thread count and
	   organization. Any array or variable the kernel uses must have enough
	   space allocated before the kernel is invoked, otherwise the GPU
	   computation fails (e.g. with out-of-bounds accesses).
	   With the runtime API the configuration is written between the kernel
	   name and its argument list as <<<Dg,Db,Ns,S>>>: Dg is a dim3 giving the
	   grid dimensions, so the grid has Dg.x*Dg.y*Dg.z blocks; Db is a dim3
	   giving the block dimensions, so each block has Db.x*Db.y*Db.z threads;
	   Ns is a size_t giving the bytes of dynamically allocated shared memory
	   per block, available to variables declared as extern __shared__ arrays
	   (optional, default 0); S is the stream the kernel is associated with
	   (optional, default 0). */
	//ray_tracking << <blocks, threads >> >(dev_image, dev_spheres, width, height, sphere_num); // method1, without constant memory
	ray_tracking << <blocks, threads >> >(dev_image, width, height, sphere_num); // method2, with constant memory
hipMemcpy(ptr, dev_image, length, hipMemcpyDeviceToHost);
	// hipFree: free device memory allocated with hipMalloc
	hipFree(dev_image);
	//hipFree(dev_spheres); // needed for method1; with constant memory (method2) no free is required
	// hipEventRecord: record an event; asynchronous; stop marks the end time
	hipEventRecord(stop, 0);
	// hipEventSynchronize: event synchronization; wait for the event to complete
	hipEventSynchronize(stop);
	// hipEventElapsedTime: compute the time elapsed between two events, in milliseconds
	hipEventElapsedTime(elapsed_time, start, stop);
	// hipEventDestroy: destroy the event object
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 2330daad5cb070ead457b155e589e0319fadc9e7.cu | #include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <vector>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
// Model a sphere with a simple data structure
struct Sphere {
float r, b, g;
float radius;
float x, y, z;
/* __device__: function type qualifier indicating that the function executes
   on the device and can only be called from device code, i.e. from other
   __device__ functions or from __global__ functions; __device__ functions do
   not support recursion, cannot declare static variables in their body, must
   have a fixed number of parameters, and their address cannot be taken */
__device__ float hit(float ox, float oy, float *n)
{
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
// method2: use constant memory
/* __constant__: variable type qualifier, optionally combined with the
   __device__ qualifier. A variable declared this way resides in constant
   memory space, has the same lifetime as the application, can be accessed
   from the host through the runtime library, and is visible to all device
   threads. __constant__ variables have static storage by default; they cannot
   be declared extern, may only be declared at file scope (never inside a
   function body), and cannot be assigned from device code -- only from the
   host via host runtime functions. __constant__ restricts access to
   read-only. Compared with reads from global memory, reading the same data
   from constant memory can save memory bandwidth. Constant memory holds data
   that does not change during kernel execution: NVIDIA hardware provides 64KB
   of it and treats it differently from standard global memory, so in some
   cases replacing global memory with constant memory effectively reduces
   memory bandwidth use and can improve application performance */
__constant__ Sphere dev_spheres[20]; // constant memory; holds sphere_num entries
/* __global__: function type qualifier; the function runs on the device and is
   called from the host (callable from the device as well on compute
   capability 3.2 and above); it must return void; calls to it are
   asynchronous, i.e. they return before the device has finished executing;
   every call must specify an execution configuration -- the grid and block
   dimensions used to execute the function on the device, plus an associated
   stream -- by inserting the <<< >>> operator;
   "a kernel" marks this as a kernel function (a CUDA parallel function that
   runs on the GPU is called a kernel and must be defined with the __global__
   qualifier) */
__global__ static void ray_tracking(unsigned char* ptr_image, Sphere* ptr_sphere, int width, int height, int sphere_num)
{
	/* gridDim: built-in variable describing the dimensions of the thread grid;
	   it is the same constant for every block and stores the size of each grid
	   dimension, i.e. how many blocks the grid has per dimension. A grid is
	   three-dimensional and gridDim is of type dim3;
	   blockDim: built-in variable giving the dimensions of each block, of type
	   dim3; the same constant for every block, holding the number of threads
	   per block dimension;
	   blockIdx: built-in variable holding the index, within the grid, of the
	   block currently executing this device code; blockIdx.x ranges over
	   [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type uint3;
	   threadIdx: built-in variable holding the index of the current thread
	   within its block; threadIdx.x (and .y/.z for 2-D/3-D blocks) give its
	   position; of type uint3 */
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox{ (x - width / 2.f) };
float oy{ (y - height / 2.f) };
float r{ 0 }, g{ 0 }, b{ 0 };
float maxz{ -INF };
for (int i = 0; i < sphere_num; ++i) {
float n;
float t = ptr_sphere[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = ptr_sphere[i].r * fscale;
g = ptr_sphere[i].g * fscale;
b = ptr_sphere[i].b * fscale;
maxz = t;
}
}
ptr_image[offset * 4 + 0] = static_cast<unsigned char>(r * 255);
ptr_image[offset * 4 + 1] = static_cast<unsigned char>(g * 255);
ptr_image[offset * 4 + 2] = static_cast<unsigned char>(b * 255);
ptr_image[offset * 4 + 3] = 255;
}
__global__ static void ray_tracking(unsigned char* ptr_image, int width, int height, int sphere_num)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox{ (x - width / 2.f) };
float oy{ (y - height / 2.f) };
float r{ 0 }, g{ 0 }, b{ 0 };
float maxz{ -INF };
for (int i = 0; i < sphere_num; ++i) {
float n;
float t = dev_spheres[i].hit(ox, oy, &n);
if (t > maxz) {
float fscale = n;
r = dev_spheres[i].r * fscale;
g = dev_spheres[i].g * fscale;
b = dev_spheres[i].b * fscale;
maxz = t;
}
}
ptr_image[offset * 4 + 0] = static_cast<unsigned char>(r * 255);
ptr_image[offset * 4 + 1] = static_cast<unsigned char>(g * 255);
ptr_image[offset * 4 + 2] = static_cast<unsigned char>(b * 255);
ptr_image[offset * 4 + 3] = 255;
}
int ray_tracking_gpu(const float* a, const float* b, const float* c, int sphere_num, unsigned char* ptr, int width, int height, float* elapsed_time)
{
	/* cudaEvent_t: CUDA event type (a struct). A CUDA event measures the time
	   the GPU spends on a task; an event is essentially a GPU timestamp.
	   Because events are implemented on the GPU, they are not suited to timing
	   mixed code that contains both device and host parts */
	cudaEvent_t start, stop;
	// cudaEventCreate: create an event object; asynchronous
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// cudaEventRecord: record an event; asynchronous; start marks the begin time
cudaEventRecord(start, 0);
const size_t length{ width * height * 4 * sizeof(unsigned char) };
unsigned char* dev_image{ nullptr };
std::unique_ptr<Sphere[]> spheres(new Sphere[sphere_num]);
for (int i = 0, t = 0; i < sphere_num; ++i, t += 3) {
spheres[i].r = a[t];
spheres[i].g = a[t + 1];
spheres[i].b = a[t + 2];
spheres[i].x = b[t];
spheres[i].y = b[t + 1];
spheres[i].z = b[t + 2];
spheres[i].radius = c[i];
}
// cudaMalloc: allocates memory on the device
cudaMalloc(&dev_image, length);
// method 1: without constant memory
//Sphere* dev_spheres{ nullptr };
//cudaMalloc(&dev_spheres, sizeof(Sphere) * sphere_num);
/* cudaMemcpy: copies data between host and device; the fourth parameter
must be one of the following:
(1). cudaMemcpyHostToHost: copy from host to host
(2). cudaMemcpyHostToDevice: copy from host to device
(3). cudaMemcpyDeviceToHost: copy from device to host
(4). cudaMemcpyDeviceToDevice: copy from device to device
(5). cudaMemcpyDefault: infer the copy direction from the pointer values;
requires unified virtual addressing (CUDA 6.0 and above)
cudaMemcpy is synchronous with respect to the host */
//cudaMemcpy(dev_spheres, spheres.get(), sizeof(Sphere) * sphere_num, cudaMemcpyHostToDevice);
// method 2: with constant memory
/* cudaMemcpyToSymbol: differs from cudaMemcpy with the
cudaMemcpyHostToDevice kind in that cudaMemcpyToSymbol copies to the memory
named by the symbol (here, constant memory), whereas cudaMemcpy copies to
global memory */
cudaMemcpyToSymbol(dev_spheres, spheres.get(), sizeof(Sphere)* sphere_num);
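/* For reference, the full signature of this standard CUDA runtime call (the
last two parameters are optional and default as shown):
cudaMemcpyToSymbol(symbol, src, count, offset = 0, kind = cudaMemcpyHostToDevice); */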
const int threads_block{ 16 };
/* dim3: built-in vector type based on uint3, effectively a struct of three
unsigned ints describing a three-dimensional extent; when a dim3 variable
is defined, every component left unspecified defaults to 1 */
dim3 blocks(width / threads_block, height / threads_block);
dim3 threads(threads_block, threads_block);
/* <<< >>>: operator introduced by CUDA to specify the thread grid and
block dimensions etc.; it passes the execution configuration to the CUDA
compiler and runtime, describing how many threads run the kernel and how
they are organized. The parameters inside the angle brackets are not
arguments to the device code; they tell the runtime how to launch it.
Arguments for the device code itself go in parentheses, as in a normal
function call. Devices of different compute capabilities impose different
limits on the total thread count and organization. Any array or variable
used inside the kernel must have enough space allocated before the kernel
is called, otherwise the GPU computation can fail, e.g. with out-of-bounds
accesses.
With the runtime API, the execution configuration is written between the
kernel name and the argument list as <<<Dg,Db,Ns,S>>>, where: Dg is a dim3
setting the grid dimensions and sizes, so the grid contains Dg.x*Dg.y*Dg.z
blocks; Db is a dim3 setting the block dimensions and sizes, so each block
contains Db.x*Db.y*Db.z threads; Ns is a size_t giving the amount of shared
memory dynamically allocated per block for this launch, usable by variables
declared as extern __shared__ arrays; Ns is optional and defaults to 0; S is
of type cudaStream_t and sets the stream associated with the kernel; S is
optional and defaults to 0. */
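/* Illustrative sketch of the full four-argument form (hypothetical kernel
name and stream; this program itself only passes Dg and Db below):
cudaStream_t s;
cudaStreamCreate(&s);
some_kernel<<<blocks, threads, threads_block * threads_block * sizeof(float), s>>>(dev_image);
// inside some_kernel: extern __shared__ float smem[]; // Ns bytes per block
cudaStreamDestroy(s); */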
//ray_tracking<<<blocks, threads>>>(dev_image, dev_spheres, width, height, sphere_num); // method 1, without constant memory
ray_tracking<<<blocks, threads>>>(dev_image, width, height, sphere_num); // method 2, with constant memory
cudaMemcpy(ptr, dev_image, length, cudaMemcpyDeviceToHost);
// cudaFree: frees device memory allocated with cudaMalloc
cudaFree(dev_image);
//cudaFree(dev_spheres); // needed for method 1; with constant memory (method 2) no explicit free is required
// cudaEventRecord: records an event; asynchronous; stop records the end time
cudaEventRecord(stop, 0);
// cudaEventSynchronize: waits for an event to complete; blocks the host until stop has been recorded
cudaEventSynchronize(stop);
// cudaEventElapsedTime: computes the elapsed time between two events, in milliseconds
cudaEventElapsedTime(elapsed_time, start, stop);
// cudaEventDestroy: destroys the event object; asynchronous
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
dbf3c219f7c04694c42c05b53ed42f92b045fa3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SumOverLargeBuffer( float* buffer, int spread, int size ){
int offset = CUDASTDOFFSET;
// guard before loading so threads past the valid range do not read out of bounds
if( offset+spread < size ){
float value1 = buffer[offset];
float value2 = buffer[offset+spread];
buffer[offset] = value1+value2;
}
} | dbf3c219f7c04694c42c05b53ed42f92b045fa3f.cu | #include "includes.h"
__global__ void SumOverLargeBuffer( float* buffer, int spread, int size ){
int offset = CUDASTDOFFSET;
// guard before loading so threads past the valid range do not read out of bounds
if( offset+spread < size ){
float value1 = buffer[offset];
float value2 = buffer[offset+spread];
buffer[offset] = value1+value2;
}
} |
12ae5435cd0c12462ff1e1d101e6252d1f72cb36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csv_common.h"
#include "csv_gpu.h"
#include "datetime.cuh"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/lists/list_view.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/detail/copy.h>
#include <thrust/transform.h>
#include <type_traits>
using namespace ::cudf::io;
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace csv {
namespace gpu {
/// Block dimension for dtype detection and conversion kernels
constexpr uint32_t csvparse_block_dim = 128;
/*
* @brief Checks whether the given character is a whitespace character.
*
* @param c The character to check
*
* @return True if the input is whitespace, False otherwise
*/
__device__ __inline__ bool is_whitespace(char c) { return c == '\t' || c == ' '; }
// TODO: replace with `trim_whitespaces_quotes` once `end` semantics is fixed
/*
* @brief Scans a character stream within a range, and adjusts the start and end
* indices of the range to ignore whitespace and quotation characters.
*
* @param data The character stream to scan
* @param start The start index to adjust
* @param end The end index to adjust
* @param quotechar The character used to denote quotes
*
* @return Adjusted or unchanged start_idx and end_idx
*/
__device__ __inline__ void trim_field_start_end(const char **start,
const char **end,
char quotechar = '\0')
{
while ((*start < *end) && is_whitespace(**start)) { (*start)++; }
if ((*start < *end) && **start == quotechar) { (*start)++; }
while ((*start <= *end) && is_whitespace(**end)) { (*end)--; }
if ((*start <= *end) && **end == quotechar) { (*end)--; }
}
/*
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/*
* @brief Checks whether the given character counters indicate a potentially
* valid date and/or time field.
*
* For performance and simplicity, we detect only the most common date
* formats. Example formats that are detectable:
*
* `2001/02/30`
* `2001-02-30 00:00:00`
* `2/30/2001 T04:05:60.7`
* `2 / 1 / 2011`
* `02/January`
*
* @param len Number of non special-symbol or numeric characters
* @param decimal_count Number of '.' characters
* @param colon_count Number of ':' characters
* @param dash_count Number of '-' characters
* @param slash_count Number of '/' characters
*
* @return `true` if it is date-like, `false` otherwise
*/
__device__ __inline__ bool is_datetime(
long len, long decimal_count, long colon_count, long dash_count, long slash_count)
{
// Must not exceed count of longest month (September) plus `T` time indicator
if (len > 10) { return false; }
// Must not have more than one decimal point or more than two time separators
if (decimal_count > 1 || colon_count > 2) { return false; }
// Must have one or two '-' or '/' but not both as date separators
if ((dash_count > 0 && dash_count < 3 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count < 3)) {
return true;
}
return false;
}
/*
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*
* @param len Number of non special-symbol or numeric characters
* @param digit_count Number of digits characters
* @param decimal_count Number of '.' characters
* @param dash_count Number of '-' characters
* @param exponent_count Number of 'e or E' characters
*
* @return `true` if it is floating point-like, `false` otherwise
*/
__device__ __inline__ bool is_floatingpoint(
long len, long digit_count, long decimal_count, long dash_count, long exponent_count)
{
// Can't have more than one exponent and one decimal point
if (decimal_count > 1) return false;
if (exponent_count > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_count == 0 && exponent_count == 0) return false;
// Can only have one '-' per component
if (dash_count > 1 + exponent_count) return false;
// If anything other than these characters is present, it's not a float
if (digit_count + decimal_count + dash_count + exponent_count != len) { return false; }
// Needs at least 1 digit, 2 if exponent is present
if (digit_count < 1 + exponent_count) return false;
return true;
}
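// Worked example (illustrative trace, not part of the original source): for the
// field "1.5e-3" the caller computes len = 6, digit_count = 3, decimal_count = 1,
// dash_count = 1, exponent_count = 1; every check above passes
// (3 + 1 + 1 + 1 == 6 and 3 >= 1 + 1), so the field is classified as a float.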
/*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param opts A set of parsing options
* @param csv_text The entire CSV data to read
* @param column_flags Per-column parsing behavior flags
* @param row_offsets The start of the CSV data of interest
* @param d_columnData The count for each column data type
*/
__global__ void __launch_bounds__(csvparse_block_dim)
data_type_detection(parse_options_view const opts,
device_span<char const> csv_text,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<column_type_histogram> d_columnData)
{
auto const raw_csv = csv_text.data();
// ThreadIds range per block, so also need the blockId
// This is the entry into the fields; threadId is an element within `num_records`
long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
long const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of
// the data
if (rec_id_next >= row_offsets.size()) { return; }
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while (col < column_flags.size() && field_start <= row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts);
// Checking if this is a column that the user wants --- user can filter
// columns
if (column_flags[col] & column_parse::enabled) {
// points to last character in the field
auto field_end = next_delimiter - 1;
long field_len = next_delimiter - field_start;
if (serialized_trie_contains(opts.trie_na, field_start, field_len)) {
atomicAdd(&d_columnData[actual_col].null_count, 1);
} else if (serialized_trie_contains(opts.trie_true, field_start, field_len) ||
serialized_trie_contains(opts.trie_false, field_start, field_len)) {
atomicAdd(&d_columnData[actual_col].bool_count, 1);
} else if (cudf::io::gpu::is_infinity(field_start, field_end)) {
atomicAdd(&d_columnData[actual_col].float_count, 1);
} else {
long countNumber = 0;
long countDecimal = 0;
long countSlash = 0;
long countDash = 0;
long countPlus = 0;
long countColon = 0;
long countString = 0;
long countExponent = 0;
// Modify field_start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
trim_field_start_end(&field_start, &field_end);
field_len = field_end - field_start + 1;
for (auto cur = field_start; cur <= field_end; cur++) {
if (is_digit(*cur)) {
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (*cur) {
case '.': countDecimal++; break;
case '-': countDash++; break;
case '+': countPlus++; break;
case '/': countSlash++; break;
case ':': countColon++; break;
case 'e':
case 'E':
if (cur > field_start && cur < field_end) countExponent++;
break;
default: countString++; break;
}
}
// Integers have to have the length of the string
long int_req_number_cnt = field_len;
// Off by one if they start with a minus sign
if ((*field_start == '-' || *field_start == '+') && field_len > 1) { --int_req_number_cnt; }
if (column_flags[col] & column_parse::as_datetime) {
// PANDAS uses `object` dtype if the date is unparseable
if (is_datetime(countString, countDecimal, countColon, countDash, countSlash)) {
atomicAdd(&d_columnData[actual_col].datetime_count, 1);
} else {
atomicAdd(&d_columnData[actual_col].string_count, 1);
}
} else if (countNumber == int_req_number_cnt) {
bool is_negative = (*field_start == '-');
char const *data_begin = field_start + (is_negative || (*field_start == '+'));
cudf::size_type *ptr = cudf::io::gpu::infer_integral_field_counter(
data_begin, data_begin + countNumber, is_negative, d_columnData[actual_col]);
atomicAdd(ptr, 1);
} else if (is_floatingpoint(
field_len, countNumber, countDecimal, countDash + countPlus, countExponent)) {
atomicAdd(&d_columnData[actual_col].float_count, 1);
} else {
atomicAdd(&d_columnData[actual_col].string_count, 1);
}
}
actual_col++;
}
next_field = next_delimiter + 1;
field_start = next_field;
col++;
}
}
template <typename T, int base>
__inline__ __device__ T decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::io::gpu::parse_numeric<T, base>(begin, end, opts);
}
template <typename T>
__inline__ __device__ T decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::io::gpu::parse_numeric<T>(begin, end, opts);
}
template <>
__inline__ __device__ cudf::timestamp_D decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}};
}
template <>
__inline__ __device__ cudf::timestamp_s decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_s{cudf::duration_s{milli / 1000}};
}
}
template <>
__inline__ __device__ cudf::timestamp_ms decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_ms{cudf::duration_ms{milli}};
}
}
template <>
__inline__ __device__ cudf::timestamp_us decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_us{cudf::duration_us{milli * 1000}};
}
}
template <>
__inline__ __device__ cudf::timestamp_ns decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_ns{cudf::duration_ns{milli * 1000000}};
}
}
#ifndef DURATION_DECODE_VALUE
#define DURATION_DECODE_VALUE(Type) \
template <> \
__inline__ __device__ Type decode_value( \
const char *begin, const char *end, parse_options_view const &opts) \
{ \
return Type{to_time_delta<Type>(begin, end)}; \
}
#endif
DURATION_DECODE_VALUE(duration_D)
DURATION_DECODE_VALUE(duration_s)
DURATION_DECODE_VALUE(duration_ms)
DURATION_DECODE_VALUE(duration_us)
DURATION_DECODE_VALUE(duration_ns)
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::string_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::string_view{};
}
// The purpose of this is merely to allow compilation ONLY
template <>
__inline__ __device__ cudf::dictionary32 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::dictionary32{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::list_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::list_view{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ numeric::decimal32 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return numeric::decimal32{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ numeric::decimal64 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return numeric::decimal64{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::struct_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::struct_view{};
}
/**
* @brief Functor for converting CSV raw data to typed value.
*/
struct decode_op {
/**
* @brief Dispatch for numeric types whose values can be convertible to
* 0 or 1 to represent boolean false/true, based upon checking against a
* true/false values list.
*
* @return bool Whether the parsed value is valid.
*/
template <typename T,
typename std::enable_if_t<std::is_integral<T>::value and !std::is_same<T, bool>::value>
* = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
static_cast<T *>(out_buffer)[row] = [&]() {
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - begin + 1;
if (serialized_trie_contains(opts.trie_true, begin, field_len)) {
return static_cast<T>(1);
} else if (serialized_trie_contains(opts.trie_false, begin, field_len)) {
return static_cast<T>(0);
} else {
if (flags & column_parse::as_hexadecimal) {
return decode_value<T, 16>(begin, end, opts);
} else {
return decode_value<T>(begin, end, opts);
}
}
}();
return true;
}
/**
* @brief Dispatch for boolean type types.
*/
template <typename T, typename std::enable_if_t<std::is_same<T, bool>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - begin + 1;
if (serialized_trie_contains(opts.trie_true, begin, field_len)) {
value = 1;
} else if (serialized_trie_contains(opts.trie_false, begin, field_len)) {
value = 0;
} else {
value = decode_value<T>(begin, end, opts);
}
return true;
}
/**
* @brief Dispatch for floating points, which are set to NaN if the input
* is not valid. In such case, the validity mask is set to zero too.
*/
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end, opts);
return !std::isnan(value);
}
/**
* @brief Dispatch for all other types.
*/
template <typename T,
typename std::enable_if_t<!std::is_integral<T>::value and
!std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end, opts);
return true;
}
};
/**
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] raw_csv The entire CSV data to read
* @param[in] opts A set of parsing options
* @param[in] num_records The number of lines/rows of CSV data
* @param[in] num_columns The number of columns of CSV data
* @param[in] column_flags Per-column parsing behavior flags
* @param[in] recStart The start of the CSV data of interest
* @param[in] dtype The data type of the column
* @param[out] data The output column data
* @param[out] valid The bitmaps indicating whether column fields are valid
* @param[out] num_valid The numbers of valid fields in columns
*/
__global__ void __launch_bounds__(csvparse_block_dim)
convert_csv_to_cudf(cudf::io::parse_options_view options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void *> columns,
device_span<cudf::bitmask_type *> valids)
{
auto const raw_csv = data.data();
// thread IDs range per block, so also need the block id.
// this is the entry into the field array - tid is an element within the num_entries array
long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
long const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of
// the data
if (rec_id_next >= row_offsets.size()) return;
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
while (col < column_flags.size() && field_start <= row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options);
if (column_flags[col] & column_parse::enabled) {
// check if the entire field is a NaN string - consistent with pandas
auto const is_valid =
!serialized_trie_contains(options.trie_na, field_start, next_delimiter - field_start);
// Modify field_start & end to ignore whitespace and quotechars
auto field_end = next_delimiter - 1;
if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) {
trim_field_start_end(&field_start, &field_end, options.quotechar);
}
if (is_valid) {
// Type dispatcher does not handle STRING
if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto end = next_delimiter;
if (options.keepquotes == false) {
if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) {
++field_start;
--end;
}
}
auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]);
str_list[rec_id].first = field_start;
str_list[rec_id].second = end - field_start;
} else {
if (cudf::type_dispatcher(dtypes[actual_col],
decode_op{},
columns[actual_col],
rec_id,
field_start,
field_end,
options,
column_flags[col])) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valids[actual_col], rec_id);
}
}
} else if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
++actual_col;
}
next_field = next_delimiter + 1;
field_start = next_field;
++col;
}
}
/*
* @brief Merge two packed row contexts (each corresponding to a block of characters)
* and return the packed row context corresponding to the merged character block
*/
inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx,
packed_rowctx_t second_ctx)
{
uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3;
uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3;
uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3;
return (first_ctx & ~pack_row_contexts(3, 3, 3)) +
pack_row_contexts(get_row_context(second_ctx, id0),
get_row_context(second_ctx, id1),
get_row_context(second_ctx, id2));
}
/*
* @brief Per-character context:
* 1-bit count (0 or 1) per context in the lower 4 bits
* 2-bit output context id per input context in bits 8..15
*/
constexpr __device__ uint32_t make_char_context(uint32_t id0,
uint32_t id1,
uint32_t id2 = ROW_CTX_COMMENT,
uint32_t c0 = 0,
uint32_t c1 = 0,
uint32_t c2 = 0)
{
return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) |
(c2 << 2);
}
/*
* @brief Merge a 1-character context to keep track of bitmasks where new rows occur
* Merges a single-character "block" row context at position pos with the current
* block's row context (the current block contains 32-pos characters)
*
* @param ctx Current block context and new rows bitmaps
* @param char_ctx state transitions associated with new character
* @param pos Position within the current 32-character block
*
* NOTE: This is probably the most performance-critical piece of the row gathering kernel.
* The char_ctx value should be created via make_char_context, and its value should
* have been evaluated at compile-time.
*/
inline __device__ void merge_char_context(uint4 &ctx, uint32_t char_ctx, uint32_t pos)
{
uint32_t id0 = (ctx.w >> 0) & 3;
uint32_t id1 = (ctx.w >> 2) & 3;
uint32_t id2 = (ctx.w >> 4) & 3;
// Set the newrow bit in the bitmap at the corresponding position
ctx.x |= ((char_ctx >> id0) & 1) << pos;
ctx.y |= ((char_ctx >> id1) & 1) << pos;
ctx.z |= ((char_ctx >> id2) & 1) << pos;
// Update the output context ids
ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) |
((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6);
}
/*
* Convert the context-with-row-bitmaps version to a packed row context
*/
inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map)
{
return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3),
make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3),
make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3));
}
/*
* Selects the row bitmap corresponding to the given parser state
*/
inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid)
{
return (ctxid == ROW_CTX_NONE)
? ctx_map.x
: (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0;
}
/**
* @brief Single pair-wise 512-wide row context merge transform
*
* Merge row context blocks and record the merge operation in a context
* tree so that the transform is reversible.
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* @tparam lanemask mask to specify source of packed row context
* @tparam tmask mask to specify principal thread for merging row context
* @tparam base start location for writing into packed row context tree
* @tparam level_scale level of the node in the tree
* @param ctxtree[out] packed row context tree
* @param ctxb[in] packed row context for the current character block
* @param t thread id (leaf node id)
*/
template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale>
inline __device__ void ctx_merge(uint64_t *ctxtree, packed_rowctx_t *ctxb, uint32_t t)
{
uint64_t tmp = shuffle_xor(*ctxb, lanemask);
if (!(t & tmask)) {
*ctxb = merge_row_contexts(*ctxb, tmp);
ctxtree[base + (t >> level_scale)] = *ctxb;
}
}
/**
* @brief Single 512-wide row context inverse merge transform
*
* Walks the context tree starting from a root node
*
* @tparam rmask Mask to specify which threads write input row context
* @param[in] base Start read location of the merge transform tree
* @param[in] ctxtree Merge transform tree
* @param[in] ctx Input context
* @param[in] brow4 output row in block *4
* @param[in] t thread id (leaf node id)
*/
template <uint32_t rmask>
inline __device__ void ctx_unmerge(
uint32_t base, uint64_t *ctxtree, uint32_t *ctx, uint32_t *brow4, uint32_t t)
{
rowctx32_t ctxb_left, ctxb_right, ctxb_sum;
ctxb_sum = get_row_context(ctxtree[base], *ctx);
ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx);
ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3);
if (t & (rmask)) {
*brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3);
*ctx = ctxb_left & 3;
}
}
/*
* @brief 512-wide row context merge transform
*
* Repeatedly merge row context blocks, keeping track of each merge operation
* in a context tree so that the transform is reversible.
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* Each node contains the counts and output contexts corresponding to the
* possible input contexts.
* Each parent node's count is obtained by adding the corresponding counts
* from the left child node with the right child node's count selected from
* the left child node's output context:
* parent.count[k] = left.count[k] + right.count[left.outctx[k]]
* parent.outctx[k] = right.outctx[left.outctx[k]]
*
* @param ctxtree[out] packed row context tree
* @param ctxb[in] packed row context for the current character block
* @param t thread id (leaf node id)
*/
static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024],
packed_rowctx_t ctxb,
uint32_t t)
{
ctxtree[512 + t] = ctxb;
ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t);
__syncthreads();
if (t < 32) {
ctxb = ctxtree[32 + t];
ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t);
// Final stage
uint64_t tmp = shuffle_xor(ctxb, 16);
if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); }
}
}
/*
* @brief 512-wide row context inverse merge transform
*
* Walks the context tree starting from the root node (index 1) using
* the starting context in node index 0.
* The return value is the starting row and input context for the given leaf node
*
* @param[in] ctxtree Merge transform tree
* @param[in] t thread id (leaf node id)
*
* @return Final row context and count (row_position*4 + context_id format)
*/
static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024],
uint32_t t)
{
uint32_t ctx = ctxtree[0] & 3; // Starting input context
rowctx32_t brow4 = 0; // output row in block *4
ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t);
ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t);
ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t);
ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t);
ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t);
ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t);
ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t);
ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t);
ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t);
return brow4 + ctx;
}
/**
* @brief Gather row offsets from CSV character data split into 16KB chunks
*
* This is done in two phases: the first phase returns the possible row counts
* per 16K character block for each possible parsing context at the start of the block,
* along with the resulting parsing context at the end of the block.
* The caller can then compute the actual parsing context at the beginning of each
* individual block and total row count.
* The second phase outputs the location of each row in the block, using the parsing
* context and initial row counter accumulated from the results of the previous phase.
* Row parsing context will be updated after phase 2 such that the value contains
* the number of rows starting at byte_range_end or beyond.
*
* @param row_ctx Row parsing context (output of phase 1 or input to phase 2)
* @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2)
* @param data Base pointer of character data (all row offsets are relative to this)
* @param chunk_size Total number of characters to parse
* @param parse_pos Current parsing position in the file
* @param start_offset Position of the start of the character buffer in the file
* @param data_size CSV file size
* @param byte_range_start Ignore rows starting before this position in the file
* @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx
* @param skip_rows Number of rows to skip (ignored in phase 1)
* @param terminator Line terminator character
* @param delimiter Column delimiter character
* @param quotechar Quote character
* @param escapechar Delimiter escape character
* @param commentchar Comment line character (skip rows starting with this character)
*/
__global__ void __launch_bounds__(rowofs_block_dim)
gather_row_offsets_gpu(uint64_t *row_ctx,
device_span<uint64_t> offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
int terminator,
int delimiter,
int quotechar,
int escapechar,
int commentchar)
{
auto start = data.begin();
__shared__ __align__(8) uint64_t ctxtree[rowofs_block_dim * 2];
using warp_reduce = typename hipcub::WarpReduce<uint32_t>;
using half_warp_reduce = typename hipcub::WarpReduce<uint32_t, 16>;
__shared__ union {
typename warp_reduce::TempStorage full;
typename half_warp_reduce::TempStorage half[rowofs_block_dim / 32];
} temp_storage;
const char *end = start + (min(parse_pos + chunk_size, data_size) - start_offset);
uint32_t t = threadIdx.x;
size_t block_pos =
(parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32;
const char *cur = start + block_pos;
// Initial state is neutral context (no state transitions), zero rows
uint4 ctx_map = {
.x = 0,
.y = 0,
.z = 0,
.w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)};
int c, c_prev = (cur > start && cur <= end) ? cur[-1] : terminator;
// Loop through all 32 bytes and keep a bitmask of row starts for each possible input context
for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) {
uint32_t ctx;
if (cur < end) {
c = cur[0];
if (c_prev == terminator) {
if (c == commentchar) {
// Start of a new comment row
ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1);
} else if (c == quotechar) {
// Quoted string on newrow, or quoted string ending in terminator
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1);
} else {
// Start of a new row unless within a quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1);
}
} else if (c == quotechar) {
if (c_prev == delimiter || c_prev == quotechar) {
// Quoted string after delimiter, quoted string ending in delimiter, or double-quote
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE);
} else {
// Closing or ignored quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE);
}
} else {
// Neutral character
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE);
}
} else {
const char *data_end = start + data_size - start_offset;
if (cur <= end && cur == data_end) {
// Add a newline at data end (need the extra row offset to infer length of previous row)
ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1);
} else {
// Pass-through context (beyond chunk_size or data_end)
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT);
}
}
// Merge with current context, keeping track of where new rows occur
merge_char_context(ctx_map, ctx, pos);
}
// Eliminate rows that start before byte_range_start
if (start_offset + block_pos < byte_range_start) {
uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31));
uint32_t mask = 0xfffffffe << dist_minus1;
ctx_map.x &= mask;
ctx_map.y &= mask;
ctx_map.z &= mask;
}
// Convert the long-form {rowmap,outctx}[inctx] version into packed version
// {rowcount,ouctx}[inctx], then merge the row contexts of the 32-character blocks into
// a single 16K-character block context
rowctx_merge_transform(ctxtree, pack_rowmaps(ctx_map), t);
// If this is the second phase, get the block's initial parser state and row counter
if (offsets_out.data()) {
if (t == 0) { ctxtree[0] = row_ctx[blockIdx.x]; }
__syncthreads();
// Walk back the transform tree with the known initial parser state
rowctx32_t ctx = rowctx_inverse_merge_transform(ctxtree, t);
uint64_t row = (ctxtree[0] >> 2) + (ctx >> 2);
uint32_t rows_out_of_range = 0;
uint32_t rowmap = select_rowmap(ctx_map, ctx & 3);
// Output row positions
while (rowmap != 0) {
uint32_t pos = __ffs(rowmap);
block_pos += pos;
if (row >= skip_rows && row - skip_rows < offsets_out.size()) {
// Output byte offsets are relative to the base of the input buffer
offsets_out[row - skip_rows] = block_pos - 1;
rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end);
}
row++;
rowmap >>= pos;
}
// Return the number of rows out of range
rows_out_of_range = half_warp_reduce(temp_storage.half[t / 32]).Sum(rows_out_of_range);
__syncthreads();
if (!(t & 0xf)) { ctxtree[t >> 4] = rows_out_of_range; }
__syncthreads();
if (t < 32) {
rows_out_of_range = warp_reduce(temp_storage.full).Sum(static_cast<uint32_t>(ctxtree[t]));
if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; }
}
} else {
// Just store the row counts and output contexts
if (t == 0) { row_ctx[blockIdx.x] = ctxtree[1]; }
}
}
size_t __host__ count_blank_rows(const cudf::io::parse_options_view &opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
rmm::cuda_stream_view stream)
{
const auto newline = opts.skipblanklines ? opts.terminator : opts.comment;
const auto comment = opts.comment != '\0' ? opts.comment : newline;
const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
return thrust::count_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, newline, comment, carriage] __device__(const uint64_t pos) {
return ((pos != data.size()) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
}
void __host__ remove_blank_rows(cudf::io::parse_options_view const &options,
device_span<char const> const data,
rmm::device_vector<uint64_t> &row_offsets,
rmm::cuda_stream_view stream)
{
size_t d_size = data.size();
const auto newline = options.skipblanklines ? options.terminator : options.comment;
const auto comment = options.comment != '\0' ? options.comment : newline;
const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment;
auto new_end = thrust::remove_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) {
return ((pos != d_size) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
row_offsets.resize(new_end - row_offsets.begin());
}
thrust::host_vector<column_type_histogram> detect_column_types(
cudf::io::parse_options_view const &options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_starts,
size_t const num_active_columns,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
const int block_size = csvparse_block_dim;
const int grid_size = (row_starts.size() + block_size - 1) / block_size;
auto d_stats = rmm::device_vector<column_type_histogram>(num_active_columns);
hipLaunchKernelGGL(( data_type_detection), dim3(grid_size), dim3(block_size), 0, stream.value(),
options, data, column_flags, row_starts, d_stats);
return thrust::host_vector<column_type_histogram>(d_stats);
}
void __host__ decode_row_column_data(cudf::io::parse_options_view const &options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<cudf::data_type const> const dtypes,
device_span<void *> const columns,
device_span<cudf::bitmask_type *> const valids,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
auto const block_size = csvparse_block_dim;
auto const num_rows = row_offsets.size() - 1;
auto const grid_size = (num_rows + block_size - 1) / block_size;
hipLaunchKernelGGL(( convert_csv_to_cudf), dim3(grid_size), dim3(block_size), 0, stream.value(),
options, data, column_flags, row_offsets, dtypes, columns, valids);
}
uint32_t __host__ gather_row_offsets(const parse_options_view &options,
uint64_t *row_ctx,
device_span<uint64_t> const offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
rmm::cuda_stream_view stream)
{
uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes);
hipLaunchKernelGGL(( gather_row_offsets_gpu), dim3(dim_grid), dim3(rowofs_block_dim), 0, stream.value(),
row_ctx,
offsets_out,
data,
chunk_size,
parse_pos,
start_offset,
data_size,
byte_range_start,
byte_range_end,
skip_rows,
options.terminator,
options.delimiter,
(options.quotechar) ? options.quotechar : 0x100,
/*(options.escapechar) ? options.escapechar :*/ 0x100,
(options.comment) ? options.comment : 0x100);
return dim_grid;
}
} // namespace gpu
} // namespace csv
} // namespace io
} // namespace cudf
| 12ae5435cd0c12462ff1e1d101e6252d1f72cb36.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "csv_common.h"
#include "csv_gpu.h"
#include "datetime.cuh"
#include <io/utilities/block_utils.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/lists/list_view.cuh>
#include <cudf/null_mask.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/detail/copy.h>
#include <thrust/transform.h>
#include <type_traits>
using namespace ::cudf::io;
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace csv {
namespace gpu {
/// Block dimension for dtype detection and conversion kernels
constexpr uint32_t csvparse_block_dim = 128;
/*
* @brief Checks whether the given character is a whitespace character.
*
* @param c The character to check
*
* @return True if the input is whitespace, False otherwise
*/
__device__ __inline__ bool is_whitespace(char c) { return c == '\t' || c == ' '; }
// TODO: replace with `trim_whitespaces_quotes` once `end` semantics is fixed
/*
* @brief Scans a character stream within a range, and adjusts the start and end
* indices of the range to ignore whitespace and quotation characters.
*
* @param data The character stream to scan
* @param start The start index to adjust
* @param end The end index to adjust
* @param quotechar The character used to denote quotes
*
* @return Adjusted or unchanged start_idx and end_idx
*/
__device__ __inline__ void trim_field_start_end(const char **start,
const char **end,
char quotechar = '\0')
{
while ((*start < *end) && is_whitespace(**start)) { (*start)++; }
if ((*start < *end) && **start == quotechar) { (*start)++; }
while ((*start <= *end) && is_whitespace(**end)) { (*end)--; }
if ((*start <= *end) && **end == quotechar) { (*end)--; }
}
/*
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/*
* @brief Checks whether the given character counters indicate a potentially
* valid date and/or time field.
*
* For performance and simplicity, we detect only the most common date
* formats. Example formats that are detectable:
*
* `2001/02/30`
* `2001-02-30 00:00:00`
* `2/30/2001 T04:05:60.7`
* `2 / 1 / 2011`
* `02/January`
*
* @param len Number of non special-symbol or numeric characters
* @param decimal_count Number of '.' characters
* @param colon_count Number of ':' characters
* @param dash_count Number of '-' characters
* @param slash_count Number of '/' characters
*
* @return `true` if it is date-like, `false` otherwise
*/
__device__ __inline__ bool is_datetime(
long len, long decimal_count, long colon_count, long dash_count, long slash_count)
{
// Must not exceed count of longest month (September) plus `T` time indicator
if (len > 10) { return false; }
// Must not have more than one decimal point or more than two time separators
if (decimal_count > 1 || colon_count > 2) { return false; }
// Must have one or two '-' or '/' but not both as date separators
if ((dash_count > 0 && dash_count < 3 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count < 3)) {
return true;
}
return false;
}
/*
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*
* @param len Number of non special-symbol or numeric characters
* @param digit_count Number of digits characters
* @param decimal_count Number of '.' characters
* @param dash_count Number of '-' characters
* @param exponent_count Number of 'e or E' characters
*
* @return `true` if it is floating point-like, `false` otherwise
*/
__device__ __inline__ bool is_floatingpoint(
long len, long digit_count, long decimal_count, long dash_count, long exponent_count)
{
// Can't have more than one exponent and one decimal point
if (decimal_count > 1) return false;
if (exponent_count > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_count == 0 && exponent_count == 0) return false;
// Can only have one '-' per component
if (dash_count > 1 + exponent_count) return false;
// If anything other than these characters is present, it's not a float
if (digit_count + decimal_count + dash_count + exponent_count != len) { return false; }
// Needs at least 1 digit, 2 if exponent is present
if (digit_count < 1 + exponent_count) return false;
return true;
}
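// Worked example (illustrative trace, not part of the original source): for the
// field "1.5e-3" the caller computes len = 6, digit_count = 3, decimal_count = 1,
// dash_count = 1, exponent_count = 1; every check above passes
// (3 + 1 + 1 + 1 == 6 and 3 >= 1 + 1), so the field is classified as a float.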
/*
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param opts A set of parsing options
* @param csv_text The entire CSV data to read
* @param column_flags Per-column parsing behavior flags
* @param row_offsets The start of the CSV data of interest
* @param d_columnData The count for each column data type
*/
__global__ void __launch_bounds__(csvparse_block_dim)
data_type_detection(parse_options_view const opts,
device_span<char const> csv_text,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<column_type_histogram> d_columnData)
{
auto const raw_csv = csv_text.data();
// ThreadIds range per block, so also need the blockId
// This is the entry into the fields; threadId is an element within `num_records`
long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
long const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of
// the data
if (rec_id_next >= row_offsets.size()) { return; }
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
// Going through all the columns of a given record
while (col < column_flags.size() && field_start <= row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(field_start, row_end, opts);
// Checking if this is a column that the user wants --- user can filter
// columns
if (column_flags[col] & column_parse::enabled) {
// points to last character in the field
auto field_end = next_delimiter - 1;
long field_len = next_delimiter - field_start;
if (serialized_trie_contains(opts.trie_na, field_start, field_len)) {
atomicAdd(&d_columnData[actual_col].null_count, 1);
} else if (serialized_trie_contains(opts.trie_true, field_start, field_len) ||
serialized_trie_contains(opts.trie_false, field_start, field_len)) {
atomicAdd(&d_columnData[actual_col].bool_count, 1);
} else if (cudf::io::gpu::is_infinity(field_start, field_end)) {
atomicAdd(&d_columnData[actual_col].float_count, 1);
} else {
long countNumber = 0;
long countDecimal = 0;
long countSlash = 0;
long countDash = 0;
long countPlus = 0;
long countColon = 0;
long countString = 0;
long countExponent = 0;
// Modify field_start & end to ignore whitespace and quotechars
// This could possibly result in additional empty fields
trim_field_start_end(&field_start, &field_end);
field_len = field_end - field_start + 1;
for (auto cur = field_start; cur <= field_end; cur++) {
if (is_digit(*cur)) {
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (*cur) {
case '.': countDecimal++; break;
case '-': countDash++; break;
case '+': countPlus++; break;
case '/': countSlash++; break;
case ':': countColon++; break;
case 'e':
case 'E':
if (cur > field_start && cur < field_end) countExponent++;
break;
default: countString++; break;
}
}
// Integers have to have the length of the string
long int_req_number_cnt = field_len;
// Off by one if they start with a minus sign
if ((*field_start == '-' || *field_start == '+') && field_len > 1) { --int_req_number_cnt; }
if (column_flags[col] & column_parse::as_datetime) {
// PANDAS uses `object` dtype if the date is unparseable
if (is_datetime(countString, countDecimal, countColon, countDash, countSlash)) {
atomicAdd(&d_columnData[actual_col].datetime_count, 1);
} else {
atomicAdd(&d_columnData[actual_col].string_count, 1);
}
} else if (countNumber == int_req_number_cnt) {
bool is_negative = (*field_start == '-');
char const *data_begin = field_start + (is_negative || (*field_start == '+'));
cudf::size_type *ptr = cudf::io::gpu::infer_integral_field_counter(
data_begin, data_begin + countNumber, is_negative, d_columnData[actual_col]);
atomicAdd(ptr, 1);
} else if (is_floatingpoint(
field_len, countNumber, countDecimal, countDash + countPlus, countExponent)) {
atomicAdd(&d_columnData[actual_col].float_count, 1);
} else {
atomicAdd(&d_columnData[actual_col].string_count, 1);
}
}
actual_col++;
}
next_field = next_delimiter + 1;
field_start = next_field;
col++;
}
}
template <typename T, int base>
__inline__ __device__ T decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::io::gpu::parse_numeric<T, base>(begin, end, opts);
}
template <typename T>
__inline__ __device__ T decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::io::gpu::parse_numeric<T>(begin, end, opts);
}
template <>
__inline__ __device__ cudf::timestamp_D decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return timestamp_D{cudf::duration_D{to_date(begin, end, opts.dayfirst)}};
}
template <>
__inline__ __device__ cudf::timestamp_s decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_s{cudf::duration_s{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_s{cudf::duration_s{milli / 1000}};
}
}
template <>
__inline__ __device__ cudf::timestamp_ms decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_ms{cudf::duration_ms{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_ms{cudf::duration_ms{milli}};
}
}
template <>
__inline__ __device__ cudf::timestamp_us decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_us{cudf::duration_us{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_us{cudf::duration_us{milli * 1000}};
}
}
template <>
__inline__ __device__ cudf::timestamp_ns decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
auto milli = to_date_time(begin, end, opts.dayfirst);
if (milli == -1) {
return timestamp_ns{cudf::duration_ns{to_non_negative_integer<int64_t>(begin, end)}};
} else {
return timestamp_ns{cudf::duration_ns{milli * 1000000}};
}
}
#ifndef DURATION_DECODE_VALUE
#define DURATION_DECODE_VALUE(Type) \
template <> \
__inline__ __device__ Type decode_value( \
const char *begin, const char *end, parse_options_view const &opts) \
{ \
return Type{to_time_delta<Type>(begin, end)}; \
}
#endif
DURATION_DECODE_VALUE(duration_D)
DURATION_DECODE_VALUE(duration_s)
DURATION_DECODE_VALUE(duration_ms)
DURATION_DECODE_VALUE(duration_us)
DURATION_DECODE_VALUE(duration_ns)
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::string_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::string_view{};
}
// The purpose of this is merely to allow compilation ONLY
template <>
__inline__ __device__ cudf::dictionary32 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::dictionary32{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::list_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::list_view{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ numeric::decimal32 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return numeric::decimal32{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ numeric::decimal64 decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return numeric::decimal64{};
}
// The purpose of this is merely to allow compilation ONLY
// TODO : make this work for csv
template <>
__inline__ __device__ cudf::struct_view decode_value(char const *begin,
char const *end,
parse_options_view const &opts)
{
return cudf::struct_view{};
}
/**
* @brief Functor for converting CSV raw data to typed value.
*/
struct decode_op {
/**
* @brief Dispatch for numeric types whose values can be convertible to
* 0 or 1 to represent boolean false/true, based upon checking against a
* true/false values list.
*
* @return bool Whether the parsed value is valid.
*/
template <typename T,
typename std::enable_if_t<std::is_integral<T>::value and !std::is_same<T, bool>::value>
* = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
static_cast<T *>(out_buffer)[row] = [&]() {
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - begin + 1;
if (serialized_trie_contains(opts.trie_true, begin, field_len)) {
return static_cast<T>(1);
} else if (serialized_trie_contains(opts.trie_false, begin, field_len)) {
return static_cast<T>(0);
} else {
if (flags & column_parse::as_hexadecimal) {
return decode_value<T, 16>(begin, end, opts);
} else {
return decode_value<T>(begin, end, opts);
}
}
}();
return true;
}
/**
* @brief Dispatch for boolean type types.
*/
template <typename T, typename std::enable_if_t<std::is_same<T, bool>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
const size_t field_len = end - begin + 1;
if (serialized_trie_contains(opts.trie_true, begin, field_len)) {
value = 1;
} else if (serialized_trie_contains(opts.trie_false, begin, field_len)) {
value = 0;
} else {
value = decode_value<T>(begin, end, opts);
}
return true;
}
/**
* @brief Dispatch for floating points, which are set to NaN if the input
* is not valid. In such case, the validity mask is set to zero too.
*/
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end, opts);
return !std::isnan(value);
}
/**
* @brief Dispatch for all other types.
*/
template <typename T,
typename std::enable_if_t<!std::is_integral<T>::value and
!std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(void *out_buffer,
size_t row,
char const *begin,
char const *end,
parse_options_view const &opts,
column_parse::flags flags)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end, opts);
return true;
}
};
/**
* @brief CUDA kernel that parses and converts CSV data into cuDF column data.
*
* Data is processed one record at a time
*
 * @param[in] options A set of parsing options
 * @param[in] data The entire CSV data to read
 * @param[in] column_flags Per-column parsing behavior flags
 * @param[in] row_offsets The start of each row of CSV data of interest
 * @param[in] dtypes The data types of the columns
 * @param[out] columns The output column data
 * @param[out] valids The bitmaps indicating whether column fields are valid
*/
__global__ void __launch_bounds__(csvparse_block_dim)
convert_csv_to_cudf(cudf::io::parse_options_view options,
device_span<char const> data,
device_span<column_parse::flags const> column_flags,
device_span<uint64_t const> row_offsets,
device_span<cudf::data_type const> dtypes,
device_span<void *> columns,
device_span<cudf::bitmask_type *> valids)
{
auto const raw_csv = data.data();
  // thread IDs range per block, so we also need the block id.
  // rec_id is this thread's entry into the row offsets array - one record per thread
long const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
long const rec_id_next = rec_id + 1;
// we can have more threads than data, make sure we are not past the end of
// the data
if (rec_id_next >= row_offsets.size()) return;
auto field_start = raw_csv + row_offsets[rec_id];
auto const row_end = raw_csv + row_offsets[rec_id_next];
auto next_field = field_start;
int col = 0;
int actual_col = 0;
while (col < column_flags.size() && field_start <= row_end) {
auto next_delimiter = cudf::io::gpu::seek_field_end(next_field, row_end, options);
if (column_flags[col] & column_parse::enabled) {
// check if the entire field is a NaN string - consistent with pandas
auto const is_valid =
!serialized_trie_contains(options.trie_na, field_start, next_delimiter - field_start);
// Modify field_start & end to ignore whitespace and quotechars
auto field_end = next_delimiter - 1;
if (is_valid && dtypes[actual_col].id() != cudf::type_id::STRING) {
trim_field_start_end(&field_start, &field_end, options.quotechar);
}
if (is_valid) {
// Type dispatcher does not handle STRING
if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto end = next_delimiter;
if (options.keepquotes == false) {
if ((*field_start == options.quotechar) && (*(end - 1) == options.quotechar)) {
++field_start;
--end;
}
}
auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]);
str_list[rec_id].first = field_start;
str_list[rec_id].second = end - field_start;
} else {
if (cudf::type_dispatcher(dtypes[actual_col],
decode_op{},
columns[actual_col],
rec_id,
field_start,
field_end,
options,
column_flags[col])) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valids[actual_col], rec_id);
}
}
} else if (dtypes[actual_col].id() == cudf::type_id::STRING) {
auto str_list = static_cast<std::pair<const char *, size_t> *>(columns[actual_col]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
++actual_col;
}
next_field = next_delimiter + 1;
field_start = next_field;
++col;
}
}
/*
* @brief Merge two packed row contexts (each corresponding to a block of characters)
* and return the packed row context corresponding to the merged character block
*/
inline __device__ packed_rowctx_t merge_row_contexts(packed_rowctx_t first_ctx,
packed_rowctx_t second_ctx)
{
uint32_t id0 = get_row_context(first_ctx, ROW_CTX_NONE) & 3;
uint32_t id1 = get_row_context(first_ctx, ROW_CTX_QUOTE) & 3;
uint32_t id2 = get_row_context(first_ctx, ROW_CTX_COMMENT) & 3;
return (first_ctx & ~pack_row_contexts(3, 3, 3)) +
pack_row_contexts(get_row_context(second_ctx, id0),
get_row_context(second_ctx, id1),
get_row_context(second_ctx, id2));
}
/*
* @brief Per-character context:
* 1-bit count (0 or 1) per context in the lower 4 bits
* 2-bit output context id per input context in bits 8..15
*/
constexpr __device__ uint32_t make_char_context(uint32_t id0,
uint32_t id1,
uint32_t id2 = ROW_CTX_COMMENT,
uint32_t c0 = 0,
uint32_t c1 = 0,
uint32_t c2 = 0)
{
return (id0 << 8) | (id1 << 10) | (id2 << 12) | (ROW_CTX_EOF << 14) | (c0) | (c1 << 1) |
(c2 << 2);
}
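/*
 * Worked example (assuming the 2-bit ROW_CTX_* ids are NONE=0, QUOTE=1,
 * COMMENT=2, EOF=3, as the packing above implies):
 *   make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1)
 *     = (0 << 8) | (1 << 10) | (0 << 12) | (3 << 14) | 1 | (0 << 1) | (1 << 2)
 *     = 0xC405
 * i.e. for a terminator-like character: the NONE and COMMENT contexts start a
 * new row and transition to NONE, while the QUOTE context stays in QUOTE.
 */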
/*
* @brief Merge a 1-character context to keep track of bitmasks where new rows occur
* Merges a single-character "block" row context at position pos with the current
* block's row context (the current block contains 32-pos characters)
*
* @param ctx Current block context and new rows bitmaps
* @param char_ctx state transitions associated with new character
* @param pos Position within the current 32-character block
*
* NOTE: This is probably the most performance-critical piece of the row gathering kernel.
* The char_ctx value should be created via make_char_context, and its value should
* have been evaluated at compile-time.
*/
inline __device__ void merge_char_context(uint4 &ctx, uint32_t char_ctx, uint32_t pos)
{
uint32_t id0 = (ctx.w >> 0) & 3;
uint32_t id1 = (ctx.w >> 2) & 3;
uint32_t id2 = (ctx.w >> 4) & 3;
// Set the newrow bit in the bitmap at the corresponding position
ctx.x |= ((char_ctx >> id0) & 1) << pos;
ctx.y |= ((char_ctx >> id1) & 1) << pos;
ctx.z |= ((char_ctx >> id2) & 1) << pos;
// Update the output context ids
ctx.w = ((char_ctx >> (8 + id0 * 2)) & 0x03) | ((char_ctx >> (6 + id1 * 2)) & 0x0c) |
((char_ctx >> (4 + id2 * 2)) & 0x30) | (ROW_CTX_EOF << 6);
}
/*
* Convert the context-with-row-bitmaps version to a packed row context
*/
inline __device__ packed_rowctx_t pack_rowmaps(uint4 ctx_map)
{
return pack_row_contexts(make_row_context(__popc(ctx_map.x), (ctx_map.w >> 0) & 3),
make_row_context(__popc(ctx_map.y), (ctx_map.w >> 2) & 3),
make_row_context(__popc(ctx_map.z), (ctx_map.w >> 4) & 3));
}
/*
* Selects the row bitmap corresponding to the given parser state
*/
inline __device__ uint32_t select_rowmap(uint4 ctx_map, uint32_t ctxid)
{
return (ctxid == ROW_CTX_NONE)
? ctx_map.x
: (ctxid == ROW_CTX_QUOTE) ? ctx_map.y : (ctxid == ROW_CTX_COMMENT) ? ctx_map.z : 0;
}
/**
* @brief Single pair-wise 512-wide row context merge transform
*
* Merge row context blocks and record the merge operation in a context
* tree so that the transform is reversible.
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* @tparam lanemask mask to specify source of packed row context
 * @tparam tmask mask to specify principal thread for merging row context
* @tparam base start location for writing into packed row context tree
* @tparam level_scale level of the node in the tree
* @param ctxtree[out] packed row context tree
* @param ctxb[in] packed row context for the current character block
* @param t thread id (leaf node id)
*/
template <uint32_t lanemask, uint32_t tmask, uint32_t base, uint32_t level_scale>
inline __device__ void ctx_merge(uint64_t *ctxtree, packed_rowctx_t *ctxb, uint32_t t)
{
uint64_t tmp = shuffle_xor(*ctxb, lanemask);
if (!(t & tmask)) {
*ctxb = merge_row_contexts(*ctxb, tmp);
ctxtree[base + (t >> level_scale)] = *ctxb;
}
}
/**
* @brief Single 512-wide row context inverse merge transform
*
* Walks the context tree starting from a root node
*
* @tparam rmask Mask to specify which threads write input row context
* @param[in] base Start read location of the merge transform tree
* @param[in] ctxtree Merge transform tree
* @param[in] ctx Input context
* @param[in] brow4 output row in block *4
* @param[in] t thread id (leaf node id)
*/
template <uint32_t rmask>
inline __device__ void ctx_unmerge(
uint32_t base, uint64_t *ctxtree, uint32_t *ctx, uint32_t *brow4, uint32_t t)
{
rowctx32_t ctxb_left, ctxb_right, ctxb_sum;
ctxb_sum = get_row_context(ctxtree[base], *ctx);
ctxb_left = get_row_context(ctxtree[(base)*2 + 0], *ctx);
ctxb_right = get_row_context(ctxtree[(base)*2 + 1], ctxb_left & 3);
if (t & (rmask)) {
*brow4 += (ctxb_sum & ~3) - (ctxb_right & ~3);
*ctx = ctxb_left & 3;
}
}
/*
* @brief 512-wide row context merge transform
*
* Repeatedly merge row context blocks, keeping track of each merge operation
* in a context tree so that the transform is reversible
* The tree is organized such that the left and right children of node n
* are located at indices n*2 and n*2+1, the root node starting at index 1
*
* Each node contains the counts and output contexts corresponding to the
* possible input contexts.
* Each parent node's count is obtained by adding the corresponding counts
* from the left child node with the right child node's count selected from
* the left child node's output context:
* parent.count[k] = left.count[k] + right.count[left.outctx[k]]
* parent.outctx[k] = right.outctx[left.outctx[k]]
*
* @param ctxtree[out] packed row context tree
* @param ctxb[in] packed row context for the current character block
* @param t thread id (leaf node id)
*/
static inline __device__ void rowctx_merge_transform(uint64_t ctxtree[1024],
packed_rowctx_t ctxb,
uint32_t t)
{
ctxtree[512 + t] = ctxb;
ctx_merge<1, 0x1, 256, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 128, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 64, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 32, 4>(ctxtree, &ctxb, t);
__syncthreads();
if (t < 32) {
ctxb = ctxtree[32 + t];
ctx_merge<1, 0x1, 16, 1>(ctxtree, &ctxb, t);
ctx_merge<2, 0x3, 8, 2>(ctxtree, &ctxb, t);
ctx_merge<4, 0x7, 4, 3>(ctxtree, &ctxb, t);
ctx_merge<8, 0xf, 2, 4>(ctxtree, &ctxb, t);
// Final stage
uint64_t tmp = shuffle_xor(ctxb, 16);
if (t == 0) { ctxtree[1] = merge_row_contexts(ctxb, tmp); }
}
}
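/*
 * Resulting tree layout in the 1024-entry ctxtree array:
 *   ctxtree[512..1023] : the 512 per-thread leaf contexts
 *   ctxtree[256..511]  : merges of adjacent leaf pairs
 *   ...                : one level per ctx_merge call
 *   ctxtree[1]         : context of the entire 16K-character block
 *   ctxtree[0]         : unused here; phase 2 stores the block's starting
 *                        context in it before the inverse transform
 */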
/*
* @brief 512-wide row context inverse merge transform
*
* Walks the context tree starting from the root node (index 1) using
* the starting context in node index 0.
* The return value is the starting row and input context for the given leaf node
*
* @param[in] ctxtree Merge transform tree
* @param[in] t thread id (leaf node id)
*
* @return Final row context and count (row_position*4 + context_id format)
*/
static inline __device__ rowctx32_t rowctx_inverse_merge_transform(uint64_t ctxtree[1024],
uint32_t t)
{
uint32_t ctx = ctxtree[0] & 3; // Starting input context
rowctx32_t brow4 = 0; // output row in block *4
ctx_unmerge<256>(1, ctxtree, &ctx, &brow4, t);
ctx_unmerge<128>(2 + (t >> 8), ctxtree, &ctx, &brow4, t);
ctx_unmerge<64>(4 + (t >> 7), ctxtree, &ctx, &brow4, t);
ctx_unmerge<32>(8 + (t >> 6), ctxtree, &ctx, &brow4, t);
ctx_unmerge<16>(16 + (t >> 5), ctxtree, &ctx, &brow4, t);
ctx_unmerge<8>(32 + (t >> 4), ctxtree, &ctx, &brow4, t);
ctx_unmerge<4>(64 + (t >> 3), ctxtree, &ctx, &brow4, t);
ctx_unmerge<2>(128 + (t >> 2), ctxtree, &ctx, &brow4, t);
ctx_unmerge<1>(256 + (t >> 1), ctxtree, &ctx, &brow4, t);
return brow4 + ctx;
}
/**
* @brief Gather row offsets from CSV character data split into 16KB chunks
*
* This is done in two phases: the first phase returns the possible row counts
* per 16K character block for each possible parsing context at the start of the block,
* along with the resulting parsing context at the end of the block.
* The caller can then compute the actual parsing context at the beginning of each
* individual block and total row count.
* The second phase outputs the location of each row in the block, using the parsing
* context and initial row counter accumulated from the results of the previous phase.
* Row parsing context will be updated after phase 2 such that the value contains
* the number of rows starting at byte_range_end or beyond.
*
* @param row_ctx Row parsing context (output of phase 1 or input to phase 2)
* @param offsets_out Row offsets (nullptr for phase1, non-null indicates phase 2)
* @param data Base pointer of character data (all row offsets are relative to this)
* @param chunk_size Total number of characters to parse
* @param parse_pos Current parsing position in the file
* @param start_offset Position of the start of the character buffer in the file
* @param data_size CSV file size
* @param byte_range_start Ignore rows starting before this position in the file
* @param byte_range_end In phase 2, store the number of rows beyond range in row_ctx
* @param skip_rows Number of rows to skip (ignored in phase 1)
* @param terminator Line terminator character
* @param delimiter Column delimiter character
* @param quotechar Quote character
* @param escapechar Delimiter escape character
* @param commentchar Comment line character (skip rows starting with this character)
*/
__global__ void __launch_bounds__(rowofs_block_dim)
gather_row_offsets_gpu(uint64_t *row_ctx,
device_span<uint64_t> offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
int terminator,
int delimiter,
int quotechar,
int escapechar,
int commentchar)
{
auto start = data.begin();
__shared__ __align__(8) uint64_t ctxtree[rowofs_block_dim * 2];
using warp_reduce = typename cub::WarpReduce<uint32_t>;
using half_warp_reduce = typename cub::WarpReduce<uint32_t, 16>;
__shared__ union {
typename warp_reduce::TempStorage full;
typename half_warp_reduce::TempStorage half[rowofs_block_dim / 32];
} temp_storage;
const char *end = start + (min(parse_pos + chunk_size, data_size) - start_offset);
uint32_t t = threadIdx.x;
size_t block_pos =
(parse_pos - start_offset) + blockIdx.x * static_cast<size_t>(rowofs_block_bytes) + t * 32;
const char *cur = start + block_pos;
// Initial state is neutral context (no state transitions), zero rows
uint4 ctx_map = {
.x = 0,
.y = 0,
.z = 0,
.w = (ROW_CTX_NONE << 0) | (ROW_CTX_QUOTE << 2) | (ROW_CTX_COMMENT << 4) | (ROW_CTX_EOF << 6)};
int c, c_prev = (cur > start && cur <= end) ? cur[-1] : terminator;
// Loop through all 32 bytes and keep a bitmask of row starts for each possible input context
for (uint32_t pos = 0; pos < 32; pos++, cur++, c_prev = c) {
uint32_t ctx;
if (cur < end) {
c = cur[0];
if (c_prev == terminator) {
if (c == commentchar) {
// Start of a new comment row
ctx = make_char_context(ROW_CTX_COMMENT, ROW_CTX_QUOTE, ROW_CTX_COMMENT, 1, 0, 1);
} else if (c == quotechar) {
// Quoted string on newrow, or quoted string ending in terminator
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE, ROW_CTX_QUOTE, 1, 0, 1);
} else {
// Start of a new row unless within a quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_NONE, 1, 0, 1);
}
} else if (c == quotechar) {
if (c_prev == delimiter || c_prev == quotechar) {
// Quoted string after delimiter, quoted string ending in delimiter, or double-quote
ctx = make_char_context(ROW_CTX_QUOTE, ROW_CTX_NONE);
} else {
// Closing or ignored quote
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_NONE);
}
} else {
// Neutral character
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE);
}
} else {
const char *data_end = start + data_size - start_offset;
if (cur <= end && cur == data_end) {
// Add a newline at data end (need the extra row offset to infer length of previous row)
ctx = make_char_context(ROW_CTX_EOF, ROW_CTX_EOF, ROW_CTX_EOF, 1, 1, 1);
} else {
// Pass-through context (beyond chunk_size or data_end)
ctx = make_char_context(ROW_CTX_NONE, ROW_CTX_QUOTE, ROW_CTX_COMMENT);
}
}
// Merge with current context, keeping track of where new rows occur
merge_char_context(ctx_map, ctx, pos);
}
// Eliminate rows that start before byte_range_start
if (start_offset + block_pos < byte_range_start) {
uint32_t dist_minus1 = min(byte_range_start - (start_offset + block_pos) - 1, UINT64_C(31));
uint32_t mask = 0xfffffffe << dist_minus1;
ctx_map.x &= mask;
ctx_map.y &= mask;
ctx_map.z &= mask;
}
// Convert the long-form {rowmap,outctx}[inctx] version into packed version
  // {rowcount,outctx}[inctx], then merge the row contexts of the 32-character blocks into
// a single 16K-character block context
rowctx_merge_transform(ctxtree, pack_rowmaps(ctx_map), t);
// If this is the second phase, get the block's initial parser state and row counter
if (offsets_out.data()) {
if (t == 0) { ctxtree[0] = row_ctx[blockIdx.x]; }
__syncthreads();
// Walk back the transform tree with the known initial parser state
rowctx32_t ctx = rowctx_inverse_merge_transform(ctxtree, t);
uint64_t row = (ctxtree[0] >> 2) + (ctx >> 2);
uint32_t rows_out_of_range = 0;
uint32_t rowmap = select_rowmap(ctx_map, ctx & 3);
// Output row positions
while (rowmap != 0) {
uint32_t pos = __ffs(rowmap);
block_pos += pos;
if (row >= skip_rows && row - skip_rows < offsets_out.size()) {
// Output byte offsets are relative to the base of the input buffer
offsets_out[row - skip_rows] = block_pos - 1;
rows_out_of_range += (start_offset + block_pos - 1 >= byte_range_end);
}
row++;
rowmap >>= pos;
}
// Return the number of rows out of range
rows_out_of_range = half_warp_reduce(temp_storage.half[t / 32]).Sum(rows_out_of_range);
__syncthreads();
if (!(t & 0xf)) { ctxtree[t >> 4] = rows_out_of_range; }
__syncthreads();
if (t < 32) {
rows_out_of_range = warp_reduce(temp_storage.full).Sum(static_cast<uint32_t>(ctxtree[t]));
if (t == 0) { row_ctx[blockIdx.x] = rows_out_of_range; }
}
} else {
// Just store the row counts and output contexts
if (t == 0) { row_ctx[blockIdx.x] = ctxtree[1]; }
}
}
size_t __host__ count_blank_rows(const cudf::io::parse_options_view &opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
rmm::cuda_stream_view stream)
{
const auto newline = opts.skipblanklines ? opts.terminator : opts.comment;
const auto comment = opts.comment != '\0' ? opts.comment : newline;
const auto carriage = (opts.skipblanklines && opts.terminator == '\n') ? '\r' : comment;
return thrust::count_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, newline, comment, carriage] __device__(const uint64_t pos) {
return ((pos != data.size()) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
}
void __host__ remove_blank_rows(cudf::io::parse_options_view const &options,
device_span<char const> const data,
rmm::device_vector<uint64_t> &row_offsets,
rmm::cuda_stream_view stream)
{
size_t d_size = data.size();
const auto newline = options.skipblanklines ? options.terminator : options.comment;
const auto comment = options.comment != '\0' ? options.comment : newline;
const auto carriage = (options.skipblanklines && options.terminator == '\n') ? '\r' : comment;
auto new_end = thrust::remove_if(
rmm::exec_policy(stream),
row_offsets.begin(),
row_offsets.end(),
[data = data, d_size, newline, comment, carriage] __device__(const uint64_t pos) {
return ((pos != d_size) &&
(data[pos] == newline || data[pos] == comment || data[pos] == carriage));
});
row_offsets.resize(new_end - row_offsets.begin());
}
thrust::host_vector<column_type_histogram> detect_column_types(
cudf::io::parse_options_view const &options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_starts,
size_t const num_active_columns,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
const int block_size = csvparse_block_dim;
const int grid_size = (row_starts.size() + block_size - 1) / block_size;
auto d_stats = rmm::device_vector<column_type_histogram>(num_active_columns);
data_type_detection<<<grid_size, block_size, 0, stream.value()>>>(
options, data, column_flags, row_starts, d_stats);
return thrust::host_vector<column_type_histogram>(d_stats);
}
void __host__ decode_row_column_data(cudf::io::parse_options_view const &options,
device_span<char const> const data,
device_span<column_parse::flags const> const column_flags,
device_span<uint64_t const> const row_offsets,
device_span<cudf::data_type const> const dtypes,
device_span<void *> const columns,
device_span<cudf::bitmask_type *> const valids,
rmm::cuda_stream_view stream)
{
// Calculate actual block count to use based on records count
auto const block_size = csvparse_block_dim;
auto const num_rows = row_offsets.size() - 1;
auto const grid_size = (num_rows + block_size - 1) / block_size;
convert_csv_to_cudf<<<grid_size, block_size, 0, stream.value()>>>(
options, data, column_flags, row_offsets, dtypes, columns, valids);
}
uint32_t __host__ gather_row_offsets(const parse_options_view &options,
uint64_t *row_ctx,
device_span<uint64_t> const offsets_out,
device_span<char const> const data,
size_t chunk_size,
size_t parse_pos,
size_t start_offset,
size_t data_size,
size_t byte_range_start,
size_t byte_range_end,
size_t skip_rows,
rmm::cuda_stream_view stream)
{
uint32_t dim_grid = 1 + (chunk_size / rowofs_block_bytes);
gather_row_offsets_gpu<<<dim_grid, rowofs_block_dim, 0, stream.value()>>>(
row_ctx,
offsets_out,
data,
chunk_size,
parse_pos,
start_offset,
data_size,
byte_range_start,
byte_range_end,
skip_rows,
options.terminator,
options.delimiter,
(options.quotechar) ? options.quotechar : 0x100,
/*(options.escapechar) ? options.escapechar :*/ 0x100,
(options.comment) ? options.comment : 0x100);
return dim_grid;
}
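/*
 * A minimal sketch of how a caller might drive the two phases (the names
 * h_row_ctx/d_row_ctx and the host-side use of get_row_context are
 * assumptions, not part of this file):
 *
 *   uint32_t nblocks = gather_row_offsets(opts, d_row_ctx, {}, data, ...);
 *   // copy d_row_ctx to h_row_ctx, then thread the context serially:
 *   uint32_t ctx = ROW_CTX_NONE;
 *   uint64_t rows = 0;
 *   for (uint32_t b = 0; b < nblocks; b++) {
 *     rowctx32_t out = get_row_context(h_row_ctx[b], ctx);
 *     h_row_ctx[b]   = (rows << 2) | ctx;  // starting state for this block
 *     rows += out >> 2;
 *     ctx = out & 3;
 *   }
 *   // copy h_row_ctx back, size offsets_out to the total row count, and
 *   // call gather_row_offsets again for phase 2
 */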
} // namespace gpu
} // namespace csv
} // namespace io
} // namespace cudf
|
e253916354dac8ddfc8eb2fcf8a3f1ca411c1ee6.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *coherent_pos;
glm::vec3 *coherent_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount; //num of cells in entire grid
int gridSideCount; //num of cells along one side in the grid
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
  //GridCellWidth is 2 times the neighborhood distance
  //This means we only need to search the surrounding 8 neighboring grid cells
  //gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
  //Changing gridCellWidth to be the neighborhood distance
  //Note: this requires searching the surrounding 27 neighboring grid cells
gridCellWidth = ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
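  //Worked example with the defaults above: gridCellWidth = max(5, 3, 5) = 5,
  //halfSideCount = (int)(100 / 5) + 1 = 21, gridSideCount = 42,
  //gridCellCount = 42^3 = 74088, halfGridWidth = 105,
  //gridMinimum = (-105, -105, -105)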
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&coherent_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc coherent_pos failed!");
hipMalloc((void**)&coherent_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc coherent_vel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
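  // In equation form, per boid i over neighbors j within each rule's distance:
  //   v1 = rule1Scale * (mean_j(pos_j) - pos_i)   (cohesion)
  //   v2 = rule2Scale * sum_j(pos_i - pos_j)      (separation)
  //   v3 = rule3Scale * mean_j(vel_j)             (alignment)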
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
for (int j = 0; j < N; j++)
{
//if (j == iSelf) continue;
float distance = glm::distance(pos[j], pos[iSelf]);
if (j != iSelf)
{
if (distance < rule1Distance)
{
v1 += pos[j];
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (pos[j] - pos[iSelf]);
}
if (distance < rule3Distance)
{
v3 += vel[j];
neighborCount2++;
}
}//end if j!= iself
}//end for loop
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - pos[iSelf]) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
return v1 + v2 + v3;
}
// --------------------------------------------------------------------------------------------
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
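  // (Answer: other threads may still be reading their neighbors' old
  // velocities from vel1; writing into vel1 here would be a data race.)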
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 totalVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// --------------------------------------------------------------------------------------------
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// ===========================================================================================
/*
COMPUTE INDICES FUNCTIONS
*/
// ===========================================================================================
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
// --------------------------------------------------------------------------------------------
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
  //Offset by gridMin b/c the simulation cube is centered at (0,0,0), but the
  //grid's minimum corner sits at gridMin, so cell indices must start at 0.
  glm::ivec3 particleGridPos = glm::floor((pos[index] - gridMin) * inverseCellWidth);
  //No clamp needed here: kernUpdatePos wraps positions to +/- scene_scale and
  //halfSideCount rounds up, so the floored index stays in [0, gridResolution - 1]
int particleGridIdx = gridIndex3Dto1D(particleGridPos.x, particleGridPos.y, particleGridPos.z, gridResolution);
indices[index] = index;
gridIndices[index] = particleGridIdx;
}
// --------------------------------------------------------------------------------------------
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
// --------------------------------------------------------------------------------------------
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
  //Edge cases:
  //Since you're always filling in the start for index + 1, you need to separately figure out start for first index
  //Since you're always filling the end for index, you need to separately figure out end for last index
  //(the first particle's cell always starts at 0 and the last particle's cell always ends at N - 1)
  if (index == 0)
  {
    gridCellStartIndices[particleGridIndices[index]] = 0;
  }
  if (index == N - 1)
  {
    gridCellEndIndices[particleGridIndices[index]] = N - 1;
    //Must return here: the comparison below would read particleGridIndices[N],
    //one element past the end of the array
    return;
  }
  //Fill in the start of index + 1 and the end of index
  //only checking in particleGridIndices where the values differ
  if (particleGridIndices[index] != particleGridIndices[index + 1])
  {
    gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
    gridCellEndIndices[particleGridIndices[index]] = index;
  }
}
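// Worked example: with sorted particleGridIndices = [2, 2, 4, 4, 4, 7] (N = 6),
// the threads fill gridCellStartIndices[2] = 0, [4] = 2, [7] = 5 and
// gridCellEndIndices[2] = 1, [4] = 4, [7] = 5; cells 0, 1, 3, 5, 6 keep the
// -1 sentinel written by kernResetIntBuffer.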
// --------------------------------------------------------------------------------------------
__global__ void kernComputeCoherentBuffers(int N, int *particleArrayIndices,
const glm::vec3 *pos, const glm::vec3 *vel,
glm::vec3 *coher_pos, glm::vec3 *coher_vel)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//Reorder dev_pos and dev_vel1 to be in order to particleArrayIndices
//Store in coher_pos and coher_vel
int particleIdx = particleArrayIndices[index];
coher_pos[index] = pos[particleIdx];
coher_vel[index] = vel[particleIdx];
}
// ===========================================================================================
/*
UPDATE VELOCITY FUNCTIONS
*/
// ===========================================================================================
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 currParticlePos = pos[index];
glm::vec3 currParticleVel = vel1[index];
  glm::ivec3 particleGridPos = glm::floor((currParticlePos - gridMin) * inverseCellWidth);
//Iterate through neighbors of current grid cell
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
//Loop through to get the indices of all the particles within the neighborhood of the current particle at index
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
        //Skip neighbor cells that fall outside the grid; boundary cells would
        //otherwise index past the ends of the start/end index arrays
        int nx = particleGridPos.x + i;
        int ny = particleGridPos.y + j;
        int nz = particleGridPos.z + k;
        if (nx < 0 || ny < 0 || nz < 0 ||
          nx >= gridResolution || ny >= gridResolution || nz >= gridResolution)
        {
          continue;
        }
        int particleGridIdx = gridIndex3Dto1D(nx, ny, nz, gridResolution);
int startIdx = gridCellStartIndices[particleGridIdx];
int endIdx = gridCellEndIndices[particleGridIdx];
if (startIdx != -1) //Shouldn't need to check endIdx b/c startIdx and endIdx should be -1 in the same places
{
for (int n = startIdx; n <= endIdx; n++)
{
glm::vec3 currNeighboringParticlePos = pos[particleArrayIndices[n]];
glm::vec3 currNeighboringParticleVel = vel1[particleArrayIndices[n]];
//Distance between current neighboring boid and boid at index
float distance = glm::distance(currNeighboringParticlePos, currParticlePos);
            if (particleArrayIndices[n] != index) //n is a sorted rank; compare the boid id, not n
{
if (distance < rule1Distance)
{
v1 += currNeighboringParticlePos;
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (currNeighboringParticlePos - currParticlePos);
}
if (distance < rule3Distance)
{
v3 += currNeighboringParticleVel;
neighborCount2++;
}
            }//end if not self
}//end for n
}//end if startIdx != -1
}//end for k
}//end for j
}//end for i
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - currParticlePos) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
glm::vec3 totalVel = currParticleVel + v1 + v2 + v3;
//Clamp the speed change
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// --------------------------------------------------------------------------------------------
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 currParticlePos = pos[index];
glm::vec3 currParticleVel = vel1[index];
  glm::ivec3 particleGridPos = glm::floor((currParticlePos - gridMin) * inverseCellWidth);
//Iterate through neighbors of current grid cell
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
        //Skip neighbor cells that fall outside the grid; boundary cells would
        //otherwise index past the ends of the start/end index arrays
        int nx = particleGridPos.x + i;
        int ny = particleGridPos.y + j;
        int nz = particleGridPos.z + k;
        if (nx < 0 || ny < 0 || nz < 0 ||
          nx >= gridResolution || ny >= gridResolution || nz >= gridResolution)
        {
          continue;
        }
        int particleGridIdx = gridIndex3Dto1D(nx, ny, nz, gridResolution);
int startIdx = gridCellStartIndices[particleGridIdx];
int endIdx = gridCellEndIndices[particleGridIdx];
if (startIdx != -1) //Shouldn't need to check endIdx b/c startIdx and endIdx should be -1 in the same places
{
for (int n = startIdx; n <= endIdx; n++)
{
glm::vec3 currNeighboringParticlePos = pos[n];
glm::vec3 currNeighboringParticleVel = vel1[n];
          //Distance between current neighboring boid and boid at index
float distance = glm::distance(currNeighboringParticlePos, currParticlePos);
if (n != index)
{
if (distance < rule1Distance)
{
v1 += currNeighboringParticlePos;
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (currNeighboringParticlePos - currParticlePos);
}
if (distance < rule3Distance)
{
v3 += currNeighboringParticleVel;
neighborCount2++;
}
}//end if n!= index
}//end for n
}//end if startIdx != -1
}//end for k
}//end for j
}//end for i
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - currParticlePos) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
glm::vec3 totalVel = currParticleVel + v1 + v2 + v3;
//Clamp the speed change
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// ===========================================================================================
/*
STEP SIMULATION FUNCTIONS
*/
// ===========================================================================================
/*
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGridParticle((numObjects + blockSize - 1) / blockSize);
//Make sure to initialize start and end arrays to -1
//Update them in kernIdentifyCellStartEnd
//-1 signifies no particles in that current gridcell
//Need this dim for resetintbuffers --> start and end index arrays are based on grid cell count, not num particles
dim3 fullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellStartIndices failed!");
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellEndIndices failed!");
//Calculate the particleArrayIndices and particleGridIndices
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//hipDeviceSynchronize();
//Sort particleArrayIndices by gridIndices
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
checkCUDAErrorWithLine("sorting by thrust key values failed!");
//Calculate the startIdxArray and endIdxArray
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
//Calculate the total velocity based on neighbors
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellStartIndices failed!");
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellEndIndices failed!");
dim3 fullBlocksPerGridParticle((numObjects + blockSize - 1) / blockSize);
//Calculate the particleArrayIndices and particleGridIndices
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//hipDeviceSynchronize();
//Sort particleArrayIndices by gridIndices
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
checkCUDAErrorWithLine("sorting by thrust key values failed!");
//Calculate the startIdxArray and endIdxArray
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
hipLaunchKernelGGL(( kernComputeCoherentBuffers), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
dev_particleArrayIndices, dev_pos, dev_vel1, coherent_pos, coherent_vel);
//Calculate the total velocity based on neighbors
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
coherent_pos, coherent_vel, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGridParticle), dim3(blockSize), 0, 0, numObjects, dt, coherent_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_pos, coherent_pos);
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(coherent_pos);
hipFree(coherent_vel);
}
// --------------------------------------------------------------------------------------------
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
/*
NOTE:
thrust::device_ptr stores a pointer to an object allocated in device memory.
This type provides type safety when dispatching standard algorithms on ranges resident in device memory.
  Basically it wraps up the raw pointers for thrust algorithms (such as sort) to use.
*/
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| e253916354dac8ddfc8eb2fcf8a3f1ca411c1ee6.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *coherent_pos;
glm::vec3 *coherent_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount; //num of cells in entire grid
int gridSideCount; //num of cells along one side in the grid
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for generating boid positions randomly within the starting area.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
//GridCellWidth is 2 times the neighborhood distance
	//This means we need to search through the surrounding 8 neighboring grid cells
//gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
//Changing gridCellWidth to be neighborhood distance
	//Note: we would need to search through the surrounding 27 neighboring grid cells
gridCellWidth = std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&coherent_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc coherent_pos failed!");
cudaMalloc((void**)&coherent_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc coherent_vel failed!");
	cudaDeviceSynchronize(); //cudaThreadSynchronize is deprecated
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
 * Wrapper for calls to the kernCopyPositionsToVBO and kernCopyVelocitiesToVBO kernels.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
	cudaDeviceSynchronize(); //cudaThreadSynchronize is deprecated
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
for (int j = 0; j < N; j++)
{
//if (j == iSelf) continue;
float distance = glm::distance(pos[j], pos[iSelf]);
if (j != iSelf)
{
if (distance < rule1Distance)
{
v1 += pos[j];
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (pos[j] - pos[iSelf]);
}
if (distance < rule3Distance)
{
v3 += vel[j];
neighborCount2++;
}
}//end if j!= iself
}//end for loop
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - pos[iSelf]) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
return v1 + v2 + v3;
}
// --------------------------------------------------------------------------------------------
/**
* TODO-1.2 implement basic flocking
 * For each of the `N` bodies, compute its new velocity based on the boids rules.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 totalVel = vel1[index] + computeVelocityChange(N, index, pos, vel1);
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// --------------------------------------------------------------------------------------------
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// ===========================================================================================
/*
COMPUTE INDICES FUNCTIONS
*/
// ===========================================================================================
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
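// One answer to the LOOK-2.3 question above: x is the fastest-varying term in
// the 1D index, so iterating for(z) { for(y) { for(x) } } with x innermost
// walks consecutive cell indices and therefore consecutive memory.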
// --------------------------------------------------------------------------------------------
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
	//Shift by gridMin before scaling: the simulation cube is centered at (0,0,0), but the grid's minimum corner sits at gridMinimum.
glm::ivec3 particleGridPos = glm::round((pos[index] - gridMin) * inverseCellWidth);
//[QUESTION] Do I need to clamp so that position doesn't produce an index out of range? 0 to gridResolution - 1?
int particleGridIdx = gridIndex3Dto1D(particleGridPos.x, particleGridPos.y, particleGridPos.z, gridResolution);
indices[index] = index;
gridIndices[index] = particleGridIdx;
}
// --------------------------------------------------------------------------------------------
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
// --------------------------------------------------------------------------------------------
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//Edge cases:
//Since you're always filling in the start for index + 1, you need to separately figure out start for first index
//Since you're always filling the end for index, you need to separately figure out end for last index
//Note: this is NOT ideal since this assumes there are always particles in the first and last grid cells
if (index == 0)
{
gridCellStartIndices[particleGridIndices[index]] = 0;
}
	if (index == N - 1)
	{
		gridCellEndIndices[particleGridIndices[index]] = N - 1;
		return; //there is no index + 1 to compare; returning avoids reading past the array
	}
//Fill in the start of index + 1 and the end of index
//only checking in particleGridIndices where the values differ
if (particleGridIndices[index] != particleGridIndices[index + 1])
{
gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
gridCellEndIndices[particleGridIndices[index]] = index;
}
}
// --------------------------------------------------------------------------------------------
__global__ void kernComputeCoherentBuffers(int N, int *particleArrayIndices,
const glm::vec3 *pos, const glm::vec3 *vel,
glm::vec3 *coher_pos, glm::vec3 *coher_vel)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
//Reorder dev_pos and dev_vel1 to be in order to particleArrayIndices
//Store in coher_pos and coher_vel
int particleIdx = particleArrayIndices[index];
coher_pos[index] = pos[particleIdx];
coher_vel[index] = vel[particleIdx];
}
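// Why the reshuffle pays off: boids sharing a grid cell now sit contiguously
// in coherent_pos/coherent_vel, so the neighbor loops in the coherent kernel
// read them sequentially instead of chasing particleArrayIndices indirections.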
// ===========================================================================================
/*
UPDATE VELOCITY FUNCTIONS
*/
// ===========================================================================================
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 currParticlePos = pos[index];
glm::vec3 currParticleVel = vel1[index];
glm::ivec3 particleGridPos = glm::round((currParticlePos - gridMin) * inverseCellWidth);
//Iterate through neighbors of current grid cell
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
//Loop through to get the indices of all the particles within the neighborhood of the current particle at index
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
				//Clamp: skip neighbor cells outside the grid to avoid out-of-bounds reads
				glm::ivec3 nPos = particleGridPos + glm::ivec3(i, j, k);
				if (nPos.x < 0 || nPos.y < 0 || nPos.z < 0 || nPos.x >= gridResolution || nPos.y >= gridResolution || nPos.z >= gridResolution) continue;
				int particleGridIdx = gridIndex3Dto1D(nPos.x, nPos.y, nPos.z, gridResolution);
int startIdx = gridCellStartIndices[particleGridIdx];
int endIdx = gridCellEndIndices[particleGridIdx];
if (startIdx != -1) //Shouldn't need to check endIdx b/c startIdx and endIdx should be -1 in the same places
{
for (int n = startIdx; n <= endIdx; n++)
{
glm::vec3 currNeighboringParticlePos = pos[particleArrayIndices[n]];
glm::vec3 currNeighboringParticleVel = vel1[particleArrayIndices[n]];
//Distance between current neighboring boid and boid at index
float distance = glm::distance(currNeighboringParticlePos, currParticlePos);
if (n != index)
{
if (distance < rule1Distance)
{
v1 += currNeighboringParticlePos;
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (currNeighboringParticlePos - currParticlePos);
}
if (distance < rule3Distance)
{
v3 += currNeighboringParticleVel;
neighborCount2++;
}
}//end if n!= index
}//end for n
}//end if startIdx != -1
}//end for k
}//end for j
}//end for i
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - currParticlePos) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
glm::vec3 totalVel = currParticleVel + v1 + v2 + v3;
//Clamp the speed change
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// --------------------------------------------------------------------------------------------
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 currParticlePos = pos[index];
glm::vec3 currParticleVel = vel1[index];
glm::ivec3 particleGridPos = glm::round((currParticlePos - gridMin) * inverseCellWidth);
//Iterate through neighbors of current grid cell
glm::vec3 v1 = glm::vec3(0.0f);
glm::vec3 v2 = glm::vec3(0.0f);
glm::vec3 v3 = glm::vec3(0.0f);
int neighborCount1 = 0;
int neighborCount2 = 0;
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
for (int k = -1; k < 2; k++) {
				//Clamp: skip neighbor cells outside the grid to avoid out-of-bounds reads
				glm::ivec3 nPos = particleGridPos + glm::ivec3(i, j, k);
				if (nPos.x < 0 || nPos.y < 0 || nPos.z < 0 || nPos.x >= gridResolution || nPos.y >= gridResolution || nPos.z >= gridResolution) continue;
				int particleGridIdx = gridIndex3Dto1D(nPos.x, nPos.y, nPos.z, gridResolution);
int startIdx = gridCellStartIndices[particleGridIdx];
int endIdx = gridCellEndIndices[particleGridIdx];
if (startIdx != -1) //Shouldn't need to check endIdx b/c startIdx and endIdx should be -1 in the same places
{
for (int n = startIdx; n <= endIdx; n++)
{
glm::vec3 currNeighboringParticlePos = pos[n];
glm::vec3 currNeighboringParticleVel = vel1[n];
					//Distance between current neighboring boid and boid at index
float distance = glm::distance(currNeighboringParticlePos, currParticlePos);
if (n != index)
{
if (distance < rule1Distance)
{
v1 += currNeighboringParticlePos;
neighborCount1++;
}
if (distance < rule2Distance)
{
v2 -= (currNeighboringParticlePos - currParticlePos);
}
if (distance < rule3Distance)
{
v3 += currNeighboringParticleVel;
neighborCount2++;
}
}//end if n!= index
}//end for n
}//end if startIdx != -1
}//end for k
}//end for j
}//end for i
if (neighborCount1 > 0)
{
v1 /= neighborCount1;
v1 = (v1 - currParticlePos) * rule1Scale;
}
if (neighborCount2 > 0)
{
v3 /= neighborCount2;
v3 *= rule3Scale;
}
v2 *= rule2Scale;
glm::vec3 totalVel = currParticleVel + v1 + v2 + v3;
//Clamp the speed change
if (glm::length(totalVel) > maxSpeed)
{
totalVel = glm::normalize(totalVel) * maxSpeed;
}
vel2[index] = totalVel;
}
// ===========================================================================================
/*
STEP SIMULATION FUNCTIONS
*/
// ===========================================================================================
/*
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGridParticle((numObjects + blockSize - 1) / blockSize);
//Make sure to initialize start and end arrays to -1
//Update them in kernIdentifyCellStartEnd
	//-1 signifies there are no particles in that grid cell
//Need this dim for resetintbuffers --> start and end index arrays are based on grid cell count, not num particles
dim3 fullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellStartIndices failed!");
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellEndIndices failed!");
//Calculate the particleArrayIndices and particleGridIndices
kernComputeIndices<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//cudaThreadSynchronize();
//Sort particleArrayIndices by gridIndices
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
checkCUDAErrorWithLine("sorting by thrust key values failed!");
//Calculate the startIdxArray and endIdxArray
kernIdentifyCellStartEnd<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
//Calculate the total velocity based on neighbors
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices,
dev_pos, dev_vel1, dev_vel2);
kernUpdatePos<<<fullBlocksPerGridParticle, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellStartIndices failed!");
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize>>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for gridCellEndIndices failed!");
dim3 fullBlocksPerGridParticle((numObjects + blockSize - 1) / blockSize);
//Calculate the particleArrayIndices and particleGridIndices
kernComputeIndices<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth,
dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//cudaThreadSynchronize();
//Sort particleArrayIndices by gridIndices
thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
checkCUDAErrorWithLine("sorting by thrust key values failed!");
//Calculate the startIdxArray and endIdxArray
kernIdentifyCellStartEnd<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernComputeCoherentBuffers<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
dev_particleArrayIndices, dev_pos, dev_vel1, coherent_pos, coherent_vel);
//Calculate the total velocity based on neighbors
kernUpdateVelNeighborSearchCoherent<<<fullBlocksPerGridParticle, blockSize>>>(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth,
dev_gridCellStartIndices, dev_gridCellEndIndices,
coherent_pos, coherent_vel, dev_vel2);
kernUpdatePos<<<fullBlocksPerGridParticle, blockSize>>>(numObjects, dt, coherent_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
std::swap(dev_pos, coherent_pos);
std::swap(dev_vel1, dev_vel2);
}
// --------------------------------------------------------------------------------------------
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(coherent_pos);
cudaFree(coherent_vel);
}
// --------------------------------------------------------------------------------------------
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
/*
NOTE:
thrust::device_ptr stores a pointer to an object allocated in device memory.
This type provides type safety when dispatching standard algorithms on ranges resident in device memory.
Basically it wraps up the raw pointers for those thrust algorithm (such as sort) to use.
*/
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
03f206def27c29ca103c60f936550236626c6677.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelFeedForward2(float *zs,float *biases,int b_off,float *activations) {
zs[threadIdx.x]+=biases[b_off+threadIdx.x];
activations[threadIdx.x]=1.0/(1.0+expf(-zs[threadIdx.x]));
} | 03f206def27c29ca103c60f936550236626c6677.cu | #include "includes.h"
__global__ void kernelFeedForward2(float *zs,float *biases,int b_off,float *activations) {
zs[threadIdx.x]+=biases[b_off+threadIdx.x];
activations[threadIdx.x]=1.0/(1.0+expf(-zs[threadIdx.x]));
} |
e6152bac250b9343a777344a7555963b2b9636be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Ogolne.cuh"
__global__ void randInit(hiprandState_t* state, int seed) {
int threadIndex = threadIdx.x;
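	// hiprand_init(seed, sequence, offset, state): passing the thread index as
	// the sequence number gives every thread an independent stream from one seed.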
hiprand_init(seed, threadIndex, 0, &state[threadIndex]);
}
| e6152bac250b9343a777344a7555963b2b9636be.cu | #include "Ogolne.cuh"
__global__ void randInit(curandState* state, int seed) {
int threadIndex = threadIdx.x;
curand_init(seed, threadIndex, 0, &state[threadIndex]);
}
|
ecada611ef067d3ac95a806052c2ff71d797dce6.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <cassert>
int checkResults(float*res, float* cudaRes,int length)
{
int nDiffs=0;
const float smallVal = 0.2f; // Keeping this extra high as we have repetitive addition and sequence matters
for(int i=0; i<length; i++)
if(fabs(cudaRes[i]-res[i])>smallVal){
nDiffs++;
}
return nDiffs;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
int r=fscanf(fp,"%f",&arr[i]);
if(r == EOF){
rewind(fp);
}
arr[i]-=5; // This is to make the data zero mean. Otherwise we reach large numbers and lose precision
}
}
void inclusiveScan_SEQ(float *in, float *out,int length) {
float sum=0.f;
for (int i =0; i < length; i++) {
sum+=in[i];
out[i]=sum;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N=atoi(argv[1]);
	FILE *fp = fopen("problem1.inp","r");
	if(fp == NULL){ printf("Unable to open problem1.inp\n"); return 1; }
int size = N * sizeof(float);
//allocate resources
float *h_in = (float *)malloc(size);
float *h_out = (float *)malloc(size);
float *cuda_out= (float *)malloc(size);
float time = 0.f;
initializeArray(fp,h_in, N);
//start inclusive timing
hipEvent_t startIn,stopIn;
hipEventCreate(&startIn);
hipEventCreate(&stopIn);
hipEventRecord(startIn, 0);
float *d_in;
//float *d_out;
hipMalloc(&d_in, size);
//hipMalloc(&d_out, size);
//copy the memory to device
	hipError_t copyErr = hipMemcpy(d_in, h_in, size, hipMemcpyHostToDevice);
	assert(hipSuccess == copyErr); //keep the copy outside assert so it survives NDEBUG builds
//set up the pointer
thrust::device_ptr<float> dev_ptr(d_in);
//perform in-place inclusive scan
thrust::inclusive_scan(dev_ptr,dev_ptr + N, dev_ptr);
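	//Worked example: an inclusive scan of {3, 1, 4, 1} yields {3, 4, 8, 9};
	//each output is the sum of all inputs up to and including that position.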
hipMemcpy(cuda_out, d_in, size, hipMemcpyDeviceToHost);
//stop inclusive timing
hipEventRecord(stopIn, 0);
hipEventSynchronize(stopIn);
hipEventElapsedTime(&time, startIn, stopIn);
hipEventDestroy(startIn);
hipEventDestroy(stopIn);
inclusiveScan_SEQ(h_in, h_out,N);
int nDiffs = checkResults(h_out, cuda_out,N);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
//printf("%f\n", time);
//free resources
	free(h_in); free(h_out); free(cuda_out);
	hipFree(d_in);
	fclose(fp);
return 0;
}
| ecada611ef067d3ac95a806052c2ff71d797dce6.cu | #include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <cassert>
int checkResults(float*res, float* cudaRes,int length)
{
int nDiffs=0;
const float smallVal = 0.2f; // Keeping this extra high as we have repetitive addition and sequence matters
for(int i=0; i<length; i++)
if(fabs(cudaRes[i]-res[i])>smallVal){
nDiffs++;
}
return nDiffs;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
int r=fscanf(fp,"%f",&arr[i]);
if(r == EOF){
rewind(fp);
}
arr[i]-=5; // This is to make the data zero mean. Otherwise we reach large numbers and lose precision
}
}
void inclusiveScan_SEQ(float *in, float *out,int length) {
float sum=0.f;
for (int i =0; i < length; i++) {
sum+=in[i];
out[i]=sum;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N=atoi(argv[1]);
FILE *fp = fopen("problem1.inp","r");
int size = N * sizeof(float);
//allocate resources
float *h_in = (float *)malloc(size);
float *h_out = (float *)malloc(size);
float *cuda_out= (float *)malloc(size);
float time = 0.f;
initializeArray(fp,h_in, N);
//start inclusive timing
cudaEvent_t startIn,stopIn;
cudaEventCreate(&startIn);
cudaEventCreate(&stopIn);
cudaEventRecord(startIn, 0);
float *d_in;
//float *d_out;
cudaMalloc(&d_in, size);
//cudaMalloc(&d_out, size);
//copy the memory to device
	cudaError_t copyErr = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
	assert(cudaSuccess == copyErr); //keep the copy outside assert so it survives NDEBUG builds
//set up the pointer
thrust::device_ptr<float> dev_ptr(d_in);
//perform in-place inclusive scan
thrust::inclusive_scan(dev_ptr,dev_ptr + N, dev_ptr);
cudaMemcpy(cuda_out, d_in, size, cudaMemcpyDeviceToHost);
//stop inclusive timing
cudaEventRecord(stopIn, 0);
cudaEventSynchronize(stopIn);
cudaEventElapsedTime(&time, startIn, stopIn);
cudaEventDestroy(startIn);
cudaEventDestroy(stopIn);
inclusiveScan_SEQ(h_in, h_out,N);
int nDiffs = checkResults(h_out, cuda_out,N);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%d\n%f\n%f\n",N,cuda_out[N-1],time);
//printf("%f\n", time);
//free resources
	free(h_in); free(h_out); free(cuda_out);
	cudaFree(d_in);
	fclose(fp);
return 0;
}
|
3d97e508db2830f04050aea460d4be4a921d566d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////
/* VOIGT PROFILE */
//////////////////////////////////////////////////////////
__global__ void device_compute_cross_section_voigt_stepone(double* g_energies,const int* __restrict__ g_gns,const double* __restrict__ g_nu,const double* __restrict__ g_aif, const int N_ener){
//The stored shared data
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
double ei,gns,nu_if,aif,abscoef;
double temp_2 = 1.0;//cross_constants.ln2pi/cross_constants.halfwidth;
//if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
if(g_idx < N_ener){
//Store values in local memory
ei = g_energies[g_idx];
gns = g_gns[g_idx];
nu_if = g_nu[g_idx];
aif = g_aif[g_idx];
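		//Standard line-intensity form: Einstein A coefficient scaled by the
		//degeneracy gns, a Boltzmann population factor exp(-beta*E), the
		//stimulated-emission correction (1 - exp(-beta*nu)) and the partition
		//function; cmcoef presumably bundles the remaining physical constants.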
abscoef= cross_constants.cmcoef*temp_2*aif*gns
*exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
(nu_if*nu_if*cross_constants.partition);
if(nu_if==0)abscoef=0.0;
g_energies[g_idx] = abscoef;
}
}
__global__ void device_compute_cross_section_voigt_stepone(double* g_energies,const int* g_gns,const double* g_nu,const double* g_aif,double* g_gamma,double* g_n, const int N_ener){
//The stored shared data
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
double ei,gns,nu_if,aif,abscoef;
double gammaL;
//cross_constants.ln2pi/cross_constants.halfwidth;
//if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
if(g_idx < N_ener){
//Store values in local memory
ei = g_energies[g_idx];
gns = g_gns[g_idx];
nu_if = g_nu[g_idx];
aif = g_aif[g_idx];
if(nu_if==0) nu_if = 1e-6;
abscoef= cross_constants.cmcoef*aif*gns
*exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
(nu_if*nu_if*cross_constants.partition);
if(gns==-1) abscoef = aif;
g_energies[g_idx] = abscoef;
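		//Pressure broadening in the usual HITRAN-style form: reference
		//Lorentzian width at 296 K scaled by (296/T)^n and the total pressure.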
gammaL = g_gamma[g_idx]*pow(296.0/cross_constants.temperature,g_n[g_idx])*cross_constants.pressure;
g_gamma[g_idx] = gammaL;
//if(threadIdx.x == 0) printf("%14.2E %14.2E\n",abscoef,gammaL) ;
}
}
__global__ void device_compute_cross_section_voigt_steptwo(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const int N,const int N_ener,const int start_idx){
//The stored shared data
__shared__ double l_nu[VOIGT_SHARED_SIZE];
__shared__ double l_abscoef[VOIGT_SHARED_SIZE];
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
int l_idx = threadIdx.x;
	//int block_dim = VOIGT_SHARED_SIZE; //unused
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double gammaG=0.05,gammaL=0.05,x,y;
double dpwcoeff = sqrt(2.0*LN2*BOLTZ*cross_constants.temperature/(cross_constants.mean_mass))/VELLGT;
//double nu_if;
//if(g_idx == 0) printf("BLOCK_SIZE = %d\n",blockDim.x);
if(g_idx < N){
freq = g_freq[start_idx+g_idx];
//cs_val = g_cs[start_idx+g_idx];
}
//if(g_idx==9999) printf("%12.6f\n",freq);
for(int i = 0; i < N_ener; i+=VOIGT_SHARED_SIZE){
l_nu[l_idx] = 1.0;
l_abscoef[l_idx] = 0.0;
if(i + l_idx < N_ener)
{
l_nu[l_idx] = g_nu[i + l_idx];
l_abscoef[l_idx] = g_abscoef[i + l_idx];
}
__syncthreads();
for(int j = 0; j < VOIGT_SHARED_SIZE; j++){
dfreq_=l_nu[j]-freq;
gammaG = l_nu[j]*dpwcoeff;
x =SQRTLN2*abs(dfreq_)/gammaG;
y =SQRTLN2*gammaL/gammaG;
double xxyy = x * x + y * y;
//Algorithm 916
if(xxyy < 100.0){
cs_val+=l_abscoef[j]*SQRTLN2PI/(gammaG)*y*voigt_916(x,y,1.0);
}
else{
//3-point gauss hermite
cs_val+=l_abscoef[j]*(SQRTLN2PI/gammaG)*voigt_threegausshermite(x,y,xxyy);
}
//*__expf(temp_3*dfreq_*dfreq_);
}
__syncthreads();
}
if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val;
}
__global__ void device_compute_cross_section_voigt_steptwo_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const int N,const int N_ener,const int start_idx){
//The stored shared data
//__shared__ double l_nu[BLOCK_SIZE];
//__shared__ double l_abscoef[BLOCK_SIZE];
__shared__ double l_cs_result[VOIGT_BLOCK];
//Get the global and local thread number
int b_idx = blockIdx.x;
int l_idx = threadIdx.x;
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double nu = 0.0;
double gammaG=0.05,gammaL=0.05;
double x,y;
double dpwcoeff = sqrt(2.0*LN2*BOLTZ*cross_constants.temperature/(cross_constants.mean_mass))/VELLGT;
//double temp_2=cross_constants.ln2pi/cross_constants.halfwidth;
//double temp_3 = -cross_constants.ln2*(1.0/(cross_constants.halfwidth*cross_constants.halfwidth));
freq = g_freq[start_idx + b_idx];
//cs_val = g_cs[start_idx+g_idx];
//if(g_idx==9999) printf("%12.6f\n",freq);
l_cs_result[l_idx] = cs_val;
for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){
nu = 0.0;
//Read value of nu
nu = g_nu[i];
dfreq_ = nu-freq;
if(dfreq_ < -500.0*gammaL)
continue;
if(dfreq_ > 500.0*gammaL)
break;
gammaG = nu*dpwcoeff;
x =SQRTLN2*dfreq_/gammaG;
y =SQRTLN2*gammaL/gammaG;
double xxyy = x * x + y * y;
////Algorithm 916
if(xxyy < 100.0){
cs_val+=g_abscoef[i]*SQRTLN2PI/(gammaG)*y*voigt_916(x,y,1.0);
}else{
//3-point gauss hermite
cs_val+=g_abscoef[i]*(SQRTLN2PI/gammaG)*voigt_threegausshermite(x,y,xxyy);
}
}
//Store results into shared memory
l_cs_result[l_idx] = cs_val;
cs_val = 0;
	//Wait for everyone to finish nicely
__syncthreads();
if(l_idx == 0){
for(int i = 0; i < VOIGT_BLOCK; i++)
cs_val+=l_cs_result[i];
g_cs[start_idx+b_idx]+=cs_val;
}
}
__global__ void device_compute_cross_section_voigt_steptwo_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const int N,const int N_ener,const int start_idx){
//The stored shared data
//__shared__ double l_nu[BLOCK_SIZE];
//__shared__ double l_abscoef[BLOCK_SIZE];
__shared__ double l_cs_result[VOIGT_BLOCK];
//Get the global and local thread number
int b_idx = blockIdx.x;
int l_idx = threadIdx.x;
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double nu = 0.0;
double gammaG=0.05,gammaL=0.05;
double x,y;
double dpwcoeff = sqrt(2.0*BOLTZ*cross_constants.temperature*NA/((cross_constants.mean_mass)))/VELLGT;
//double temp_2=cross_constants.ln2pi/cross_constants.halfwidth;
//double temp_3 = -cross_constants.ln2*(1.0/(cross_constants.halfwidth*cross_constants.halfwidth));
freq = g_freq[start_idx + b_idx];
//cs_val = g_cs[start_idx+g_idx];
//if(g_idx==9999) printf("%12.6f\n",freq);
l_cs_result[l_idx] = cs_val;
for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){
nu = 0.0;
//Read value of nu
nu = g_nu[i];
dfreq_ = nu-freq;
gammaL = g_gamma[i];
if(dfreq_ < -500.0*gammaL)
continue;
if(dfreq_ > 500.0*gammaL)
break;
gammaG = 1.0/(nu*dpwcoeff);
x =abs(dfreq_)*gammaG;
y =gammaL*gammaG;
double xxyy = x * x + y * y;
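			//Regime split rationale (added note): Algorithm 916 evaluates the
			//Faddeeva/Voigt function accurately near the line centre; further
			//out a 3-point Gauss-Hermite quadrature suffices; in the far wings
			//the profile collapses to the Lorentzian tail y/(pi*(x^2+y^2)).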
double voigt_check;// = voigt_916(x,y,0.9);
////Algorithm 916
if(xxyy < 100.0){
voigt_check = voigt_916(x,y,0.9);
//cs_val+=g_abscoef[i]*voigt_check*gammaG*ISQRTPI;
}else if(xxyy < 1.0e6){
//3-point gauss hermite
voigt_check = voigt_threegausshermite(x,y,xxyy);
//cs_val+=g_abscoef[i]*ISQRTPI*gammaG;
}else{
voigt_check = y/(PI*xxyy);
//cs_val+= g_abscoef[i]*ISQRTPI*gammaG;
}
cs_val+=g_abscoef[i]*voigt_check*gammaG*ISQRTPI;
//if((blockIdx.x * blockDim.x + threadIdx.x)==0) printf("dfreq = %14.4E x=%14.4E y=%14.4E gammaL = %14.4E gammaG = %14.4E abscoef=%14.4E voigt=%14.4E cs_val=%14.4E\n",dfreq_,x,y,gammaL,gammaG,g_abscoef[i],voigt_check,cs_val);
}
//Store results into shared memory
l_cs_result[l_idx] = cs_val;
cs_val = 0;
	//Wait for everyone to finish nicely
__syncthreads();
if(l_idx == 0){
for(int i = 0; i < VOIGT_BLOCK; i++)
cs_val+=l_cs_result[i];
g_cs[start_idx+b_idx]+=cs_val;
}
}
| 3d97e508db2830f04050aea460d4be4a921d566d.cu | //////////////////////////////////////////////////////////
/* VOIGT PROFILE */
//////////////////////////////////////////////////////////
__global__ void device_compute_cross_section_voigt_stepone(double* g_energies,const int* __restrict__ g_gns,const double* __restrict__ g_nu,const double* __restrict__ g_aif, const int N_ener){
//The stored shared data
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
double ei,gns,nu_if,aif,abscoef;
double temp_2 = 1.0;//cross_constants.ln2pi/cross_constants.halfwidth;
//if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
if(g_idx < N_ener){
//Store values in local memory
ei = g_energies[g_idx];
gns = g_gns[g_idx];
nu_if = g_nu[g_idx];
aif = g_aif[g_idx];
abscoef= cross_constants.cmcoef*temp_2*aif*gns
*exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
(nu_if*nu_if*cross_constants.partition);
if(nu_if==0)abscoef=0.0;
g_energies[g_idx] = abscoef;
}
}
__global__ void device_compute_cross_section_voigt_stepone(double* g_energies,const int* g_gns,const double* g_nu,const double* g_aif,double* g_gamma,double* g_n, const int N_ener){
//The stored shared data
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
double ei,gns,nu_if,aif,abscoef;
double gammaL;
//cross_constants.ln2pi/cross_constants.halfwidth;
//if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition);
if(g_idx < N_ener){
//Store values in local memory
ei = g_energies[g_idx];
gns = g_gns[g_idx];
nu_if = g_nu[g_idx];
aif = g_aif[g_idx];
if(nu_if==0) nu_if = 1e-6;
abscoef= cross_constants.cmcoef*aif*gns
*exp(-cross_constants.beta*ei)*(1.0-exp(-cross_constants.beta*nu_if))/
(nu_if*nu_if*cross_constants.partition);
if(gns==-1) abscoef = aif;
g_energies[g_idx] = abscoef;
gammaL = g_gamma[g_idx]*pow(296.0/cross_constants.temperature,g_n[g_idx])*cross_constants.pressure;
g_gamma[g_idx] = gammaL;
//if(threadIdx.x == 0) printf("%14.2E %14.2E\n",abscoef,gammaL) ;
}
}
__global__ void device_compute_cross_section_voigt_steptwo(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const int N,const int N_ener,const int start_idx){
//The stored shared data
__shared__ double l_nu[VOIGT_SHARED_SIZE];
__shared__ double l_abscoef[VOIGT_SHARED_SIZE];
//Get the global and local thread number
int g_idx = blockIdx.x * blockDim.x + threadIdx.x;
int l_idx = threadIdx.x;
	//int block_dim = VOIGT_SHARED_SIZE; //unused
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double gammaG=0.05,gammaL=0.05,x,y;
double dpwcoeff = sqrt(2.0*LN2*BOLTZ*cross_constants.temperature/(cross_constants.mean_mass))/VELLGT;
//double nu_if;
//if(g_idx == 0) printf("BLOCK_SIZE = %d\n",blockDim.x);
if(g_idx < N){
freq = g_freq[start_idx+g_idx];
//cs_val = g_cs[start_idx+g_idx];
}
//if(g_idx==9999) printf("%12.6f\n",freq);
for(int i = 0; i < N_ener; i+=VOIGT_SHARED_SIZE){
l_nu[l_idx] = 1.0;
l_abscoef[l_idx] = 0.0;
if(i + l_idx < N_ener)
{
l_nu[l_idx] = g_nu[i + l_idx];
l_abscoef[l_idx] = g_abscoef[i + l_idx];
}
__syncthreads();
for(int j = 0; j < VOIGT_SHARED_SIZE; j++){
dfreq_=l_nu[j]-freq;
gammaG = l_nu[j]*dpwcoeff;
x =SQRTLN2*abs(dfreq_)/gammaG;
y =SQRTLN2*gammaL/gammaG;
double xxyy = x * x + y * y;
//Algorithm 916
if(xxyy < 100.0){
cs_val+=l_abscoef[j]*SQRTLN2PI/(gammaG)*y*voigt_916(x,y,1.0);
}
else{
//3-point gauss hermite
cs_val+=l_abscoef[j]*(SQRTLN2PI/gammaG)*voigt_threegausshermite(x,y,xxyy);
}
//*__expf(temp_3*dfreq_*dfreq_);
}
__syncthreads();
}
if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val;
}
__global__ void device_compute_cross_section_voigt_steptwo_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const int N,const int N_ener,const int start_idx){
//The stored shared data
//__shared__ double l_nu[BLOCK_SIZE];
//__shared__ double l_abscoef[BLOCK_SIZE];
__shared__ double l_cs_result[VOIGT_BLOCK];
//Get the global and local thread number
int b_idx = blockIdx.x;
int l_idx = threadIdx.x;
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double nu = 0.0;
double gammaG=0.05,gammaL=0.05;
double x,y;
double dpwcoeff = sqrt(2.0*LN2*BOLTZ*cross_constants.temperature/(cross_constants.mean_mass))/VELLGT;
//double temp_2=cross_constants.ln2pi/cross_constants.halfwidth;
//double temp_3 = -cross_constants.ln2*(1.0/(cross_constants.halfwidth*cross_constants.halfwidth));
freq = g_freq[start_idx + b_idx];
//cs_val = g_cs[start_idx+g_idx];
//if(g_idx==9999) printf("%12.6f\n",freq);
l_cs_result[l_idx] = cs_val;
for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){
nu = 0.0;
//Read value of nu
nu = g_nu[i];
dfreq_ = nu-freq;
if(dfreq_ < -500.0*gammaL)
continue;
if(dfreq_ > 500.0*gammaL)
break;
gammaG = nu*dpwcoeff;
x =SQRTLN2*dfreq_/gammaG;
y =SQRTLN2*gammaL/gammaG;
double xxyy = x * x + y * y;
////Algorithm 916
if(xxyy < 100.0){
cs_val+=g_abscoef[i]*SQRTLN2PI/(gammaG)*y*voigt_916(x,y,1.0);
}else{
//3-point gauss hermite
cs_val+=g_abscoef[i]*(SQRTLN2PI/gammaG)*voigt_threegausshermite(x,y,xxyy);
}
}
//Store results into shared memory
l_cs_result[l_idx] = cs_val;
cs_val = 0;
	//Wait for everyone to finish nicely
__syncthreads();
if(l_idx == 0){
for(int i = 0; i < VOIGT_BLOCK; i++)
cs_val+=l_cs_result[i];
g_cs[start_idx+b_idx]+=cs_val;
}
}
__global__ void device_compute_cross_section_voigt_steptwo_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const int N,const int N_ener,const int start_idx){
//The stored shared data
//__shared__ double l_nu[BLOCK_SIZE];
//__shared__ double l_abscoef[BLOCK_SIZE];
__shared__ double l_cs_result[VOIGT_BLOCK];
//Get the global and local thread number
int b_idx = blockIdx.x;
int l_idx = threadIdx.x;
double cs_val = 0.0;
double dfreq_=0.0;
double freq = 0.0;
double nu = 0.0;
double gammaG=0.05,gammaL=0.05;
double x,y;
double dpwcoeff = sqrt(2.0*BOLTZ*cross_constants.temperature*NA/((cross_constants.mean_mass)))/VELLGT;
//double temp_2=cross_constants.ln2pi/cross_constants.halfwidth;
//double temp_3 = -cross_constants.ln2*(1.0/(cross_constants.halfwidth*cross_constants.halfwidth));
freq = g_freq[start_idx + b_idx];
//cs_val = g_cs[start_idx+g_idx];
//if(g_idx==9999) printf("%12.6f\n",freq);
l_cs_result[l_idx] = cs_val;
for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){
nu = 0.0;
//Read value of nu
nu = g_nu[i];
dfreq_ = nu-freq;
gammaL = g_gamma[i];
if(dfreq_ < -500.0*gammaL)
continue;
if(dfreq_ > 500.0*gammaL)
break;
gammaG = 1.0/(nu*dpwcoeff);
x =abs(dfreq_)*gammaG;
y =gammaL*gammaG;
double xxyy = x * x + y * y;
double voigt_check;// = voigt_916(x,y,0.9);
////Algorithm 916
if(xxyy < 100.0){
voigt_check = voigt_916(x,y,0.9);
//cs_val+=g_abscoef[i]*voigt_check*gammaG*ISQRTPI;
}else if(xxyy < 1.0e6){
//3-point gauss hermite
voigt_check = voigt_threegausshermite(x,y,xxyy);
//cs_val+=g_abscoef[i]*ISQRTPI*gammaG;
}else{
voigt_check = y/(PI*xxyy);
//cs_val+= g_abscoef[i]*ISQRTPI*gammaG;
}
cs_val+=g_abscoef[i]*voigt_check*gammaG*ISQRTPI;
//if((blockIdx.x * blockDim.x + threadIdx.x)==0) printf("dfreq = %14.4E x=%14.4E y=%14.4E gammaL = %14.4E gammaG = %14.4E abscoef=%14.4E voigt=%14.4E cs_val=%14.4E\n",dfreq_,x,y,gammaL,gammaG,g_abscoef[i],voigt_check,cs_val);
}
//Store results into shared memory
l_cs_result[l_idx] = cs_val;
cs_val = 0;
	//Wait for everyone to finish nicely
__syncthreads();
if(l_idx == 0){
for(int i = 0; i < VOIGT_BLOCK; i++)
cs_val+=l_cs_result[i];
g_cs[start_idx+b_idx]+=cs_val;
}
}
|
4760d8c96c11fa3704b7ae6a7af7e049462d8bde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void saxpy_1(float a, float *x, float *out, size_t n) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
out[tid] = a * x[tid];
}
}
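// Minimal host-side launch sketch (illustrative only; buffer names, block size
// and the use of hipLaunchKernelGGL are assumptions, not original code):
//   size_t threads = 256;
//   size_t blocks = (n + threads - 1) / threads;
//   hipLaunchKernelGGL(saxpy_1, dim3(blocks), dim3(threads), 0, 0, a, d_x, d_out, n);
// The ceiling division gives every element a thread; the tid < n guard above
// discards the overshoot in the last block.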
| 4760d8c96c11fa3704b7ae6a7af7e049462d8bde.cu |
extern "C" __global__
void saxpy_1(float a, float *x, float *out, size_t n) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n) {
out[tid] = a * x[tid];
}
}
|
98dde5358a2443b2930da1791482199bc3c20b64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/local_share/forward/local_share_fwd_chwn_f32_batch_size_aware.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./local_share_forward.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <int unroll_co_, int unroll_ci_, int unroll_wo_>
struct UnrollConfig {
static int const unroll_co = unroll_co_;
static int const unroll_ci = unroll_ci_;
static int const unroll_wo = unroll_wo_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
};
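// Hypothetical instantiation for illustration: ThreadConfig<32, 8> with
// UnrollConfig<4, 4, 4> gives a 32x8 thread block in which each thread owns a
// 4 (output channels) x 4 (output columns) accumulator tile, and the input
// channels are consumed 4 at a time.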
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct DataTileCount {
static int const tile_hi = LocalShareConfig::fh;
static int const tile_wi = UnrollConfig::unroll_wo * LocalShareConfig::sw +
LocalShareConfig::fw - LocalShareConfig::sw;
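    // Input width needed by unroll_wo outputs of stride sw under an fw-wide
    // filter: (unroll_wo - 1) * sw + fw, rewritten above with the -sw folded in.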
static int const tile_hw = tile_hi * tile_wi;
static int const tile_chw = UnrollConfig::unroll_ci * tile_hi * tile_wi;
static int const reg_gl2sh =
(tile_chw + ThreadConfig::nr_thread_y - 1) / ThreadConfig::nr_thread_y;
static int const smem_h = tile_chw;
static int const smem_w = ThreadConfig::nr_thread_x;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct FilterTileCount {
static int const tile_co = ThreadConfig::nr_thread_y * UnrollConfig::unroll_co;
static int const tile_ci = UnrollConfig::unroll_ci;
static int const smem_h = tile_ci * LocalShareConfig::fh * LocalShareConfig::fw;
static int const smem_w = tile_co;
static int const smem_stride = smem_w + 1;
static int const smem_tot = smem_h * smem_stride;
MEGDNN_STATIC_ASSERT(
smem_w % ThreadConfig::nr_thread_x == 0,
"col of share memory must be divided by nr_thread_x");
static int const reg_h =
(smem_h + ThreadConfig::nr_thread_y - 1) / ThreadConfig::nr_thread_y;
static int const reg_w = smem_w / ThreadConfig::nr_thread_x;
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef float copy_t;
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
float* smem;
const float* g_ptr;
int c_stride;
int h_stride;
int w_stride;
int h1, h2;
int w1, w2;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[DataTileCount::reg_gl2sh];
__device__ DataGlobal2ShareMemVisitor(
float* smem, const float* g_ptr, int c_stride, int h_stride, int w_stride,
int h1, int h2, int w1, int w2)
: smem{smem},
g_ptr{g_ptr},
c_stride{c_stride},
h_stride{h_stride},
w_stride{w_stride},
h1{h1},
h2{h2},
w1{w1},
w2{w2} {};
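    // h1..h2 and w1..w2 bound the in-image rows/cols of this tile; positions
    // outside are zero padding, matching the ih/iw range checks below.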
__device__ __forceinline__ void first_copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
*(sh_ptr(chw, tid_x)) = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
reg[i] = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void commit() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw)
*(sh_ptr(chw, tid_x)) = reg[i];
chw += ThreadConfig::nr_thread_y;
}
};
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * DataTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * c_stride;
};
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
float* smem;
const float* g_ptr;
int remain;
int stride;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[FilterTileCount::reg_h][FilterTileCount::reg_w];
__device__ FilterGlobal2ShareMemVisitor(
float* smem, const float* g_ptr, int remain, int stride)
: smem{smem}, g_ptr{g_ptr}, remain{remain}, stride{stride} {};
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h) {
float val = 0.f;
if (w_idx < remain)
val = g_ptr[h_idx * stride + w_idx];
*(sh_ptr(h_idx, w_idx)) = val;
}
}
}
}
__device__ __forceinline__ void copy() {
// TODO: co bound check
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h) {
float val = 0.f;
if (w_idx < remain)
val = g_ptr[h_idx * stride + w_idx];
reg[i][j] = val;
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h)
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * FilterTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * LocalShareConfig::fh * LocalShareConfig::fw *
stride;
}
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
__device__ __forceinline__ void consume_block(
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>&
src_gl2sh_visitor,
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>&
filter_gl2sh_visitor,
float r_src
[DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>::tile_wi],
float r_filter[UnrollConfig::unroll_co][LocalShareConfig::fw],
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_wo]) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_row_base = ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_flt_col_base = tidy * UnrollConfig::unroll_co;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < LocalShareConfig::fw; ++j) {
r_filter[i][j] = *(filter_gl2sh_visitor.sh_ptr(
sh_flt_row_base + kh * LocalShareConfig::fw + j,
sh_flt_col_base + i));
}
}
#pragma unroll
for (int i = 0; i < DataTileCount::tile_wi; ++i) {
int sh_src_row = kh * DataTileCount::tile_wi + i;
r_src[i] =
*(src_gl2sh_visitor.sh_ptr(sh_src_row_base + sh_src_row, tidx));
}
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] +=
r_src[j * LocalShareConfig::sw + kw] * r_filter[i][kw];
}
}
}
}
}
}
/*
* Src tensor format is (c, h, w, n), filter tensor format is (sgh, sgw, co, ci,
 * fh, fw), and dst tensor format is (c, h, w, n). Thread block size is (32, BY).
 * Each thread computes UnrollConfig::unroll_wo entries in each of its
 * UnrollConfig::unroll_co output-channel slices of the output tensor. Each block
 * computes 32 batches and BY x UnrollConfig::unroll_co output channels.
*/
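// Worked example (taking the fh = fw = 3, sh = sw = 1 configuration chosen in
// get_kern() below, i.e. nr_thread_y = 8, unroll_co = 4): one block covers
// 32 batches x (8 * 4) = 32 output channels x 1 output row x unroll_wo output
// columns, so the grid is DIVUP(n, 32) * DIVUP(grp_wo, unroll_wo) blocks in x,
// DIVUP(co, 32) * grp_ho blocks in y, and sgh * sgw blocks in z.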
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int blks_per_grp_wo =
(param.grp_wo + UnrollConfig::unroll_wo - 1) / UnrollConfig::unroll_wo;
const int b_co = bidy / param.grp_ho;
const int b_grp_ho = bidy - b_co * param.grp_ho;
const int b_n = bidx / blks_per_grp_wo;
const int b_grp_wo = bidx - b_n * blks_per_grp_wo;
const int b_sgh = bidz / param.sgw;
const int b_sgw = bidz - b_sgh * param.sgw;
const int b_ho = b_sgh * param.grp_ho + b_grp_ho;
const int b_wo = b_sgw * param.grp_wo + b_grp_wo * UnrollConfig::unroll_wo;
const int b_hi = b_ho * LocalShareConfig::sh - param.ph;
const int b_wi = b_wo * LocalShareConfig::sw - param.pw;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int t_co = b_co * FilterTileCount::tile_co + tidy * UnrollConfig::unroll_co;
const float* __restrict__ g_ptr_src = src + (b_hi * param.wi + b_wi) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
const float* __restrict__ g_ptr_filter =
filter +
(b_sgh * param.sgw + b_sgw) * param.co * param.ci * LocalShareConfig::fh *
LocalShareConfig::fw // spatial group
+ b_co; // output channel
float* __restrict__ g_ptr_dst = dst + t_co * ho * wo * param.n +
(b_ho * wo + b_wo) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
// TODO check register
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{
sh_src,
g_ptr_src,
param.hi * param.wi * param.n,
param.wi * param.n,
param.n,
-b_hi,
param.hi - b_hi,
-b_wi,
param.wi - b_wi};
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, g_ptr_filter, param.co - b_co, param.co};
float r_src[DataTileCount::tile_wi];
float r_filter[UnrollConfig::unroll_co][LocalShareConfig::fw];
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_wo];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] = 0;
}
}
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
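    // Pipelined main loop: copy() issues the global loads of the next ci
    // slice into registers before consume_block() works through the current
    // slice in shared memory, hiding load latency; after the barrier,
    // commit() flushes the prefetched registers into the single shared-memory
    // buffer, i.e. double buffering staged through registers.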
int ci_blks = (param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
for (int ci_outer = 0; ci_outer < ci_blks - 1; ci_outer++) {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
consume_block<LocalShareConfig, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter, r_acc);
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
consume_block<LocalShareConfig, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter, r_acc);
const int co_stride = ho * wo * param.n;
const int t_grp_wo_base = b_grp_wo * UnrollConfig::unroll_wo;
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
int g_co = t_co + i;
int t_grp_wo = t_grp_wo_base + j;
if (g_co < param.co && t_grp_wo < param.grp_wo) {
g_ptr_dst[i * co_stride + j * param.n] = r_acc[i][j];
}
}
}
}
void (*get_kern(
int fh, int fw, int sh, int sw, const Param& param,
LaunchConfig& launch_config))(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param) {
void (*kern)(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param);
kern = nullptr;
if (fh == 1 && fw == 1 && sh == 1 && sw == 1) {
static constexpr int fh_ = 1;
static constexpr int fw_ = 1;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 4; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
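        // Note: the checks below cascade. CK_GRP_WO(k) re-assigns `kern` and
        // the launch config whenever param.grp_wo >= k, so the largest usable
        // unroll_wo (the last threshold met) wins.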
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
#undef CK_GRP_WO
} else if (fh == 1 && fw == 1 && sh == 2 && sw == 2) {
static constexpr int fh_ = 1;
static constexpr int fw_ = 1;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 4; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
} else if (fh == 3 && fw == 3 && sh == 1 && sw == 1) {
static constexpr int fh_ = 3;
static constexpr int fw_ = 3;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 4; \
static constexpr int unroll_ci = 1; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
} else if (fh == 3 && fw == 3 && sh == 2 && sw == 2) {
static constexpr int fh_ = 3;
static constexpr int fw_ = 3;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 1; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 4; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
    //! TODO: tune performance for kernel sizes 5x5 and 7x7
} else if (fh == 5 && fw == 5 && sh == 1 && sw == 1) {
static constexpr int fh_ = 5;
static constexpr int fw_ = 5;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
if (param.grp_wo >= 8) {
static constexpr int unroll_co = 8;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 8;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 5 && fw == 5 && sh == 2 && sw == 2) {
static constexpr int fh_ = 5;
static constexpr int fw_ = 5;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 7 && fw == 7 && sh == 1 && sw == 1) {
static constexpr int fh_ = 7;
static constexpr int fw_ = 7;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
if (param.grp_wo >= 8) {
static constexpr int unroll_co = 8;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 8;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 7 && fw == 7 && sh == 2 && sw == 2) {
static constexpr int fh_ = 7;
static constexpr int fw_ = 7;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) *
DataTileCount<LocalShareConfig_, UnrollConfig_, ThreadConfig_>::
smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
megdnn_assert(
false,
"no usable kernel implementation for local share "
"convolution (fh,fw)=(%d,%d), (sh,sw)=(%d,%d)",
fh, fw, sh, sw);
}
return kern;
}
} // namespace
//! this is a dummy (compiled-out) reference kernel
#if 0
namespace batch_size_aware {
template <int unroll_ho_, int unroll_wo_, int unroll_ci_>
struct UnrollConfig {
static int const unroll_ho = unroll_ho_;
static int const unroll_wo = unroll_wo_;
static int const unroll_ci = unroll_ci_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct DataTileCount {
static int const tile_hi = UnrollConfig::unroll_ho * LocalShareConfig::sh +
LocalShareConfig::fh - 1;
static int const tile_wi = UnrollConfig::unroll_wo * LocalShareConfig::sw +
LocalShareConfig::fw - 1;
static int const tile_hw = tile_hi * tile_wi;
static int const tile_chw = UnrollConfig::unroll_ci * tile_hi * tile_wi;
static int const reg_gl2sh = (tile_chw + ThreadConfig::nr_thread_y - 1) /
ThreadConfig::nr_thread_y;
static int const smem_h = tile_chw;
static int const smem_w = ThreadConfig::nr_thread_x;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct FilterTileCount {
static int const tile_co = ThreadConfig::nr_thread_y;
static int const tile_ci = UnrollConfig::unroll_ci;
static int const smem_h = tile_co;
static int const smem_w =
tile_ci * LocalShareConfig::fh * LocalShareConfig::fw;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
static int const reg_gl2sh = (smem_w + ThreadConfig::nr_thread_x - 1) /
ThreadConfig::nr_thread_x;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef float copy_t;
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
DataTileCount;
float* smem;
const float* g_ptr;
int c_stride;
int h_stride;
int w_stride;
int h1, h2;
int w1, w2;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[DataTileCount::reg_gl2sh];
__device__ DataGlobal2ShareMemVisitor(float* smem, const float* g_ptr,
int c_stride, int h_stride,
int w_stride, int h1, int h2, int w1,
int w2)
: smem{smem},
g_ptr{g_ptr},
c_stride{c_stride},
h_stride{h_stride},
w_stride{w_stride},
h1{h1},
h2{h2},
w1{w1},
w2{w2} {};
__device__ __forceinline__ void first_copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
*(sh_ptr(chw, tid_x)) = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
reg[i] = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void commit() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw)
*(sh_ptr(chw, tid_x)) = reg[i];
chw += ThreadConfig::nr_thread_y;
}
};
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * DataTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * c_stride;
};
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
float* smem;
const float* g_ptr;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[FilterTileCount::reg_gl2sh];
__device__ FilterGlobal2ShareMemVisitor(float* smem, const float* g_ptr)
: smem{smem}, g_ptr{g_ptr} {};
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
*(sh_ptr(tid_y, idx + tid_x)) = g_ptr[idx];
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
reg[i] = g_ptr[idx];
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = tid_x + i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
*(sh_ptr(tid_y, idx)) = reg[i];
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * FilterTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * LocalShareConfig::fh *
LocalShareConfig::fw;
}
};
/*
 * Src tensor format is (c, h, w, n), filter tensor format is (sgh, sgw, co, ci,
 * fh, fw), and dst tensor format is (c, h, w, n). Thread block size is (32, BY).
 * Each thread computes UnrollConfig::unroll_ho x UnrollConfig::unroll_wo entries
 * of one slice with height ho and width wo of the output tensor. Each block
 * computes 32 batches and BY output channels.
 */
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
DataTileCount;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int blks_per_grp_ho = (param.grp_ho + UnrollConfig::unroll_ho - 1) /
UnrollConfig::unroll_ho;
const int blks_per_grp_wo = (param.grp_wo + UnrollConfig::unroll_wo - 1) /
UnrollConfig::unroll_wo;
const int b_co = bidy / blks_per_grp_ho;
const int b_grp_ho = bidy - b_co * blks_per_grp_ho;
const int b_n = bidx / blks_per_grp_wo;
const int b_grp_wo = bidx - b_n * blks_per_grp_wo;
const int b_sgh = bidz / param.sgw;
const int b_sgw = bidz - b_sgh * param.sgw;
const int b_ho = b_sgh * param.grp_ho + b_grp_ho * UnrollConfig::unroll_ho;
const int b_wo = b_sgw * param.grp_wo + b_grp_wo * UnrollConfig::unroll_wo;
const int b_hi = b_ho * LocalShareConfig::sh - param.ph;
const int b_wi = b_wo * LocalShareConfig::sw - param.pw;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int t_co = b_co * ThreadConfig::nr_thread_y + tidy;
const float* __restrict__ g_ptr_src =
src + (b_hi * param.wi + b_wi) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
const float* __restrict__ g_ptr_filter =
filter +
(b_sgh * param.sgw + b_sgw) * param.co * param.ci *
LocalShareConfig::fh *
LocalShareConfig::fw // spatial group
+ t_co * param.ci * LocalShareConfig::fh *
LocalShareConfig::fw // output channel
+ tidx;
float* __restrict__ g_ptr_dst = dst + t_co * ho * wo * param.n +
(b_ho * wo + b_wo) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
// TODO check register
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{sh_src,
g_ptr_src,
param.hi * param.wi * param.n,
param.wi * param.n,
param.n,
-b_hi,
param.hi - b_hi,
-b_wi,
param.wi - b_wi};
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, g_ptr_filter};
float r_src[UnrollConfig::unroll_ho][DataTileCount::tile_wi];
float r_filter[LocalShareConfig::fw];
float r_acc[UnrollConfig::unroll_ho][UnrollConfig::unroll_wo];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] = 0;
}
}
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
int ci_blks =
(param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
#pragma unroll
for (int ci_outer = 0; ci_outer < ci_blks - 1; ci_outer++) {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_col_base =
ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < LocalShareConfig::fw; ++i) {
r_filter[i] = *(filter_gl2sh_visitor.sh_ptr(
tidy,
sh_flt_col_base + kh * LocalShareConfig::fw + i));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < DataTileCount::tile_wi; ++j) {
int sh_src_row = (i * LocalShareConfig::sh + kh) *
DataTileCount::tile_wi +
j;
r_src[i][j] = *(src_gl2sh_visitor.sh_ptr(
sh_src_row_base + sh_src_row, tidx));
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
r_acc[i][j] +=
r_src[i][j * LocalShareConfig::sw + kw] *
r_filter[kw];
}
}
}
}
}
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_col_base =
ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < LocalShareConfig::fw; ++i) {
r_filter[i] = *(filter_gl2sh_visitor.sh_ptr(
tidy,
sh_flt_col_base + kh * LocalShareConfig::fw + i));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < DataTileCount::tile_wi; ++j) {
int sh_src_row = (i * LocalShareConfig::sh + kh) *
DataTileCount::tile_wi +
j;
r_src[i][j] = *(src_gl2sh_visitor.sh_ptr(
sh_src_row_base + sh_src_row, tidx));
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
r_acc[i][j] +=
r_src[i][j * LocalShareConfig::sw + kw] *
r_filter[kw];
}
}
}
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
int oh = b_ho + i;
int ow = b_wo + j;
if (t_co < param.co && oh < ho && ow < wo) {
g_ptr_dst[(i * wo + j) * param.n] = r_acc[i][j];
}
}
}
}
} // namespace batch_size_aware
#endif
void megdnn::cuda::local_share::_do_local_share_convolution_large_batch_size(
const float* d_src, const float* d_filter, float* d_dst, float* workspace,
int fh, int fw, int sh, int sw, const Param& param,
hipblasHandle_t cublas_handle, hipStream_t stream, float* one, float* zero) {
float* ws_src = workspace;
int nr_elem_total = param.n * param.ci * param.hi * param.wi;
float* ws_dst = workspace + nr_elem_total;
// tensor reformat from (n, c, h, w) -> (c, h, w, n)
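    // hipblasSgeam computes C = alpha * op(A) + beta * op(B); with alpha = 1,
    // beta = 0 and both operands transposed, the call below is a pure
    // transpose, turning the row-major (n, ci*hi*wi) source into
    // (ci*hi*wi, n), i.e. NCHW -> CHWN.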
{
int m = param.n, n = param.ci * param.hi * param.wi;
int lda, ldb;
lda = ldb = param.ci * param.hi * param.wi;
int ldc = param.n;
cublas_check(hipblasSgeam(
cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, one, d_src, lda, zero,
d_src, ldb, ws_src, ldc));
}
{
void (*kern)(
const float* __restrict__, const float* __restrict__,
float* __restrict__, Param);
LaunchConfig launch_config;
kern = get_kern(fh, fw, sh, sw, param, launch_config);
uint32_t nr_threads_x = launch_config.nr_threads_x,
nr_threads_y = launch_config.nr_threads_y,
nr_blocks_x = launch_config.nr_blocks_x,
nr_blocks_y = launch_config.nr_blocks_y,
nr_blocks_z = launch_config.nr_blocks_z,
smem_size_in_bytes = launch_config.smem_size_in_bytes;
_check_launch_config(launch_config);
dim3 block_size{nr_threads_x, nr_threads_y, 1};
dim3 grid_size{nr_blocks_x, nr_blocks_y, nr_blocks_z};
hipLaunchKernelGGL(( kern), dim3(grid_size), dim3(block_size), smem_size_in_bytes, stream,
ws_src, d_filter, ws_dst, param);
after_kernel_launch();
}
    // tensor reformat from (c, h, w, n) -> (n, c, h, w)
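    // the inverse of the geam transpose above: (co*ho*wo, n) -> (n, co*ho*wo)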
{
int ho = param.grp_ho * param.sgh, wo = param.grp_wo * param.sgw;
int m = param.co * ho * wo, n = param.n;
int lda, ldb;
lda = ldb = param.n;
int ldc = param.co * ho * wo;
cublas_check(hipblasSgeam(
cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, one, ws_dst, lda, zero,
ws_dst, ldb, d_dst, ldc));
}
}
// vim: syntax=cuda.doxygen
| 98dde5358a2443b2930da1791482199bc3c20b64.cu | /**
* \file dnn/src/cuda/local_share/forward/local_share_fwd_chwn_f32_batch_size_aware.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "./local_share_forward.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <int unroll_co_, int unroll_ci_, int unroll_wo_>
struct UnrollConfig {
static int const unroll_co = unroll_co_;
static int const unroll_ci = unroll_ci_;
static int const unroll_wo = unroll_wo_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct DataTileCount {
static int const tile_hi = LocalShareConfig::fh;
static int const tile_wi = UnrollConfig::unroll_wo * LocalShareConfig::sw +
LocalShareConfig::fw - LocalShareConfig::sw;
static int const tile_hw = tile_hi * tile_wi;
static int const tile_chw = UnrollConfig::unroll_ci * tile_hi * tile_wi;
static int const reg_gl2sh =
(tile_chw + ThreadConfig::nr_thread_y - 1) / ThreadConfig::nr_thread_y;
static int const smem_h = tile_chw;
static int const smem_w = ThreadConfig::nr_thread_x;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct FilterTileCount {
static int const tile_co = ThreadConfig::nr_thread_y * UnrollConfig::unroll_co;
static int const tile_ci = UnrollConfig::unroll_ci;
static int const smem_h = tile_ci * LocalShareConfig::fh * LocalShareConfig::fw;
static int const smem_w = tile_co;
static int const smem_stride = smem_w + 1;
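    // smem_w + 1: padding the row stride by one float staggers rows across
    // shared-memory banks, the usual trick to avoid bank conflicts when
    // consecutive threads read down a column of the tile.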
static int const smem_tot = smem_h * smem_stride;
MEGDNN_STATIC_ASSERT(
smem_w % ThreadConfig::nr_thread_x == 0,
"col of share memory must be divided by nr_thread_x");
static int const reg_h =
(smem_h + ThreadConfig::nr_thread_y - 1) / ThreadConfig::nr_thread_y;
static int const reg_w = smem_w / ThreadConfig::nr_thread_x;
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef float copy_t;
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
float* smem;
const float* g_ptr;
int c_stride;
int h_stride;
int w_stride;
int h1, h2;
int w1, w2;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[DataTileCount::reg_gl2sh];
__device__ DataGlobal2ShareMemVisitor(
float* smem, const float* g_ptr, int c_stride, int h_stride, int w_stride,
int h1, int h2, int w1, int w2)
: smem{smem},
g_ptr{g_ptr},
c_stride{c_stride},
h_stride{h_stride},
w_stride{w_stride},
h1{h1},
h2{h2},
w1{w1},
w2{w2} {};
__device__ __forceinline__ void first_copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
*(sh_ptr(chw, tid_x)) = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
reg[i] = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void commit() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw)
*(sh_ptr(chw, tid_x)) = reg[i];
chw += ThreadConfig::nr_thread_y;
}
};
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * DataTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * c_stride;
};
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
float* smem;
const float* g_ptr;
int remain;
int stride;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[FilterTileCount::reg_h][FilterTileCount::reg_w];
__device__ FilterGlobal2ShareMemVisitor(
float* smem, const float* g_ptr, int remain, int stride)
: smem{smem}, g_ptr{g_ptr}, remain{remain}, stride{stride} {};
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h) {
float val = 0.f;
if (w_idx < remain)
val = g_ptr[h_idx * stride + w_idx];
*(sh_ptr(h_idx, w_idx)) = val;
}
}
}
}
__device__ __forceinline__ void copy() {
// TODO: co bound check
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h) {
float val = 0.f;
if (w_idx < remain)
val = g_ptr[h_idx * stride + w_idx];
reg[i][j] = val;
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_h; ++i) {
int h_idx = tid_y + i * ThreadConfig::nr_thread_y;
#pragma unroll
for (int j = 0; j < FilterTileCount::reg_w; ++j) {
int w_idx = tid_x + j * ThreadConfig::nr_thread_x;
if (h_idx < FilterTileCount::smem_h)
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * FilterTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * LocalShareConfig::fh * LocalShareConfig::fw *
stride;
}
};
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
__device__ __forceinline__ void consume_block(
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>&
src_gl2sh_visitor,
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>&
filter_gl2sh_visitor,
float r_src
[DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>::tile_wi],
float r_filter[UnrollConfig::unroll_co][LocalShareConfig::fw],
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_wo]) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_row_base = ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_flt_col_base = tidy * UnrollConfig::unroll_co;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < LocalShareConfig::fw; ++j) {
r_filter[i][j] = *(filter_gl2sh_visitor.sh_ptr(
sh_flt_row_base + kh * LocalShareConfig::fw + j,
sh_flt_col_base + i));
}
}
#pragma unroll
for (int i = 0; i < DataTileCount::tile_wi; ++i) {
int sh_src_row = kh * DataTileCount::tile_wi + i;
r_src[i] =
*(src_gl2sh_visitor.sh_ptr(sh_src_row_base + sh_src_row, tidx));
}
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] +=
r_src[j * LocalShareConfig::sw + kw] * r_filter[i][kw];
}
}
}
}
}
}
/*
 * Src tensor format is (c, h, w, n), filter tensor format is (sgh, sgw, co, ci,
 * fh, fw), and dst tensor format is (c, h, w, n). Thread block size is (32, BY).
 * Each thread computes 1 x UnrollConfig::unroll_wo entries
 * of one slice with height ho and width wo of the output tensor. Each block
 * computes 32 batches and BY x UnrollConfig::unroll_co output channels.
 */
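// Worked example (taking the fh = fw = 3, sh = sw = 1 configuration chosen in
// get_kern() below, i.e. nr_thread_y = 8, unroll_co = 4): one block covers
// 32 batches x (8 * 4) = 32 output channels x 1 output row x unroll_wo output
// columns, so the grid is DIVUP(n, 32) * DIVUP(grp_wo, unroll_wo) blocks in x,
// DIVUP(co, 32) * grp_ho blocks in y, and sgh * sgw blocks in z.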
template <typename LocalShareConfig, typename UnrollConfig, typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig> DataTileCount;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int blks_per_grp_wo =
(param.grp_wo + UnrollConfig::unroll_wo - 1) / UnrollConfig::unroll_wo;
const int b_co = bidy / param.grp_ho;
const int b_grp_ho = bidy - b_co * param.grp_ho;
const int b_n = bidx / blks_per_grp_wo;
const int b_grp_wo = bidx - b_n * blks_per_grp_wo;
const int b_sgh = bidz / param.sgw;
const int b_sgw = bidz - b_sgh * param.sgw;
const int b_ho = b_sgh * param.grp_ho + b_grp_ho;
const int b_wo = b_sgw * param.grp_wo + b_grp_wo * UnrollConfig::unroll_wo;
const int b_hi = b_ho * LocalShareConfig::sh - param.ph;
const int b_wi = b_wo * LocalShareConfig::sw - param.pw;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int t_co = b_co * FilterTileCount::tile_co + tidy * UnrollConfig::unroll_co;
const float* __restrict__ g_ptr_src = src + (b_hi * param.wi + b_wi) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
const float* __restrict__ g_ptr_filter =
filter +
(b_sgh * param.sgw + b_sgw) * param.co * param.ci * LocalShareConfig::fh *
LocalShareConfig::fw // spatial group
+ b_co; // output channel
float* __restrict__ g_ptr_dst = dst + t_co * ho * wo * param.n +
(b_ho * wo + b_wo) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
// TODO check register
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{
sh_src,
g_ptr_src,
param.hi * param.wi * param.n,
param.wi * param.n,
param.n,
-b_hi,
param.hi - b_hi,
-b_wi,
param.wi - b_wi};
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, g_ptr_filter, param.co - b_co, param.co};
float r_src[DataTileCount::tile_wi];
float r_filter[UnrollConfig::unroll_co][LocalShareConfig::fw];
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_wo];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] = 0;
}
}
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
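    // Pipelined main loop: copy() issues the global loads of the next ci
    // slice into registers before consume_block() works through the current
    // slice in shared memory, hiding load latency; after the barrier,
    // commit() flushes the prefetched registers into the single shared-memory
    // buffer, i.e. double buffering staged through registers.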
int ci_blks = (param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
for (int ci_outer = 0; ci_outer < ci_blks - 1; ci_outer++) {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
consume_block<LocalShareConfig, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter, r_acc);
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
consume_block<LocalShareConfig, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter, r_acc);
const int co_stride = ho * wo * param.n;
const int t_grp_wo_base = b_grp_wo * UnrollConfig::unroll_wo;
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
int g_co = t_co + i;
int t_grp_wo = t_grp_wo_base + j;
if (g_co < param.co && t_grp_wo < param.grp_wo) {
g_ptr_dst[i * co_stride + j * param.n] = r_acc[i][j];
}
}
}
}
void (*get_kern(
int fh, int fw, int sh, int sw, const Param& param,
LaunchConfig& launch_config))(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param) {
void (*kern)(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param);
kern = nullptr;
if (fh == 1 && fw == 1 && sh == 1 && sw == 1) {
static constexpr int fh_ = 1;
static constexpr int fw_ = 1;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 4; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
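        // Note: the checks below cascade. CK_GRP_WO(k) re-assigns `kern` and
        // the launch config whenever param.grp_wo >= k, so the largest usable
        // unroll_wo (the last threshold met) wins.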
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
#undef CK_GRP_WO
} else if (fh == 1 && fw == 1 && sh == 2 && sw == 2) {
static constexpr int fh_ = 1;
static constexpr int fw_ = 1;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 4; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
} else if (fh == 3 && fw == 3 && sh == 1 && sw == 1) {
static constexpr int fh_ = 3;
static constexpr int fw_ = 3;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 4; \
static constexpr int unroll_ci = 1; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 8; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
} else if (fh == 3 && fw == 3 && sh == 2 && sw == 2) {
static constexpr int fh_ = 3;
static constexpr int fw_ = 3;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
#define CK_GRP_WO(_grp_wo) \
if (param.grp_wo >= _grp_wo) { \
static constexpr int unroll_co = 8; \
static constexpr int unroll_ci = 1; \
static constexpr int unroll_wo = _grp_wo; \
static constexpr int nr_thread_x = 32; \
static constexpr int nr_thread_y = 4; \
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_; \
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_; \
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_; \
kern = local_share_device_template_f32< \
LocalShareConfig_, UnrollConfig_, ThreadConfig_>; \
launch_config.nr_threads_x = nr_thread_x; \
launch_config.nr_threads_y = nr_thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo); \
launch_config.nr_blocks_y = \
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho; \
launch_config.nr_blocks_z = param.sgh * param.sgw; \
launch_config.smem_size_in_bytes = \
sizeof(float) * DataTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot + \
sizeof(float) * FilterTileCount< \
LocalShareConfig_, UnrollConfig_, \
ThreadConfig_>::smem_tot; \
}
CK_GRP_WO(1);
CK_GRP_WO(2);
CK_GRP_WO(3);
CK_GRP_WO(4);
CK_GRP_WO(5);
CK_GRP_WO(6);
CK_GRP_WO(7);
CK_GRP_WO(8);
#undef CK_GRP_WO
    //! TODO: tune performance for kernel sizes 5x5 and 7x7
} else if (fh == 5 && fw == 5 && sh == 1 && sw == 1) {
static constexpr int fh_ = 5;
static constexpr int fw_ = 5;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
if (param.grp_wo >= 8) {
static constexpr int unroll_co = 8;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 8;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 5 && fw == 5 && sh == 2 && sw == 2) {
static constexpr int fh_ = 5;
static constexpr int fw_ = 5;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 2;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 7 && fw == 7 && sh == 1 && sw == 1) {
static constexpr int fh_ = 7;
static constexpr int fw_ = 7;
static constexpr int sh_ = 1;
static constexpr int sw_ = 1;
if (param.grp_wo >= 8) {
static constexpr int unroll_co = 8;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 8;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else if (param.grp_wo >= 4) {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 4;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) * DataTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
}
} else if (fh == 7 && fw == 7 && sh == 2 && sw == 2) {
static constexpr int fh_ = 7;
static constexpr int fw_ = 7;
static constexpr int sh_ = 2;
static constexpr int sw_ = 2;
static constexpr int unroll_co = 16;
static constexpr int unroll_ci = 1;
static constexpr int unroll_wo = 2;
static constexpr int nr_thread_x = 32;
static constexpr int nr_thread_y = 8;
typedef LocalShareConfig<fh_, fw_, sh_, sw_> LocalShareConfig_;
typedef UnrollConfig<unroll_co, unroll_ci, unroll_wo> UnrollConfig_;
typedef ThreadConfig<nr_thread_x, nr_thread_y> ThreadConfig_;
kern = local_share_device_template_f32<
LocalShareConfig_, UnrollConfig_, ThreadConfig_>;
launch_config.nr_threads_x = nr_thread_x;
launch_config.nr_threads_y = nr_thread_y;
launch_config.nr_threads_z = 1;
launch_config.nr_blocks_x =
DIVUP(param.n, nr_thread_x) * DIVUP(param.grp_wo, unroll_wo);
launch_config.nr_blocks_y =
DIVUP(param.co, nr_thread_y * unroll_co) * param.grp_ho;
launch_config.nr_blocks_z = param.sgh * param.sgw;
launch_config.smem_size_in_bytes =
sizeof(float) *
DataTileCount<LocalShareConfig_, UnrollConfig_, ThreadConfig_>::
smem_tot +
sizeof(float) * FilterTileCount<
LocalShareConfig_, UnrollConfig_,
ThreadConfig_>::smem_tot;
} else {
megdnn_assert(
false,
"no usable kernel implementation for local share "
"convolution (fh,fw)=(%d,%d), (sh,sw)=(%d,%d)",
fh, fw, sh, sw);
}
return kern;
}
} // namespace
//! this is a dummy kernel
#if 0
namespace batch_size_aware {
template <int unroll_ho_, int unroll_wo_, int unroll_ci_>
struct UnrollConfig {
static int const unroll_ho = unroll_ho_;
static int const unroll_wo = unroll_wo_;
static int const unroll_ci = unroll_ci_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct DataTileCount {
static int const tile_hi = UnrollConfig::unroll_ho * LocalShareConfig::sh +
LocalShareConfig::fh - 1;
static int const tile_wi = UnrollConfig::unroll_wo * LocalShareConfig::sw +
LocalShareConfig::fw - 1;
static int const tile_hw = tile_hi * tile_wi;
static int const tile_chw = UnrollConfig::unroll_ci * tile_hi * tile_wi;
static int const reg_gl2sh = (tile_chw + ThreadConfig::nr_thread_y - 1) /
ThreadConfig::nr_thread_y;
static int const smem_h = tile_chw;
static int const smem_w = ThreadConfig::nr_thread_x;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct FilterTileCount {
static int const tile_co = ThreadConfig::nr_thread_y;
static int const tile_ci = UnrollConfig::unroll_ci;
static int const smem_h = tile_co;
static int const smem_w =
tile_ci * LocalShareConfig::fh * LocalShareConfig::fw;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
static int const reg_gl2sh = (smem_w + ThreadConfig::nr_thread_x - 1) /
ThreadConfig::nr_thread_x;
};
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef float copy_t;
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
DataTileCount;
float* smem;
const float* g_ptr;
int c_stride;
int h_stride;
int w_stride;
int h1, h2;
int w1, w2;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[DataTileCount::reg_gl2sh];
__device__ DataGlobal2ShareMemVisitor(float* smem, const float* g_ptr,
int c_stride, int h_stride,
int w_stride, int h1, int h2, int w1,
int w2)
: smem{smem},
g_ptr{g_ptr},
c_stride{c_stride},
h_stride{h_stride},
w_stride{w_stride},
h1{h1},
h2{h2},
w1{w1},
w2{w2} {};
__device__ __forceinline__ void first_copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
*(sh_ptr(chw, tid_x)) = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void copy() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw) {
int ic = chw / DataTileCount::tile_hw;
int hw = chw - ic * DataTileCount::tile_hw;
int ih = hw / DataTileCount::tile_wi;
int iw = hw - ih * DataTileCount::tile_wi;
copy_t val = 0.f;
if (ih >= h1 && ih < h2 && iw >= w1 && iw < w2) {
val = g_ptr[ic * c_stride + ih * h_stride + iw * w_stride];
}
reg[i] = val;
}
chw += ThreadConfig::nr_thread_y;
}
}
__device__ __forceinline__ void commit() {
int chw = tid_y;
#pragma unroll
for (int i = 0; i < DataTileCount::reg_gl2sh; ++i) {
if (chw < DataTileCount::tile_chw)
*(sh_ptr(chw, tid_x)) = reg[i];
chw += ThreadConfig::nr_thread_y;
}
};
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * DataTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * c_stride;
};
};
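// Both visitors implement a two-stage copy pipeline: first_copy() fills shared
// memory for the first ci-slice directly; afterwards copy() stages the next
// slice into registers while the current one is consumed, and commit() flushes
// those registers to shared memory after a barrier.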
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
float* smem;
const float* g_ptr;
const int tid_x = threadIdx.x;
const int tid_y = threadIdx.y;
copy_t reg[FilterTileCount::reg_gl2sh];
__device__ FilterGlobal2ShareMemVisitor(float* smem, const float* g_ptr)
: smem{smem}, g_ptr{g_ptr} {};
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
*(sh_ptr(tid_y, idx + tid_x)) = g_ptr[idx];
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
reg[i] = g_ptr[idx];
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < FilterTileCount::reg_gl2sh; ++i) {
int idx = tid_x + i * ThreadConfig::nr_thread_x;
if (idx < FilterTileCount::smem_w)
*(sh_ptr(tid_y, idx)) = reg[i];
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * FilterTileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * LocalShareConfig::fh *
LocalShareConfig::fw;
}
};
/*
* Src tensor format is (c, h, w, n), filter tensor format is (sgh, sgw, co, ci,
* fh, fw), and dst tensor format (c, h, w, n). Thread block size is (32, BY).
* Each thread computes UnrollConfig::unroll_ho x UnrollConfig::unroll_wo entries
* of one slice with height ho and width wo of the output tensor. Each block
* computes 32 batches and BY output channels.
*/
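// Illustrative sizing (assuming BY = 8 and unroll_ho = unroll_wo = 2): each
// 32x8 thread block then produces 32 batches x 8 output channels x 4 spatial
// positions = 1024 output values.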
template <typename LocalShareConfig, typename UnrollConfig,
typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param) {
typedef DataTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
DataTileCount;
typedef FilterTileCount<LocalShareConfig, UnrollConfig, ThreadConfig>
FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int blks_per_grp_ho = (param.grp_ho + UnrollConfig::unroll_ho - 1) /
UnrollConfig::unroll_ho;
const int blks_per_grp_wo = (param.grp_wo + UnrollConfig::unroll_wo - 1) /
UnrollConfig::unroll_wo;
const int b_co = bidy / blks_per_grp_ho;
const int b_grp_ho = bidy - b_co * blks_per_grp_ho;
const int b_n = bidx / blks_per_grp_wo;
const int b_grp_wo = bidx - b_n * blks_per_grp_wo;
const int b_sgh = bidz / param.sgw;
const int b_sgw = bidz - b_sgh * param.sgw;
const int b_ho = b_sgh * param.grp_ho + b_grp_ho * UnrollConfig::unroll_ho;
const int b_wo = b_sgw * param.grp_wo + b_grp_wo * UnrollConfig::unroll_wo;
const int b_hi = b_ho * LocalShareConfig::sh - param.ph;
const int b_wi = b_wo * LocalShareConfig::sw - param.pw;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int t_co = b_co * ThreadConfig::nr_thread_y + tidy;
const float* __restrict__ g_ptr_src =
src + (b_hi * param.wi + b_wi) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
const float* __restrict__ g_ptr_filter =
filter +
(b_sgh * param.sgw + b_sgw) * param.co * param.ci *
LocalShareConfig::fh *
LocalShareConfig::fw // spatial group
+ t_co * param.ci * LocalShareConfig::fh *
LocalShareConfig::fw // output channel
+ tidx;
float* __restrict__ g_ptr_dst = dst + t_co * ho * wo * param.n +
(b_ho * wo + b_wo) * param.n +
b_n * ThreadConfig::nr_thread_x + tidx;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
// TODO check register
DataGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{sh_src,
g_ptr_src,
param.hi * param.wi * param.n,
param.wi * param.n,
param.n,
-b_hi,
param.hi - b_hi,
-b_wi,
param.wi - b_wi};
FilterGlobal2ShareMemVisitor<LocalShareConfig, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, g_ptr_filter};
float r_src[UnrollConfig::unroll_ho][DataTileCount::tile_wi];
float r_filter[LocalShareConfig::fw];
float r_acc[UnrollConfig::unroll_ho][UnrollConfig::unroll_wo];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
r_acc[i][j] = 0;
}
}
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
int ci_blks =
(param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
#pragma unroll
for (int ci_outer = 0; ci_outer < ci_blks - 1; ci_outer++) {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_col_base =
ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < LocalShareConfig::fw; ++i) {
r_filter[i] = *(filter_gl2sh_visitor.sh_ptr(
tidy,
sh_flt_col_base + kh * LocalShareConfig::fw + i));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < DataTileCount::tile_wi; ++j) {
int sh_src_row = (i * LocalShareConfig::sh + kh) *
DataTileCount::tile_wi +
j;
r_src[i][j] = *(src_gl2sh_visitor.sh_ptr(
sh_src_row_base + sh_src_row, tidx));
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
r_acc[i][j] +=
r_src[i][j * LocalShareConfig::sw + kw] *
r_filter[kw];
}
}
}
}
}
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
int sh_flt_col_base =
ci_inner * LocalShareConfig::fh * LocalShareConfig::fw;
int sh_src_row_base = ci_inner * DataTileCount::tile_hw;
#pragma unroll
for (int kh = 0; kh < LocalShareConfig::fh; ++kh) {
#pragma unroll
for (int i = 0; i < LocalShareConfig::fw; ++i) {
r_filter[i] = *(filter_gl2sh_visitor.sh_ptr(
tidy,
sh_flt_col_base + kh * LocalShareConfig::fw + i));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < DataTileCount::tile_wi; ++j) {
int sh_src_row = (i * LocalShareConfig::sh + kh) *
DataTileCount::tile_wi +
j;
r_src[i][j] = *(src_gl2sh_visitor.sh_ptr(
sh_src_row_base + sh_src_row, tidx));
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
#pragma unroll
for (int kw = 0; kw < LocalShareConfig::fw; ++kw) {
r_acc[i][j] +=
r_src[i][j * LocalShareConfig::sw + kw] *
r_filter[kw];
}
}
}
}
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_ho; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_wo; ++j) {
int oh = b_ho + i;
int ow = b_wo + j;
if (t_co < param.co && oh < ho && ow < wo) {
g_ptr_dst[(i * wo + j) * param.n] = r_acc[i][j];
}
}
}
}
} // namespace batch_size_aware
#endif
void megdnn::cuda::local_share::_do_local_share_convolution_large_batch_size(
const float* d_src, const float* d_filter, float* d_dst, float* workspace,
int fh, int fw, int sh, int sw, const Param& param,
cublasHandle_t cublas_handle, cudaStream_t stream, float* one, float* zero) {
float* ws_src = workspace;
int nr_elem_total = param.n * param.ci * param.hi * param.wi;
float* ws_dst = workspace + nr_elem_total;
// tensor reformat from (n, c, h, w) -> (c, h, w, n)
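// cublasSgeam computes C = alpha*op(A) + beta*op(B); with both operands
// transposed and beta = 0 it degenerates to an out-of-place transpose, which
// is what implements the layout change here.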
{
int m = param.n, n = param.ci * param.hi * param.wi;
int lda, ldb;
lda = ldb = param.ci * param.hi * param.wi;
int ldc = param.n;
cublas_check(cublasSgeam(
cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, d_src, lda, zero,
d_src, ldb, ws_src, ldc));
}
{
void (*kern)(
const float* __restrict__, const float* __restrict__,
float* __restrict__, Param);
LaunchConfig launch_config;
kern = get_kern(fh, fw, sh, sw, param, launch_config);
uint32_t nr_threads_x = launch_config.nr_threads_x,
nr_threads_y = launch_config.nr_threads_y,
nr_blocks_x = launch_config.nr_blocks_x,
nr_blocks_y = launch_config.nr_blocks_y,
nr_blocks_z = launch_config.nr_blocks_z,
smem_size_in_bytes = launch_config.smem_size_in_bytes;
_check_launch_config(launch_config);
dim3 block_size{nr_threads_x, nr_threads_y, 1};
dim3 grid_size{nr_blocks_x, nr_blocks_y, nr_blocks_z};
kern<<<grid_size, block_size, smem_size_in_bytes, stream>>>(
ws_src, d_filter, ws_dst, param);
after_kernel_launch();
}
// tensor reformat from (c, h, w, n) -> (n, c, h, w)
{
int ho = param.grp_ho * param.sgh, wo = param.grp_wo * param.sgw;
int m = param.co * ho * wo, n = param.n;
int lda, ldb;
lda = ldb = param.n;
int ldc = param.co * ho * wo;
cublas_check(cublasSgeam(
cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, ws_dst, lda, zero,
ws_dst, ldb, d_dst, ldc));
}
}
// vim: syntax=cuda.doxygen
|
bb58c3264718c4c7c7ae9ef15a1fa0ee533de4b6.hip | // !!! This is a file automatically generated by hipify!!!
// Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h" // IntelliSense support for CUDA syntax
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
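// 32 x 32 = 1024 threads per block, the per-block limit on most current GPUs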
__global__ void firstKernel(const float* d_A, const float* d_B, float* resultM, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n) {
float sum = 0.0f;
for (int x = 0; x < n; x++) {
sum += d_A[i*n + x] * d_B[x*n + j];
}
resultM[i*n + j] = sum;
}
}
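// Note: consecutive threadIdx.x values map to consecutive rows i, so the
// d_A loads and resultM stores above are strided by n; deriving i from the
// y index and j from the x index instead would make them coalesced.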
// check reports an error if one occurred
void check(const char* msg, const hipError_t err) {
if (err != hipSuccess)
std::cerr << "*** " << msg << ":" << hipGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
void sgemm(const float* h_a, const float* h_b, float* h_c, int n) {
int size = n * n * sizeof(float);
// Calculate number of blocks
int nb = (n + ntpb - 1) / ntpb;
// Device matrix pointers
float* d_A;
float* d_B;
float* d_C;
// Memory allocation for DEVICE matrices
check("hipMalloc d_A", hipMalloc((void**)&d_A, size));
check("hipMalloc d_B", hipMalloc((void**)&d_B, size));
check("hipMalloc d_C", hipMalloc((void**)&d_C, size));
// Copy matrices from HOST to the DEVICE
check("copy h_a to d_A", hipMemcpy(d_A, h_a, size, hipMemcpyHostToDevice));
check("copy h_b to d_B", hipMemcpy(d_B, h_b, size, hipMemcpyHostToDevice));
// launch execution configuration
dim3 dGrid(nb, nb);
dim3 dBlock(ntpb, ntpb);
firstKernel << <dGrid, dBlock >> >(d_A, d_B, d_C, n);
check("kernel launch", hipGetLastError());
check("device synchronize", hipDeviceSynchronize());
// Copy resulting matrix from DEVICE to HOST
check("copy d_C to h_c", hipMemcpy(h_c, d_C, size, hipMemcpyDeviceToHost));
// deallocate device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// reset the device
hipDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
std::cout << "Matrix size [" << n << " x " << n << "]\n";
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} | bb58c3264718c4c7c7ae9ef15a1fa0ee533de4b6.cu | // Simple Matrix Multiply - Workshop 6
// w6.cu
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <chrono>
#include <cuda_runtime.h>
#include "device_launch_parameters.h" // IntelliSense support for CUDA syntax
using namespace std::chrono;
const int ntpb = 32; // number of threads per block
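// 32 x 32 = 1024 threads per block, the per-block limit on most current GPUs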
__global__ void firstKernel(const float* d_A, const float* d_B, float* resultM, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n) {
float sum = 0.0f;
for (int x = 0; x < n; x++) {
sum += d_A[i*n + x] * d_B[x*n + j];
}
resultM[i*n + j] = sum;
}
}
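// Note: consecutive threadIdx.x values map to consecutive rows i, so the
// d_A loads and resultM stores above are strided by n; deriving i from the
// y index and j from the x index instead would make them coalesced.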
// check reports an error if one occurred
void check(const char* msg, const cudaError_t err) {
if (err != cudaSuccess)
std::cerr << "*** " << msg << ":" << cudaGetErrorString(err) << " ***\n";
}
// display matrix M, which is stored in row-major order
void display(const char* str, const float* M, int nr, int nc)
{
std::cout << str << std::endl;
std::cout << std::fixed << std::setprecision(4);
for (int i = 0; i < nr; i++) {
for (int j = 0; j < nc; j++)
std::cout << std::setw(10)
<< M[i * nc + j];
std::cout << std::endl;
}
std::cout << std::endl;
}
// report system time
void reportTime(const char* msg, steady_clock::duration span) {
auto ms = duration_cast<milliseconds>(span);
std::cout << msg << " - took - " <<
ms.count() << " millisecs" << std::endl;
}
// matrix multiply
void sgemm(const float* h_a, const float* h_b, float* h_c, int n) {
int size = n * n * sizeof(float);
// Calculate number of blocks
int nb = (n + ntpb - 1) / ntpb;
// Device matrix pointers
float* d_A;
float* d_B;
float* d_C;
// Memory allocation for DEVICE matrices
check("cudaMalloc d_A", cudaMalloc((void**)&d_A, size));
check("cudaMalloc d_B", cudaMalloc((void**)&d_B, size));
check("cudaMalloc d_C", cudaMalloc((void**)&d_C, size));
// Copy matrices from HOST to the DEVICE
check("copy h_a to d_A", cudaMemcpy(d_A, h_a, size, cudaMemcpyHostToDevice));
check("copy h_b to d_B", cudaMemcpy(d_B, h_b, size, cudaMemcpyHostToDevice));
// launch execution configuration
dim3 dGrid(nb, nb);
dim3 dBlock(ntpb, ntpb);
firstKernel << <dGrid, dBlock >> >(d_A, d_B, d_C, n);
check("kernel launch", cudaGetLastError());
check("device synchronize", cudaDeviceSynchronize());
// Copy resulting matrix from DEVICE to HOST
check("copy d_C to h_c", cudaMemcpy(h_c, d_C, size, cudaMemcpyDeviceToHost));
// deallocate device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// reset the device
cudaDeviceReset();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
std::cerr << argv[0] << ": invalid number of arguments\n";
std::cerr << "Usage: " << argv[0] << " size_of_vector\n";
return 1;
}
int n = std::atoi(argv[1]); // number of rows/columns in h_a, h_b, h_c
std::cout << "Matrix size [" << n << " x " << n << "]\n";
steady_clock::time_point ts, te;
// allocate host memory
ts = steady_clock::now();
float* h_a = new float[n * n];
float* h_b = new float[n * n];
float* h_c = new float[n * n];
// populate host matrices a and b
for (int i = 0, kk = 0; i < n; i++)
for (int j = 0; j < n; j++, kk++)
h_a[kk] = h_b[kk] = (float)kk / (n * n);
te = steady_clock::now();
reportTime("allocation and initialization", te - ts);
// h_c = h_a * h_b
ts = steady_clock::now();
sgemm(h_a, h_b, h_c, n);
te = steady_clock::now();
reportTime("matrix-matrix multiplication", te - ts);
// display results
if (n <= 5) {
display("h_a :", h_a, n, n);
display("h_b :", h_b, n, n);
display("h_c = h_a h_b :", h_c, n, n);
}
// check correctness
std::cout << "correctness test ..." << std::endl;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
float sum = 0.0f;
for (int k = 0; k < n; k++)
sum += h_a[i * n + k] * h_b[k * n + j];
if (std::abs(h_c[i * n + j] - sum) > 1.0e-3f)
std::cout << "[" << i << "," << j << "]" << h_c[i * n + j]
<< " != " << sum << std::endl;
}
std::cout << "done" << std::endl;
// deallocate host memory
delete[] h_a;
delete[] h_b;
delete[] h_c;
} |
b346d0e2a7534bdc9a20b9272bba399fbb7e8531.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <chrono>
#include <stdexcept>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>
#define MS_PER_SEC 1000.0
using namespace std;
using namespace nvinfer1;
class TestConfig;
typedef void (*preprocess_fn_t)(float *input, size_t channels, size_t height, size_t width);
float * imageToTensor(const cv::Mat & image);
void preprocessVgg(float *input, size_t channels, size_t height, size_t width);
void preprocessInception(float *input, size_t channels, size_t height, size_t width);
size_t argmax(float *input, size_t numel);
void test(const TestConfig &testConfig);
class TestConfig
{
public:
string imagePath;
string planPath;
string inputNodeName;
string outputNodeName;
string preprocessFnName;
string inputHeight;
string inputWidth;
string numOutputCategories;
string dataType;
string maxBatchSize;
string workspaceSize;
string numRuns;
string useMappedMemory;
string statsPath;
TestConfig(int argc, char * argv[])
{
imagePath = argv[1];
planPath = argv[2];
inputNodeName = argv[3];
inputHeight = argv[4];
inputWidth = argv[5];
outputNodeName = argv[6];
numOutputCategories = argv[7];
preprocessFnName = argv[8];
numRuns = argv[9];
dataType = argv[10];
maxBatchSize = argv[11];
workspaceSize = argv[12];
useMappedMemory = argv[13];
statsPath = argv[14];
}
static string UsageString()
{
string s = "";
s += "imagePath: \n";
s += "planPath: \n";
s += "inputNodeName: \n";
s += "inputHeight: \n";
s += "inputWidth: \n";
s += "outputNodeName: \n";
s += "numOutputCategories: \n";
s += "preprocessFnName: \n";
s += "numRuns: \n";
s += "dataType: \n";
s += "maxBatchSize: \n";
s += "workspaceSize: \n";
s += "useMappedMemory: \n";
s += "statsPath: \n";
return s;
}
string ToString()
{
string s = "";
s += "imagePath: " + imagePath + "\n";
s += "planPath: " + planPath + "\n";
s += "inputNodeName: " + inputNodeName + "\n";
s += "inputHeight: " + inputHeight + "\n";
s += "inputWidth: " + inputWidth + "\n";
s += "outputNodeName: " + outputNodeName + "\n";
s += "numOutputCategories: " + numOutputCategories + "\n";
s += "preprocessFnName: " + preprocessFnName + "\n";
s += "numRuns: " + numRuns + "\n";
s += "dataType: " + dataType + "\n";
s += "maxBatchSize: " + maxBatchSize + "\n";
s += "workspaceSize: " + workspaceSize + "\n";
s += "useMappedMemory: " + useMappedMemory + "\n";
s += "statsPath: " + statsPath + "\n";
return s;
}
static int ToInteger(string value)
{
int valueInt;
stringstream ss;
ss << value;
ss >> valueInt;
return valueInt;
}
preprocess_fn_t PreprocessFn() const {
if (preprocessFnName == "preprocess_vgg")
return preprocessVgg;
else if (preprocessFnName == "preprocess_inception")
return preprocessInception;
else
throw runtime_error("Invalid preprocessing function name.");
}
int InputWidth() const { return ToInteger(inputWidth); }
int InputHeight() const { return ToInteger(inputHeight); }
int NumOutputCategories() const { return ToInteger(numOutputCategories); }
nvinfer1::DataType DataType() const {
if (dataType == "float")
return nvinfer1::DataType::kFLOAT;
else if (dataType == "half")
return nvinfer1::DataType::kHALF;
else
throw runtime_error("Invalid data type.");
}
int MaxBatchSize() const { return ToInteger(maxBatchSize); }
int WorkspaceSize() const { return ToInteger(workspaceSize); }
int NumRuns() const { return ToInteger(numRuns); }
int UseMappedMemory() const { return ToInteger(useMappedMemory); }
};
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
cout << msg << endl;
}
} gLogger;
int main(int argc, char * argv[])
{
if (argc != 15)
{
cout << TestConfig::UsageString() << endl;
return 0;
}
TestConfig testConfig(argc, argv);
cout << "\ntestConfig: \n" << testConfig.ToString() << endl;
test(testConfig);
return 0;
}
float *imageToTensor(const cv::Mat & image)
{
const size_t height = image.rows;
const size_t width = image.cols;
const size_t channels = image.channels();
const size_t numel = height * width * channels;
const size_t stridesCv[3] = { width * channels, channels, 1 };
const size_t strides[3] = { height * width, width, 1 };
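// stridesCv indexes OpenCV's interleaved HWC layout; strides indexes the
// planar CHW layout of the tensor. E.g. for a 4x4 RGB image, pixel
// (i=1, j=2, k=0) sits at offsetCv = 1*12 + 2*3 + 0 = 18 in the image but
// at offset = 0*16 + 1*4 + 2 = 6 in the tensor.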
float * tensor;
hipHostMalloc((void**)&tensor, numel * sizeof(float), hipHostMallocMapped);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] = (float) image.data[offsetCv];
}
}
}
return tensor;
}
void preprocessVgg(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t strides[3] = { height * width, width, 1 };
const float mean[3] = { 123.68, 116.78, 103.94 };
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] -= mean[k];
}
}
}
}
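// preprocessInception below maps pixel values from [0, 255] to [-1, 1]:
// x = 0 -> -1.0 and x = 255 -> +1.0.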
void preprocessInception(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t numel = channels * height * width;
for (int i = 0; i < numel; i++)
tensor[i] = 2.0 * (tensor[i] / 255.0 - 0.5);
}
size_t argmax(float * tensor, size_t numel)
{
if (numel <= 0)
return 0;
size_t maxIndex = 0;
float max = tensor[0];
for (int i = 0; i < numel; i++)
{
if (tensor[i] > max)
{
maxIndex = i;
max = tensor[i];
}
}
return maxIndex;
}
void test(const TestConfig &testConfig)
{
ifstream planFile(testConfig.planPath);
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(),
plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(testConfig.inputNodeName.c_str());
outputBindingIndex = engine->getBindingIndex(testConfig.outputNodeName.c_str());
// load and preprocess image
cv::Mat image = cv::imread(testConfig.imagePath, 0);
/* cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3); */
cv::resize(image, image, cv::Size(testConfig.InputWidth(), testConfig.InputHeight()));
float *input = imageToTensor(image);
testConfig.PreprocessFn()(input, 1, testConfig.InputHeight(), testConfig.InputWidth());
// allocate memory on host / device for input / output
float *output;
float *inputDevice;
float *outputDevice;
size_t inputSize = testConfig.InputHeight() * testConfig.InputWidth() * 1 * sizeof(float);
hipHostMalloc(&output, testConfig.NumOutputCategories() * sizeof(float), hipHostMallocMapped);
if (testConfig.UseMappedMemory())
{
hipHostGetDevicePointer(&inputDevice, input, 0);
hipHostGetDevicePointer(&outputDevice, output, 0);
}
else
{
hipMalloc(&inputDevice, inputSize);
hipMalloc(&outputDevice, testConfig.NumOutputCategories() * sizeof(float));
}
float *bindings[2];
bindings[inputBindingIndex] = inputDevice;
bindings[outputBindingIndex] = outputDevice;
// run and compute average time over numRuns iterations
// (iteration 0 is a warm-up and is excluded from the average)
double avgTime = 0;
for (int i = 0; i < testConfig.NumRuns() + 1; i++)
{
chrono::duration<double> diff;
if (testConfig.UseMappedMemory())
{
auto t0 = chrono::steady_clock::now();
context->execute(1, (void**)bindings);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
else
{
auto t0 = chrono::steady_clock::now();
hipMemcpy(inputDevice, input, inputSize, hipMemcpyHostToDevice);
context->execute(1, (void**)bindings);
hipMemcpy(output, outputDevice, testConfig.NumOutputCategories() * sizeof(float), hipMemcpyDeviceToHost);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
if (i != 0)
avgTime += MS_PER_SEC * diff.count();
}
avgTime /= testConfig.NumRuns();
// save results to file
int maxCategoryIndex = argmax(output, testConfig.NumOutputCategories()) + 1001 - testConfig.NumOutputCategories();
cout << "Most likely category id is " << maxCategoryIndex << endl;
cout << "Average execution time in ms is " << avgTime << endl;
ofstream outfile;
outfile.open(testConfig.statsPath, ios_base::app);
outfile << "\n" << testConfig.planPath
<< " " << avgTime;
// << " " << maxCategoryIndex
// << " " << testConfig.InputWidth()
// << " " << testConfig.InputHeight()
// << " " << testConfig.MaxBatchSize()
// << " " << testConfig.WorkspaceSize()
// << " " << testConfig.dataType
// << " " << testConfig.NumRuns()
// << " " << testConfig.UseMappedMemory();
outfile.close();
hipFree(inputDevice);
hipFree(outputDevice);
hipHostFree(input);
hipHostFree(output);
// destroy in reverse order of creation
context->destroy();
engine->destroy();
runtime->destroy();
}
| b346d0e2a7534bdc9a20b9272bba399fbb7e8531.cu | /**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <chrono>
#include <stdexcept>
#include <fstream>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>
#define MS_PER_SEC 1000.0
using namespace std;
using namespace nvinfer1;
class TestConfig;
typedef void (*preprocess_fn_t)(float *input, size_t channels, size_t height, size_t width);
float * imageToTensor(const cv::Mat & image);
void preprocessVgg(float *input, size_t channels, size_t height, size_t width);
void preprocessInception(float *input, size_t channels, size_t height, size_t width);
size_t argmax(float *input, size_t numel);
void test(const TestConfig &testConfig);
class TestConfig
{
public:
string imagePath;
string planPath;
string inputNodeName;
string outputNodeName;
string preprocessFnName;
string inputHeight;
string inputWidth;
string numOutputCategories;
string dataType;
string maxBatchSize;
string workspaceSize;
string numRuns;
string useMappedMemory;
string statsPath;
TestConfig(int argc, char * argv[])
{
imagePath = argv[1];
planPath = argv[2];
inputNodeName = argv[3];
inputHeight = argv[4];
inputWidth = argv[5];
outputNodeName = argv[6];
numOutputCategories = argv[7];
preprocessFnName = argv[8];
numRuns = argv[9];
dataType = argv[10];
maxBatchSize = argv[11];
workspaceSize = argv[12];
useMappedMemory = argv[13];
statsPath = argv[14];
}
static string UsageString()
{
string s = "";
s += "imagePath: \n";
s += "planPath: \n";
s += "inputNodeName: \n";
s += "inputHeight: \n";
s += "inputWidth: \n";
s += "outputNodeName: \n";
s += "numOutputCategories: \n";
s += "preprocessFnName: \n";
s += "numRuns: \n";
s += "dataType: \n";
s += "maxBatchSize: \n";
s += "workspaceSize: \n";
s += "useMappedMemory: \n";
s += "statsPath: \n";
return s;
}
string ToString()
{
string s = "";
s += "imagePath: " + imagePath + "\n";
s += "planPath: " + planPath + "\n";
s += "inputNodeName: " + inputNodeName + "\n";
s += "inputHeight: " + inputHeight + "\n";
s += "inputWidth: " + inputWidth + "\n";
s += "outputNodeName: " + outputNodeName + "\n";
s += "numOutputCategories: " + numOutputCategories + "\n";
s += "preprocessFnName: " + preprocessFnName + "\n";
s += "numRuns: " + numRuns + "\n";
s += "dataType: " + dataType + "\n";
s += "maxBatchSize: " + maxBatchSize + "\n";
s += "workspaceSize: " + workspaceSize + "\n";
s += "useMappedMemory: " + useMappedMemory + "\n";
s += "statsPath: " + statsPath + "\n";
return s;
}
static int ToInteger(string value)
{
int valueInt;
stringstream ss;
ss << value;
ss >> valueInt;
return valueInt;
}
preprocess_fn_t PreprocessFn() const {
if (preprocessFnName == "preprocess_vgg")
return preprocessVgg;
else if (preprocessFnName == "preprocess_inception")
return preprocessInception;
else
throw runtime_error("Invalid preprocessing function name.");
}
int InputWidth() const { return ToInteger(inputWidth); }
int InputHeight() const { return ToInteger(inputHeight); }
int NumOutputCategories() const { return ToInteger(numOutputCategories); }
nvinfer1::DataType DataType() const {
if (dataType == "float")
return nvinfer1::DataType::kFLOAT;
else if (dataType == "half")
return nvinfer1::DataType::kHALF;
else
throw runtime_error("Invalid data type.");
}
int MaxBatchSize() const { return ToInteger(maxBatchSize); }
int WorkspaceSize() const { return ToInteger(workspaceSize); }
int NumRuns() const { return ToInteger(numRuns); }
int UseMappedMemory() const { return ToInteger(useMappedMemory); }
};
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
cout << msg << endl;
}
} gLogger;
int main(int argc, char * argv[])
{
if (argc != 15)
{
cout << TestConfig::UsageString() << endl;
return 0;
}
TestConfig testConfig(argc, argv);
cout << "\ntestConfig: \n" << testConfig.ToString() << endl;
test(testConfig);
return 0;
}
float *imageToTensor(const cv::Mat & image)
{
const size_t height = image.rows;
const size_t width = image.cols;
const size_t channels = image.channels();
const size_t numel = height * width * channels;
const size_t stridesCv[3] = { width * channels, channels, 1 };
const size_t strides[3] = { height * width, width, 1 };
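// stridesCv indexes OpenCV's interleaved HWC layout; strides indexes the
// planar CHW layout of the tensor. E.g. for a 4x4 RGB image, pixel
// (i=1, j=2, k=0) sits at offsetCv = 1*12 + 2*3 + 0 = 18 in the image but
// at offset = 0*16 + 1*4 + 2 = 6 in the tensor.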
float * tensor;
cudaHostAlloc((void**)&tensor, numel * sizeof(float), cudaHostAllocMapped);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2];
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] = (float) image.data[offsetCv];
}
}
}
return tensor;
}
void preprocessVgg(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t strides[3] = { height * width, width, 1 };
const float mean[3] = { 123.68, 116.78, 103.94 };
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
for (int k = 0; k < channels; k++)
{
const size_t offset = k * strides[0] + i * strides[1] + j * strides[2];
tensor[offset] -= mean[k];
}
}
}
}
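// preprocessInception below maps pixel values from [0, 255] to [-1, 1]:
// x = 0 -> -1.0 and x = 255 -> +1.0.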
void preprocessInception(float * tensor, size_t channels, size_t height, size_t width)
{
const size_t numel = channels * height * width;
for (int i = 0; i < numel; i++)
tensor[i] = 2.0 * (tensor[i] / 255.0 - 0.5);
}
size_t argmax(float * tensor, size_t numel)
{
if (numel <= 0)
return 0;
size_t maxIndex = 0;
float max = tensor[0];
for (int i = 0; i < numel; i++)
{
if (tensor[i] > max)
{
maxIndex = i;
max = tensor[i];
}
}
return maxIndex;
}
void test(const TestConfig &testConfig)
{
ifstream planFile(testConfig.planPath);
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(),
plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(testConfig.inputNodeName.c_str());
outputBindingIndex = engine->getBindingIndex(testConfig.outputNodeName.c_str());
// load and preprocess image
cv::Mat image = cv::imread(testConfig.imagePath, 0);
/* cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3); */
cv::resize(image, image, cv::Size(testConfig.InputWidth(), testConfig.InputHeight()));
float *input = imageToTensor(image);
testConfig.PreprocessFn()(input, 1, testConfig.InputHeight(), testConfig.InputWidth());
// allocate memory on host / device for input / output
float *output;
float *inputDevice;
float *outputDevice;
size_t inputSize = testConfig.InputHeight() * testConfig.InputWidth() * 1 * sizeof(float);
cudaHostAlloc(&output, testConfig.NumOutputCategories() * sizeof(float), cudaHostAllocMapped);
if (testConfig.UseMappedMemory())
{
cudaHostGetDevicePointer(&inputDevice, input, 0);
cudaHostGetDevicePointer(&outputDevice, output, 0);
}
else
{
cudaMalloc(&inputDevice, inputSize);
cudaMalloc(&outputDevice, testConfig.NumOutputCategories() * sizeof(float));
}
float *bindings[2];
bindings[inputBindingIndex] = inputDevice;
bindings[outputBindingIndex] = outputDevice;
// run and compute average time over numRuns iterations
// (iteration 0 is a warm-up and is excluded from the average)
double avgTime = 0;
for (int i = 0; i < testConfig.NumRuns() + 1; i++)
{
chrono::duration<double> diff;
if (testConfig.UseMappedMemory())
{
auto t0 = chrono::steady_clock::now();
context->execute(1, (void**)bindings);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
else
{
auto t0 = chrono::steady_clock::now();
cudaMemcpy(inputDevice, input, inputSize, cudaMemcpyHostToDevice);
context->execute(1, (void**)bindings);
cudaMemcpy(output, outputDevice, testConfig.NumOutputCategories() * sizeof(float), cudaMemcpyDeviceToHost);
auto t1 = chrono::steady_clock::now();
diff = t1 - t0;
}
if (i != 0)
avgTime += MS_PER_SEC * diff.count();
}
avgTime /= testConfig.NumRuns();
// save results to file
int maxCategoryIndex = argmax(output, testConfig.NumOutputCategories()) + 1001 - testConfig.NumOutputCategories();
cout << "Most likely category id is " << maxCategoryIndex << endl;
cout << "Average execution time in ms is " << avgTime << endl;
ofstream outfile;
outfile.open(testConfig.statsPath, ios_base::app);
outfile << "\n" << testConfig.planPath
<< " " << avgTime;
// << " " << maxCategoryIndex
// << " " << testConfig.InputWidth()
// << " " << testConfig.InputHeight()
// << " " << testConfig.MaxBatchSize()
// << " " << testConfig.WorkspaceSize()
// << " " << testConfig.dataType
// << " " << testConfig.NumRuns()
// << " " << testConfig.UseMappedMemory();
outfile.close();
cudaFree(inputDevice);
cudaFree(outputDevice);
cudaFreeHost(input);
cudaFreeHost(output);
engine->destroy();
context->destroy();
runtime->destroy();
}
|
5713ce476b61477b63b6987a7fbe808d29ec0188.hip | // !!! This is a file automatically generated by hipify!!!
// This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, true, 128);
| 5713ce476b61477b63b6987a7fbe808d29ec0188.cu | // This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::bfloat16_t, true, 128);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::bfloat16_t, true, 128);
|
509bbc7284357f775ae4d0a6ab8908bb7deef6eb.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include "MatUtil.h"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
extern "C"
void computeGold(float *reference, float *idata, const unsigned int len);
void GenMatrix(int *mat, const size_t N);
bool CmpArray(const int *l, const int *r, const size_t eleNum);
void ST_APSP(int *mat, const size_t N);
void printArray(const int *l, const size_t eleNum);
////////////////////////////////////////////////////////////////////////////////
//! APSP optimized kernel for device functionality
//! @param g_idata input data in global memory Matrix
//! @param k input data in global memory Current k
//! @param N input data in global memory Size of the matrix
////////////////////////////////////////////////////////////////////////////////
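//! This kernel performs one Floyd-Warshall relaxation phase for pivot k:
//! dist(i,j) = min(dist(i,j), dist(i,k) + dist(k,j)), where -1 stands in
//! for "no path" (infinity).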
__global__ void
apspKernel(int *g_idata, int k, int N)
{
// One dynamic shared-memory allocation, partitioned into two tiles;
// declaring two extern __shared__ arrays would alias the same buffer.
extern __shared__ int tile[];
int *kRow = tile; // dist(k, mY) for this block's columns
int *kCol = tile + blockDim.y; // dist(mX, k) for this block's rows
int mX = blockIdx.x*blockDim.x + threadIdx.x;
int mY = blockIdx.y*blockDim.y + threadIdx.y;
int i0 = mX*N + mY;
int i1 = mX*N + k;
int i2 = k*N + mY;
if(threadIdx.x == 0 && mY < N)
kRow[threadIdx.y] = g_idata[i2];
if(threadIdx.y == 0 && mX < N)
kCol[threadIdx.x] = g_idata[i1];
__syncthreads();
if(mX < N && mY < N)
{
int curr_elmnt = g_idata[i0];
if(kCol[threadIdx.x] != -1 && kRow[threadIdx.y] != -1)
{
int sum = (kCol[threadIdx.x] + kRow[threadIdx.y]);
if (curr_elmnt == -1 || sum < curr_elmnt)
g_idata[i0] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
size_t N = atoi(argv[1]);
unsigned int mem_size = sizeof(int)*N*N;
int *mat = (int*)malloc(mem_size);
GenMatrix(mat, N);
////////////////////////////////////////////////////////////////////////////////
//! Compute the reference result
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *timerRef = 0;
sdkCreateTimer(&timerRef);
int *ref = (int*)malloc(mem_size);
memcpy(ref, mat, mem_size);
sdkStartTimer(&timerRef);
ST_APSP(ref, N);
sdkStopTimer(&timerRef);
double tseq = sdkGetTimerValue(&timerRef);
printf("Processing ref time: %f (ms)\n", tseq);
// printf("Processing ref time: %f (ms)\n", sdkGetTimerValue(&timerRef));
sdkDeleteTimer(&timerRef);
////////////////////////////////////////////////////////////////////////////////
//! Compute the parallel result with the APSP optimized kernel
////////////////////////////////////////////////////////////////////////////////
// specified CUDA device, otherwise use device with highest Gflops/s
int devID = findCudaDevice(argc, (const char **)argv);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// allocate device memory
int *d_idata;
checkCudaErrors(hipMalloc((void **) &d_idata, sizeof(int)*N*N));
// copy host memory to device
checkCudaErrors(hipMemcpyAsync(d_idata, mat, sizeof(int)*N*N,
hipMemcpyHostToDevice, 0));
// setup execution parameters
int width = N;
int tileWidth = 8; // 8x8 = 64 threads/block
// integer round-up so partial edge tiles are also covered
int sizeGrid = (width + tileWidth - 1) / tileWidth;
dim3 dimGrid(sizeGrid, sizeGrid, 1);
dim3 dimBlock(tileWidth, tileWidth, 1);
// execute the kernel; the k-loop is serial because phase k+1 reads
// distances produced by phase k
for(int k = 0; k < N; k++){
// 2*tileWidth ints of dynamic shared memory: one tile for row k, one for column k
hipLaunchKernelGGL(( apspKernel), dim3(dimGrid), dim3(dimBlock), 2*tileWidth*sizeof(int) , 0, d_idata, k, N);
}
// check if kernel execution generated an error
getLastCudaError("Kernel execution failed");
// allocate mem for the result on host side
int *result = (int *) malloc(mem_size);
// copy result from device to host
checkCudaErrors(hipMemcpyAsync(result, d_idata, sizeof(int) * N*N,
hipMemcpyDeviceToHost, 0));
sdkStopTimer(&timer);
double tp = sdkGetTimerValue(&timer);
printf("Processing parallel time: %f (ms)\n", tp);
sdkDeleteTimer(&timer);
////////////////////////////////////////////////////////////////////////////////
//! Compute speedup and compare results
////////////////////////////////////////////////////////////////////////////////
double speed = tseq/tp;
printf("Speed = %f \n", speed);
//compare results
bool bTestResult = CmpArray(result, ref, N*N);
if(bTestResult)
{
printf("Your result is correct.\n");
}
else
{
printf("Your result is wrong.\n");
}
// cleanup memory
free(mat);
free(ref);
free(result);
checkCudaErrors(hipFree(d_idata));
hipDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
| 509bbc7284357f775ae4d0a6ab8908bb7deef6eb.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include "MatUtil.h"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
extern "C"
void computeGold(float *reference, float *idata, const unsigned int len);
void GenMatrix(int *mat, const size_t N);
bool CmpArray(const int *l, const int *r, const size_t eleNum);
void ST_APSP(int *mat, const size_t N);
void printArray(const int *l, const size_t eleNum);
////////////////////////////////////////////////////////////////////////////////
//! APSP optimized kernel for device functionality
//! @param g_idata input data in global memory Matrix
//! @param k input data in global memory Current k
//! @param N input data in global memory Size of the matrix
////////////////////////////////////////////////////////////////////////////////
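//! This kernel performs one Floyd-Warshall relaxation phase for pivot k:
//! dist(i,j) = min(dist(i,j), dist(i,k) + dist(k,j)), where -1 stands in
//! for "no path" (infinity).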
__global__ void
apspKernel(int *g_idata, int k, int N)
{
// One dynamic shared-memory allocation, partitioned into two tiles;
// declaring two extern __shared__ arrays would alias the same buffer.
extern __shared__ int tile[];
int *kRow = tile; // dist(k, mY) for this block's columns
int *kCol = tile + blockDim.y; // dist(mX, k) for this block's rows
int mX = blockIdx.x*blockDim.x + threadIdx.x;
int mY = blockIdx.y*blockDim.y + threadIdx.y;
int i0 = mX*N + mY;
int i1 = mX*N + k;
int i2 = k*N + mY;
if(threadIdx.x == 0 && mY < N)
kRow[threadIdx.y] = g_idata[i2];
if(threadIdx.y == 0 && mX < N)
kCol[threadIdx.x] = g_idata[i1];
__syncthreads();
if(mX < N && mY < N)
{
int curr_elmnt = g_idata[i0];
if(kCol[threadIdx.x] != -1 && kRow[threadIdx.y] != -1)
{
int sum = (kCol[threadIdx.x] + kRow[threadIdx.y]);
if (curr_elmnt == -1 || sum < curr_elmnt)
g_idata[i0] = sum;
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
size_t N = atoi(argv[1]);
unsigned int mem_size = sizeof(int)*N*N;
int *mat = (int*)malloc(mem_size);
GenMatrix(mat, N);
////////////////////////////////////////////////////////////////////////////////
//! Compute the reference result
////////////////////////////////////////////////////////////////////////////////
StopWatchInterface *timerRef = 0;
sdkCreateTimer(&timerRef);
int *ref = (int*)malloc(mem_size);
memcpy(ref, mat, mem_size);
sdkStartTimer(&timerRef);
ST_APSP(ref, N);
sdkStopTimer(&timerRef);
double tseq = sdkGetTimerValue(&timerRef);
printf("Processing ref time: %f (ms)\n", tseq);
// printf("Processing ref time: %f (ms)\n", sdkGetTimerValue(&timerRef));
sdkDeleteTimer(&timerRef);
////////////////////////////////////////////////////////////////////////////////
//! Compute the parallel result with the APSP optimized kernel
////////////////////////////////////////////////////////////////////////////////
// specified CUDA device, otherwise use device with highest Gflops/s
int devID = findCudaDevice(argc, (const char **)argv);
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// allocate device memory
int *d_idata;
checkCudaErrors(cudaMalloc((void **) &d_idata, sizeof(int)*N*N));
// copy host memory to device
checkCudaErrors(cudaMemcpyAsync(d_idata, mat, sizeof(int)*N*N,
cudaMemcpyHostToDevice, 0));
// setup execution parameters
int width = N;
int tileWidth = 8; // 8x8 = 64 threads/block
// integer round-up so partial edge tiles are also covered
int sizeGrid = (width + tileWidth - 1) / tileWidth;
dim3 dimGrid(sizeGrid, sizeGrid, 1);
dim3 dimBlock(tileWidth, tileWidth, 1);
// execute the kernel; the k-loop is serial because phase k+1 reads
// distances produced by phase k
for(int k = 0; k < N; k++){
// 2*tileWidth ints of dynamic shared memory: one tile for row k, one for column k
apspKernel<<< dimGrid, dimBlock, 2*tileWidth*sizeof(int) >>>(d_idata, k, N);
}
// check if kernel execution generated and error
getLastCudaError("Kernel execution failed");
// allocate mem for the result on host side
int *result = (int *) malloc(mem_size);
// copy result from device to host
checkCudaErrors(cudaMemcpyAsync(result, d_idata, sizeof(int) * N*N,
cudaMemcpyDeviceToHost, 0));
sdkStopTimer(&timer);
double tp = sdkGetTimerValue(&timer);
printf("Processing parallel time: %f (ms)\n", tp);
sdkDeleteTimer(&timer);
////////////////////////////////////////////////////////////////////////////////
//! Compute speedup and compare results
////////////////////////////////////////////////////////////////////////////////
double speed = tseq/tp;
printf("Speed = %f \n", speed);
//compare results
bool bTestResult = CmpArray(result, ref, N*N);
if(bTestResult)
{
printf("Your result is correct.\n");
}
else
{
printf("Your result is wrong.\n");
}
// cleanup memory
free(mat);
free(ref);
free(result);
checkCudaErrors(cudaFree(d_idata));
cudaDeviceReset();
exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
059d8a9931a27b60da82d07325f01a407bdc79a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <openssl/md5.h>
#include <hip/hip_runtime.h>
#include "md5.cu"
#define PASSWORD_LENGTH 6
#define FIFTH_POWER (26 * 26 * 26 * 26 * 26)
#define FOURTH_POWER (26 * 26 * 26 * 26)
#define THIRD_POWER (26 * 26 * 26)
#define SECOND_POWER (26 * 26)
#define FIRST_POWER 26
__device__ int POWER_ARR[] = {1, FIRST_POWER, SECOND_POWER, THIRD_POWER, FOURTH_POWER, FIFTH_POWER};
typedef struct output{
char password[PASSWORD_LENGTH+1];
uint hash[4];
}output_t;
/*hash_it_up takes a pointer to output and feeds out 'max' passwords and
hashes. Runs on the GPU*/
__global__ void hash_it_up (output_t * output, int max){
int tempNum;
for(int count = 0; count < max; count++){
char word[] = "aaaaaa";
tempNum = count;
//Generate and store a permutation
for(int i = PASSWORD_LENGTH - 1; i >= 0; i--){
int temp = (int) (tempNum/POWER_ARR[i]);
word[PASSWORD_LENGTH -1 - i] += temp;
tempNum = tempNum % POWER_ARR[i];
}
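// e.g. count == 27 has base-26 digits (0,0,0,0,1,1), so word becomes "aaaabb"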
word[PASSWORD_LENGTH] = '\0';
//Store the Password
memcpy(output[count].password, word, sizeof(char)*PASSWORD_LENGTH+1);
//Hash a password
uint candidate_hash[4];
md5((uint*)word, candidate_hash);
//Store the hash
memcpy(output[count].hash, candidate_hash, sizeof(uint)*4);
}
}
int main(int argc,char* args[]){
FILE * file;
if(argc != 2){
printf("make-hash-file <int size>\n");
return 1;
}
int max = atoi(args[1]);
output_t * gpu_input;
output_t * output = (output_t *) malloc(sizeof(output_t)*max);
//Allocate space on GPU
if(hipMalloc(&gpu_input, sizeof(output_t)*max) != hipSuccess){
perror("Cuda Malloc Failed\n");
}
//Call the hash-generating function
hipLaunchKernelGGL(( hash_it_up), dim3(1),dim3(1), 0, 0, gpu_input, max);
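//Note: launched with a single thread, so the passwords are generated serially
//on the device. A parallel variant (hypothetical sketch) would derive count
//from blockIdx.x * blockDim.x + threadIdx.x and launch enough threads to
//cover 'max' instead of looping inside one thread.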
//Wait for all of the threads to finish
if(hipDeviceSynchronize() != hipSuccess){
perror("Cuda Sync Failed\n");
}
//Copy back the generated passwords from the GPU
if(hipMemcpy(output, gpu_input, sizeof(output_t)*max,
hipMemcpyDeviceToHost) != hipSuccess){
perror("Cuda Memcpy Failed Here\n");
exit(2);
}
//Write passwords and hashes to a file
file = fopen("outputFile.txt", "w");
fprintf(file, "%d\n", max);
for(int i = 0; i < max; i++){
fprintf(file, "%s ",output[i].password);
for(int j = 0; j < 4; j++){
fprintf(file, "%u ",output[i].hash[j]);
}
fprintf(file, "\n");
}
fclose(file);
hipFree(gpu_input);
free(output);
return 0;
}
| 059d8a9931a27b60da82d07325f01a407bdc79a3.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <openssl/md5.h>
#include <cuda.h>
#include "md5.cu"
#define PASSWORD_LENGTH 6
#define FIFTH_POWER (26 * 26 * 26 * 26 * 26)
#define FOURTH_POWER (26 * 26 * 26 * 26)
#define THIRD_POWER (26 * 26 * 26)
#define SECOND_POWER (26 * 26)
#define FIRST_POWER 26
__device__ int POWER_ARR[] = {1, FIRST_POWER, SECOND_POWER, THIRD_POWER, FOURTH_POWER, FIFTH_POWER};
typedef struct output{
char password[PASSWORD_LENGTH+1];
uint hash[4];
}output_t;
/*hash_it_up takes a pointer to output and feeds out 'max' passwords and
hashes. Runs on the GPU*/
__global__ void hash_it_up (output_t * output, int max){
int tempNum;
for(int count = 0; count < max; count++){
char word[] = "aaaaaa";
tempNum = count;
//Generate and store a permutation
for(int i = PASSWORD_LENGTH - 1; i >= 0; i--){
int temp = (int) (tempNum/POWER_ARR[i]);
word[PASSWORD_LENGTH -1 - i] += temp;
tempNum = tempNum % POWER_ARR[i];
}
word[PASSWORD_LENGTH] = '\0';
//Store the Password
memcpy(output[count].password, word, sizeof(char)*PASSWORD_LENGTH+1);
//Hash a password
uint candidate_hash[4];
md5((uint*)word, candidate_hash);
//Store the hash
memcpy(output[count].hash, candidate_hash, sizeof(uint)*4);
}
}
int main(int argc,char* args[]){
FILE * file;
if(argc != 2){
printf("make-hash-file <int size>\n");
return 1;
}
int max = atoi(args[1]);
output_t * gpu_input;
output_t * output = (output_t *) malloc(sizeof(output_t)*max);
//Allocate space on GPU
if(cudaMalloc(&gpu_input, sizeof(output_t)*max) != cudaSuccess){
perror("Cuda Malloc Failed\n");
}
//Call the hash-generating function
hash_it_up<<<1,1>>>(gpu_input, max);
//Wait for all of the threads to finish
if(cudaDeviceSynchronize() != cudaSuccess){
perror("Cuda Sync Failed\n");
}
//Copy back the generated passwords from the GPU
if(cudaMemcpy(output, gpu_input, sizeof(output_t)*max,
cudaMemcpyDeviceToHost) != cudaSuccess){
perror("Cuda Memcpy Failed Here\n");
exit(2);
}
//Write passwords and hashes to a file
file = fopen("outputFile.txt", "w");
fprintf(file, "%d\n", max);
for(int i = 0; i < max; i++){
fprintf(file, "%s ",output[i].password);
for(int j = 0; j < 4; j++){
fprintf(file, "%u ",output[i].hash[j]);
}
fprintf(file, "\n");
}
fclose(file);
cudaFree(gpu_input);
free(output);
return 0;
}
|
acba4903a61394149c1f5b85f6c60687dbda9ece.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <cuml/manifold/umapparams.h>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/neighbors/knn.hpp>
#include <distance/distance.cuh>
#include <metrics/trustworthiness.cuh>
#include <raft/cuda_utils.cuh>
#include <umap/runner.cuh>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Distance;
using namespace MLCommon::Datasets::Digits;
class UMAPTest : public ::testing::Test {
protected:
void xformTest() {
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), n_samples, n_features,
nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> xformed(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_transform<float, 256>(
handle, X_d.data(), n_samples, n_features, nullptr, nullptr, X_d.data(),
n_samples, embeddings.data(), n_samples, umap_params, xformed.data());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
xformed_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), xformed.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void fitTest() {
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), n_samples, n_features,
nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
fit_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void supervisedTest() {
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * 2);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), Y_d.data(), n_samples,
n_features, nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
supervised_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void fitWithKNNTest() {
raft::handle_t handle;
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(),
handle.get_stream());
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
MLCommon::device_buffer<int64_t> knn_indices(
handle.get_device_allocator(), handle.get_stream(),
n_samples * umap_params->n_neighbors);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
MLCommon::device_buffer<float> knn_dists(
handle.get_device_allocator(), handle.get_stream(),
n_samples * umap_params->n_neighbors);
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
std::vector<float *> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X_d.data();
sizes[0] = n_samples;
MLCommon::Selection::brute_force_knn(
ptrs, sizes, n_features, X_d.data(), n_samples, knn_indices.data(),
knn_dists.data(), umap_params->n_neighbors, handle.get_device_allocator(),
handle.get_stream());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
UMAPAlgo::_fit<float, 256>(
handle, X_d.data(), n_samples, n_features,
//knn_indices.data(), knn_dists.data(), umap_params,
nullptr, nullptr, umap_params, embeddings.data());
CUDA_CHECK(hipStreamSynchronize(handle.get_stream()));
fit_with_knn_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void SetUp() override {
fitTest();
xformTest();
supervisedTest();
fitWithKNNTest();
CUML_LOG_DEBUG("fit_score=%lf", fit_score);
CUML_LOG_DEBUG("xform_score=%lf", xformed_score);
CUML_LOG_DEBUG("supervised_score=%f", supervised_score);
CUML_LOG_DEBUG("fit_with_knn_score=%lf", fit_with_knn_score);
}
void TearDown() override {}
protected:
double fit_score;
double xformed_score;
double supervised_score;
double fit_with_knn_score;
};
typedef UMAPTest UMAPTestF;
TEST_F(UMAPTestF, Result) {
ASSERT_TRUE(fit_score > 0.98);
ASSERT_TRUE(xformed_score > 0.80);
ASSERT_TRUE(supervised_score > 0.98);
ASSERT_TRUE(fit_with_knn_score > 0.96);
}
| acba4903a61394149c1f5b85f6c60687dbda9ece.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <iostream>
#include <vector>
#include <cuml/manifold/umapparams.h>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/common/device_buffer.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/cuml.hpp>
#include <cuml/neighbors/knn.hpp>
#include <distance/distance.cuh>
#include <metrics/trustworthiness.cuh>
#include <raft/cuda_utils.cuh>
#include <umap/runner.cuh>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Distance;
using namespace MLCommon::Datasets::Digits;
class UMAPTest : public ::testing::Test {
protected:
void xformTest() {
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), n_samples, n_features,
nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> xformed(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_transform<float, 256>(
handle, X_d.data(), n_samples, n_features, nullptr, nullptr, X_d.data(),
n_samples, embeddings.data(), n_samples, umap_params, xformed.data());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
xformed_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), xformed.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void fitTest() {
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), n_samples, n_features,
nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
fit_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void supervisedTest() {
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(), stream);
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
device_buffer<float> Y_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * 2);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
UMAPAlgo::_fit<float, 256>(handle, X_d.data(), Y_d.data(), n_samples,
n_features, nullptr, nullptr, umap_params,
embeddings.data());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
supervised_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void fitWithKNNTest() {
raft::handle_t handle;
UMAPParams *umap_params = new UMAPParams();
umap_params->n_neighbors = 10;
umap_params->init = 1;
umap_params->verbosity = CUML_LEVEL_INFO;
UMAPAlgo::find_ab(umap_params, handle.get_device_allocator(),
handle.get_stream());
device_buffer<float> X_d(handle.get_device_allocator(), handle.get_stream(),
n_samples * n_features);
MLCommon::updateDevice(X_d.data(), digits.data(), n_samples * n_features,
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
device_buffer<float> embeddings(handle.get_device_allocator(),
handle.get_stream(),
n_samples * umap_params->n_components);
MLCommon::device_buffer<int64_t> knn_indices(
handle.get_device_allocator(), handle.get_stream(),
n_samples * umap_params->n_neighbors);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
MLCommon::device_buffer<float> knn_dists(
handle.get_device_allocator(), handle.get_stream(),
n_samples * umap_params->n_neighbors);
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
std::vector<float *> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X_d.data();
sizes[0] = n_samples;
MLCommon::Selection::brute_force_knn(
ptrs, sizes, n_features, X_d.data(), n_samples, knn_indices.data(),
knn_dists.data(), umap_params->n_neighbors, handle.get_device_allocator(),
handle.get_stream());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
UMAPAlgo::_fit<float, 256>(
handle, X_d.data(), n_samples, n_features,
//knn_indices.data(), knn_dists.data(), umap_params,
nullptr, nullptr, umap_params, embeddings.data());
CUDA_CHECK(cudaStreamSynchronize(handle.get_stream()));
fit_with_knn_score = trustworthiness_score<float, EucUnexpandedL2Sqrt>(
handle, X_d.data(), embeddings.data(), n_samples, n_features,
umap_params->n_components, umap_params->n_neighbors);
}
void SetUp() override {
fitTest();
xformTest();
supervisedTest();
fitWithKNNTest();
CUML_LOG_DEBUG("fit_score=%lf", fit_score);
CUML_LOG_DEBUG("xform_score=%lf", xformed_score);
CUML_LOG_DEBUG("supervised_score=%f", supervised_score);
CUML_LOG_DEBUG("fit_with_knn_score=%lf", fit_with_knn_score);
}
void TearDown() override {}
protected:
double fit_score;
double xformed_score;
double supervised_score;
double fit_with_knn_score;
};
typedef UMAPTest UMAPTestF;
TEST_F(UMAPTestF, Result) {
ASSERT_TRUE(fit_score > 0.98);
ASSERT_TRUE(xformed_score > 0.80);
ASSERT_TRUE(supervised_score > 0.98);
ASSERT_TRUE(fit_with_knn_score > 0.96);
}
|
b72168f71e0297ec4d4bff3c60237151a6d58361.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rinit(float * init, const unsigned int * fsum, const float * ncrs) {
int idx = threadIdx.x + blockIdx.x*blockDim.x;
init[idx] = sqrtf((float)fsum[idx] / ncrs[idx]);
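// Note: no bounds guard on idx -- the launch configuration must supply
// exactly one thread per output element.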
} | b72168f71e0297ec4d4bff3c60237151a6d58361.cu | #include "includes.h"
__global__ void rinit(float * init, const unsigned int * fsum, const float * ncrs) {
int idx = threadIdx.x + blockIdx.x*blockDim.x;
init[idx] = sqrtf((float)fsum[idx] / ncrs[idx]);
} |
134e7222d3fc2a28ceb91cf845ae8d8cf8e51018.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_SIZE 256
int UI(int argc, char* argv[], int* param);
void initData(int* data, int len);
void showData(int* data, int len);
void histCountCPU(int* data, int* hist, int len, int bin);
void checkCUDAError(hipError_t e);
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part);
void resultCheck(int* result_CPU, int* result_GPU, int size);
int main(int argc, char* argv[]){
// reset rand seed
srand((unsigned)time(NULL));
clock_t start, finish;
int total_time;
// Go through UI first.
// In UI section, only command with valid param can go to the next step.
int UIStatus;
int param[2];
UIStatus = UI(argc, argv, param);
if (UIStatus != 0) {
printf("\nApplication terminates.\n");
return 0;
}
// UI section ends
// Initialize data array with int type
const int bin = param[0];
const int len = param[1];
int* data = (int*)malloc(len * sizeof(int));
initData(data, len);
showData(data, len);
printf("Done initializing data array with length %d.\n", len);
// Initializing ends
// CPU code for calculating the histogram
// Use this result to varify the kernel result later
// Using calloc to initialize the hist with zeros.
int* hist_CPU = (int*)calloc(bin, sizeof(int));
int* hist_GPU = (int*)calloc(bin, sizeof(int));
start = clock();
histCountCPU(data, hist_CPU, len, bin);
finish = clock();
total_time = (int)((finish - start) * 1000 / CLOCKS_PER_SEC);
printf("\nhist_CPU:");
showData(hist_CPU, bin);
printf("Done histogrm calculation with CPU in %d miliseconds.\n", total_time);
// Histogram calculating with CPU ends
// Allocate device memory, copy data from host to device, initialize device histogram with zeros.
int *d_data, *d_hist;
checkCUDAError(hipMalloc((int**)&d_data, len * sizeof(int)));
checkCUDAError(hipMalloc((int**)&d_hist, bin * sizeof(int)));
printf("\nDone allocating space in device.");
checkCUDAError(hipMemcpy(d_data, data, len * sizeof(int), hipMemcpyHostToDevice));
printf("\nDone copying memory from host to device.");
checkCUDAError(hipMemset(d_hist, 0, bin));
printf("\nDone initializing device histogram with zeros.\n");
// Done allocating, transfering and initializing
// Initialize thread block and kernel grid dimensions
dim3 threads(BLOCK_SIZE);
// dim3 grid((int)ceil(1.0 * len / threads.x));
dim3 grid(120);
printf("\nDone initializing block dimention and grid dimention.");
// Done initializing thread block and kernel grid dimensions
// launch CUDA device kernel
start = clock();
hipLaunchKernelGGL(( histCountGPU), dim3(grid), dim3(threads), bin * sizeof(int) , 0, d_data, d_hist, len, bin, 1024 / bin);
// Done CUDA device kernel
// Copy results from device to host and free device memory
checkCUDAError(hipDeviceSynchronize());
finish = clock();
total_time = (int)((finish - start) * 1000 / CLOCKS_PER_SEC);
printf("\nDone histogram calculation with GPU in %d milliseconds.\n", total_time);
checkCUDAError(hipMemcpy(hist_GPU, d_hist, bin * sizeof(int), hipMemcpyDeviceToHost));
printf("hist_GPU:");
showData(hist_GPU, bin);
checkCUDAError(hipFree(d_hist));
checkCUDAError(hipFree(d_data));
// Done copying results and freeing device memory
// Check the result of the Calculated Matrix
resultCheck(hist_CPU, hist_GPU, bin);
// Done result checking.
free(data);
free(hist_CPU);
free(hist_GPU);
return 0;
}
// UI for main function
// return 0 means everything's fine, just continue;
// return 1 means there's invalid input or '--help', terminate running.
int UI(int argc, char* argv[], int* param) {
// UI for the exe file
// while input with -h or --help; tell that what we need as params in linux style
// while input with 0 or 1 or more than 2 parameters; tell that we need 2 params
// while input with 2 paramerters; print the size of two input matrix; check if all params are valid;
// param[0] is valid if it is exponent of 2. param[1] is valid if it is greater than 0
if (argc == 2 && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0)) {
printf("CUDA Programming Homework. Histogram Algorithm.\n");
printf("\nUsage: hist [OPTION]...\n");
printf("\nOptions:\n");
printf("%5s, %-10s %-50s\n", "-h", "--help", "Show helping information.");
printf("%5s, %-10s %-50s\n", "-i", "--input", "Followed by 2 integers as input parameters.");
printf("\nExamples:\n");
printf("hist -h\n");
printf(" Shows the helping information.\n");
printf("hist -i 8 200\n");
printf(" 8 represents 8 bins in histogram, 200 means the length of the data\n");
return 1;
}
if (argc == 4 && (strcmp(argv[1], "-i") == 0 || strcmp(argv[1], "--input") == 0)) {
int bin = atoi(argv[2]);
int len = atoi(argv[3]);
int div, mod, cache = bin, count = 0;
while (cache > 1) {
++count;
div = cache / 2;
mod = cache - div * 2;
if (mod == 1) {
printf("Invalid bin numbers. The bin numbers should be exponent of 2, range from 2^2 to 2^8\n");
return 1;
}
cache = div;
}
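// The loop above tests that 'bin' is a power of two with exponent in [2, 8],
// i.e. bin in {4, 8, ..., 256}. An equivalent constant-time check
// (hypothetical alternative): bin >= 4 && bin <= 256 && (bin & (bin - 1)) == 0.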
if (count > 8 || count < 2) {
printf("Invalid bin numbers. The bin numbers should be exponent of 2, range from 2^2 to 2^8\n");
return 1;
}
if (len <= 0) {
printf("Invalid array length. The array length should be an integer greater than 0.\n");
return 1;
}
else {
printf("Bin numbers: %d\n", bin);
printf("Array length: %d\n", len);
param[0] = bin;
param[1] = len;
return 0;
}
}
else {
printf("Invalid command. Please check how to make valid command by '-h' or '--help'.\n");
return 1;
}
}
// initialize data with int type range [0, 1024)
void initData(int* data, int len) {
for (int i = 0; i < len; ++i)
data[i] = rand() % 1024;
return;
}
// show the data in the command prompt.
// this function is used for quick verification/debugging
// only shows the first 10 elements when the array is too long
void showData(int* data, int len) {
printf("data:\n[");
for (int i = 0; i < len && i < 10; ++i) {
if (i != 0) printf(",");
printf("%4d", data[i]);
}
if (len > 10) printf("...");
printf("]\n");
return;
}
// histogram count with CPU, single linear pass
// Algo Complexity: O(len)
void histCountCPU(int* data, int* hist, int len, int bin) {
int part = 1024 / bin;
for (int i = 0; i < len; ++i) {
++hist[data[i] / part];
}
return;
}
// Check the result of the CUDA function.
void checkCUDAError(hipError_t e) {
if (e == 0) return;
printf("\nError: %s\n", hipGetErrorName(e));
printf("%s\n", hipGetErrorString(e));
exit(0);
}
// histogram count with GPU device
// uses privatized per-block histograms in shared memory to reduce global atomic contention
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part) {
// calculating thread id
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Privatized bins
extern __shared__ int histo_s[];
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
histo_s[binIdx] = 0;
}
__syncthreads();
// Histogram Count
for (unsigned int i = tid; i < len; i += blockDim.x * gridDim.x) {
atomicAdd(&(histo_s[d_data[i] / part]), 1);
}
__syncthreads();
// Commit to global memory
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]);
}
}
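// Design note: each block first accumulates into its private shared-memory
// copy (histo_s), so global atomicAdd traffic drops from one per data element
// to one per bin per block -- a big win when bin << len.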
// check if two array is exactly the same
void resultCheck(int* result_CPU, int* result_GPU, int size) {
for (int i = 0; i < size; ++i) {
if (result_CPU[i] != result_GPU[i]) {
printf("\nResult check: Error!!!! Didn't pass.");
return;
}
}
printf("\nResult check: ---PASS---.");
return;
}
| 134e7222d3fc2a28ceb91cf845ae8d8cf8e51018.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BLOCK_SIZE 256
int UI(int argc, char* argv[], int* param);
void initData(int* data, int len);
void showData(int* data, int len);
void histCountCPU(int* data, int* hist, int len, int bin);
void checkCUDAError(cudaError_t e);
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part);
void resultCheck(int* result_CPU, int* result_GPU, int size);
int main(int argc, char* argv[]){
// reset rand seed
srand((unsigned)time(NULL));
clock_t start, finish;
int total_time;
// Go through UI first.
// In UI section, only command with valid param can go to the next step.
int UIStatus;
int param[2];
UIStatus = UI(argc, argv, param);
if (UIStatus != 0) {
printf("\nApplication terminates.\n");
return 0;
}
// UI section ends
// Initialize data array with int type
const int bin = param[0];
const int len = param[1];
int* data = (int*)malloc(len * sizeof(int));
initData(data, len);
showData(data, len);
printf("Done initializing data array with length %d.\n", len);
// Initializing ends
// CPU code for calculating the histogram
// Use this result to varify the kernel result later
// Using calloc to initialize the hist with zeros.
int* hist_CPU = (int*)calloc(bin, sizeof(int));
int* hist_GPU = (int*)calloc(bin, sizeof(int));
start = clock();
histCountCPU(data, hist_CPU, len, bin);
finish = clock();
total_time = (int)((finish - start) * 1000 / CLOCKS_PER_SEC);
printf("\nhist_CPU:");
showData(hist_CPU, bin);
printf("Done histogrm calculation with CPU in %d miliseconds.\n", total_time);
// Histogram calculating with CPU ends
// Allocate device memory, copy data from host to device, initialize device histogram with zeros.
int *d_data, *d_hist;
checkCUDAError(cudaMalloc((int**)&d_data, len * sizeof(int)));
checkCUDAError(cudaMalloc((int**)&d_hist, bin * sizeof(int)));
printf("\nDone allocating space in device.");
checkCUDAError(cudaMemcpy(d_data, data, len * sizeof(int), cudaMemcpyHostToDevice));
printf("\nDone copying memory from host to device.");
checkCUDAError(cudaMemset(d_hist, 0, bin));
printf("\nDone initializing device histogram with zeros.\n");
// Done allocating, transfering and initializing
// Initialize thread block and kernel grid dimensions
dim3 threads(BLOCK_SIZE);
// dim3 grid((int)ceil(1.0 * len / threads.x));
dim3 grid(120);
printf("\nDone initializing block dimention and grid dimention.");
// Done initializing thread block and kernel grid dimensions
// launch CUDA device kernel
start = clock();
histCountGPU<<< grid, threads, bin * sizeof(int) >>>(d_data, d_hist, len, bin, 1024 / bin);
// Done CUDA device kernel
// Copy results from device to host and free device memory
checkCUDAError(cudaDeviceSynchronize());
finish = clock();
total_time = (int)((finish - start) * 1000 / CLOCKS_PER_SEC);
printf("\nDone histogram calculation with GPU in %d milliseconds.\n", total_time);
checkCUDAError(cudaMemcpy(hist_GPU, d_hist, bin * sizeof(int), cudaMemcpyDeviceToHost));
printf("hist_GPU:");
showData(hist_GPU, bin);
checkCUDAError(cudaFree(d_hist));
checkCUDAError(cudaFree(d_data));
// Done copying results and freeing device memory
// Check the result of the Calculated Matrix
resultCheck(hist_CPU, hist_GPU, bin);
// Done result checking.
free(data);
free(hist_CPU);
free(hist_GPU);
return 0;
}
// UI for main function
// return 0 means everything's fine, just continue;
// return 1 means there's invalid input or '--help', terminate running.
int UI(int argc, char* argv[], int* param) {
// UI for the exe file
// while input with -h or --help; tell that what we need as params in linux style
// while input with 0 or 1 or more than 2 parameters; tell that we need 2 params
// while input with 2 paramerters; print the size of two input matrix; check if all params are valid;
// param[0] is valid if it is exponent of 2. param[1] is valid if it is greater than 0
if (argc == 2 && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0)) {
printf("CUDA Programming Homework. Histogram Algorithm.\n");
printf("\nUsage: hist [OPTION]...\n");
printf("\nOptions:\n");
printf("%5s, %-10s %-50s\n", "-h", "--help", "Show helping information.");
printf("%5s, %-10s %-50s\n", "-i", "--input", "Followed by 2 integers as input parameters.");
printf("\nExamples:\n");
printf("hist -h\n");
printf(" Shows the helping information.\n");
printf("hist -i 8 200\n");
printf(" 8 represents 8 bins in histogram, 200 means the length of the data\n");
return 1;
}
if (argc == 4 && (strcmp(argv[1], "-i") == 0 || strcmp(argv[1], "--input") == 0)) {
int bin = atoi(argv[2]);
int len = atoi(argv[3]);
int div, mod, cache = bin, count = 0;
while (cache > 1) {
++count;
div = cache / 2;
mod = cache - div * 2;
if (mod == 1) {
printf("Invalid bin numbers. The bin numbers should be exponent of 2, range from 2^2 to 2^8\n");
return 1;
}
cache = div;
}
if (count > 8 || count < 2) {
printf("Invalid bin numbers. The bin numbers should be exponent of 2, range from 2^2 to 2^8\n");
return 1;
}
if (len <= 0) {
printf("Invalid array length. The array length should be an integer greater than 0.\n");
return 1;
}
else {
printf("Bin numbers: %d\n", bin);
printf("Array length: %d\n", len);
param[0] = bin;
param[1] = len;
return 0;
}
}
else {
printf("Invalid command. Please check how to make valid command by '-h' or '--help'.\n");
return 1;
}
}
// initialize data with int type range [0, 1024)
void initData(int* data, int len) {
for (int i = 0; i < len; ++i)
data[i] = rand() % 1024;
return;
}
// show the data in the command prompt.
// this function is used for configuration
// only show previous 10 elements when length of array is too large
void showData(int* data, int len) {
printf("data:\n[");
for (int i = 0; i < len && i < 10; ++i) {
if (i != 0) printf(",");
printf("%4d", data[i]);
}
if (len > 10) printf("...");
printf("]\n");
return;
}
// matrix multiplication with CPU in the most stupid algo
// Algo Complexity: O((2k-1)*j*l)
void histCountCPU(int* data, int* hist, int len, int bin) {
int part = 1024 / bin;
for (int i = 0; i < len; ++i) {
++hist[data[i] / part];
}
return;
}
// Check the result of the CUDA function.
void checkCUDAError(cudaError_t e) {
if (e == 0) return;
printf("\nError: %s\n", cudaGetErrorName(e));
printf("%s\n", cudaGetErrorString(e));
exit(0);
}
// matrix multiplication with GPU device
// using tiling algorithm, take use of the shared memory to higher the compute-to-global-memory-access ratio
__global__
void histCountGPU(int* d_data, int* d_hist, int len, int bin, int part) {
// calculating thread id
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Privatized bins
extern __shared__ int histo_s[];
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
histo_s[binIdx] = 0;
}
__syncthreads();
// Histogram Count
for (unsigned int i = tid; i < len; i += blockDim.x * gridDim.x) {
atomicAdd(&(histo_s[d_data[i] / part]), 1);
}
__syncthreads();
// Commit to global memory
for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]);
}
}
// check if two array is exactly the same
void resultCheck(int* result_CPU, int* result_GPU, int size) {
for (int i = 0; i < size; ++i) {
if (result_CPU[i] != result_GPU[i]) {
printf("\nResult check: Error!!!! Didn't pass.");
return;
}
}
printf("\nResult check: ---PASS---.");
return;
}
|
d4d18bcc39ac98cb9ad6dc837822c23eb1db8251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void find_primes(int *a, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// int total_threads = gridDim.x * blockDim.x;
int is_prime = 1;
if (idx > 1 && idx < n){
int j;
for (j=2; j<idx/2+1; ++j){
if (!(idx % j)){ // j <= idx/2 < idx, so j can never equal idx
is_prime = 0;
break;
}
}
if (is_prime) a[idx] = 1;
}
}
| d4d18bcc39ac98cb9ad6dc837822c23eb1db8251.cu | __global__ void find_primes(int *a, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// int total_threads = gridDim.x * blockDim.x;
int is_prime = 1;
if (idx > 1 && idx < n){
int j;
for (j=2; j<idx/2+1; ++j){
if (!(idx % j)){ // j <= idx/2 < idx, so j can never equal idx
is_prime = 0;
break;
}
}
if (is_prime) a[idx] = 1;
}
}
|
90365492e91aa158a5ed651bc285b2bb481f3262.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C>
__global__ void
awkward_UnionArray_fillna(T* toindex,
const C* fromindex,
int64_t length,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < length) {
toindex[thread_id] = fromindex[thread_id] >= 0 ? fromindex[thread_id] : 0;
}
}
}
| 90365492e91aa158a5ed651bc285b2bb481f3262.cu | // BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
template <typename T, typename C>
__global__ void
awkward_UnionArray_fillna(T* toindex,
const C* fromindex,
int64_t length,
uint64_t invocation_index,
uint64_t* err_code) {
if (err_code[0] == NO_ERROR) {
int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_id < length) {
toindex[thread_id] = fromindex[thread_id] >= 0 ? fromindex[thread_id] : 0;
}
}
}
|
35a6e26dab644e1fccf268194e454b7678dd6b60.hip | // !!! This is a file automatically generated by hipify!!!
#include <Utils/Timer.h>
#include <Renderer/Application.h>
#include <Cuda/Reduce.cuh>
#include <Cuda/Scan.cuh>
#include <Cuda/RadixSort.cuh>
#include <Cuda/Agent.cuh>
#include <Cuda/ContinuousCollision.cuh>
#include <Cuda/CudaSimulator.cuh>
#include <Test/InsertionSortCudaTest.cuh>
#include <stdio.h>
#include <iostream>
#include <memory>
#define checkCudaErrors(call) \
do { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("CUDA error at %s %d: %s\n", __FILE__, __LINE__, \
hipGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while (0)
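// The do { ... } while (0) wrapper makes the macro expand to a single
// statement, so it nests safely under if/else without braces.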
__device__
int add(int a, int b) {
return a + b;
}
int serialReduce(int* ints, int size) {
int sum = 0;
for (int i = 0; i < size; ++i) {
sum += ints[i];
}
return sum;
}
void reducePlayground() {
printf("\nBegin reducePlayground\n");
constexpr int kSize = 1024 * 1024 * 32;
int* in = (int*) malloc(kSize *sizeof(int));
for (int i = 0; i < kSize; ++i) {
in[i] = 1;
}
int* d_in;
int* d_out;
hipMalloc(&d_in, kSize * sizeof(int));
hipMalloc(&d_out, kSize * sizeof(int));
hipMemcpy(d_in, in, kSize * sizeof(int), hipMemcpyHostToDevice);
hipMemset(d_out, 0, kSize * sizeof(int));
{
Timer timer("Reduce GPU");
int result = Reduce::reduce<int, add>(d_in, d_out, kSize);
printf("\nGPU result: %d\n", result);
}
{
Timer timer("Reduce SER");
int result = serialReduce(in, kSize);
printf("\nSER result: %d\n", result);
}
free(in);
hipFree(d_in);
hipFree(d_out);
printf("\nEnd reducePlayground\n\n");
}
void checkScanErrors(int* input, int * output, int* d_out, int size) {
hipMemcpy(output, d_out, size * sizeof(int), hipMemcpyDeviceToHost);
int expected = 0;
int numErrors = 0;
for (int i = 0; i < size; ++i) {
expected += input[i];
int actual = output[i];
if (expected != actual) {
//printf("Mismatch i %d exp %d act %d\n", i, expected, actual);
numErrors += 1;
}
}
if (numErrors > 0) {
printf("Num scan errors %d\n", numErrors);
}
}
void scanPlayground() {
printf("\nBegin scanPlayground\n");
constexpr int kSize = 1024 * 1024 * 4;
int* input = (int*) malloc(kSize * sizeof(int));
int* output = (int*) malloc(kSize * sizeof(int));
for (int i = 0; i < kSize; ++i) {
input[i] = 1;
}
int* d_data;
checkCudaErrors(hipMalloc(&d_data, kSize * sizeof(int) * 2));
hipMemcpy(d_data, input, kSize * sizeof(int), hipMemcpyHostToDevice);
{
Timer timer("GPU Scan");
for (int i = 0; i < 10; ++i) {
Scan::scan<int, add>(d_data, kSize);
checkScanErrors(input, output, d_data, kSize);
}
}
free(input);
free(output);
hipFree(d_data);
printf("\nEnd scanPlayground\n\n");
}
void radixSortPlayground() {
printf("\nBegin radixSortPlayground\n");
constexpr int kSize = 1024 * 32;
unsigned int* input = (unsigned int*) malloc(kSize * sizeof(unsigned int));
unsigned int* output = (unsigned int*) malloc(kSize * sizeof(unsigned int));
for (int i = 0; i < kSize; ++i) {
input[i] = 100 - (i % 100);
output[i] = 0;
}
unsigned int* d_a;
unsigned int* d_b;
uint4* d_flags;
hipMalloc(&d_a, kSize * sizeof(unsigned int));
hipMalloc(&d_b, kSize * sizeof(unsigned int));
hipMalloc(&d_flags, kSize * sizeof(uint4) * 2);
hipMemcpy(d_a, input, kSize * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemset(d_b, 0, kSize * sizeof(unsigned int));
unsigned int* sorted;
{
Timer timer("GPU Radix Sort");
sorted = RadixSort::sort<unsigned int>(d_a, d_b, d_flags, kSize);
}
hipMemcpy(output, sorted, kSize * sizeof(unsigned int), hipMemcpyDeviceToHost);
int numErrors = 0;
for (int i = 1; i < kSize; ++i) {
unsigned int left = output[i - 1];
unsigned int right = output[i];
if (left > right) {
printf("Radix Sort Mismatch at Index %d Left %d Right %d\n", i, left, right);
numErrors += 1;
}
}
printf("Radix Sort numErrors %d\n", numErrors);
free(input);
free(output);
hipFree(d_a);
hipFree(d_b);
hipFree(d_flags);
printf("\nEnd radixSortPlayground\n");
}
void extractResultsFromCudaSimulator(CudaAgent* agents, float3* positions, size_t size) {
float3* h_positions = (float3*) malloc(size * sizeof(float3));
hipMemcpy(h_positions, positions, size * sizeof(float3), hipMemcpyDeviceToHost);
for (size_t i = 0; i < size; ++i) {
//float3 v = h_positions[i];
//std::cout << "i " << i << " " << v.x << " " << v.y << " " << v.z << "\n";
}
free(h_positions);
}
void cudaSimulator() {
printf("\nBegin Cuda Simulator\n");
size_t xDim = 512;
size_t numElements = 32 * xDim;
CudaAgent* agents = (CudaAgent*) malloc(numElements * sizeof(CudaAgent));
for (size_t i = 0; i < numElements; ++i) {
float fi = (float) i;
agents[i] = CudaAgent{float3{fi, fi, fi,}, float3{fi + 100, fi + 100, fi + 100}};
}
CudaAgent* d_agents;
hipMalloc(&d_agents, numElements * sizeof(CudaAgent));
hipMemcpy(d_agents, agents, numElements * sizeof(CudaAgent), hipMemcpyHostToDevice);
float3* d_positions;
hipMalloc(&d_positions, numElements * sizeof(float3));
{
Timer timer("Cuda Simulator");
CudaSimulator::simulate(d_agents, d_positions, numElements);
extractResultsFromCudaSimulator(d_agents, d_positions, numElements);
}
free(agents);
hipFree(d_agents);
hipFree(d_positions);
printf("\nEnd Cuda Simulator\n");
}
// TODO
// For some mysterious reason, reduce and scan are non deterministic and suffer from errors when threadsPerBlock is not 1024
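// (Unverified guess: the shared-memory indexing in Scan::scan / Reduce::reduce
// may assume a full 1024-thread block, so smaller blocks would read
// uninitialized shared entries -- worth auditing before changing threadsPerBlock.)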
int main() {
srand(time(NULL));
return Application::create()->run();
//cudaSimulator();
//reducePlayground();
//scanPlayground();
//radixSortPlayground();
}
| 35a6e26dab644e1fccf268194e454b7678dd6b60.cu | #include <Utils/Timer.h>
#include <Renderer/Application.h>
#include <Cuda/Reduce.cuh>
#include <Cuda/Scan.cuh>
#include <Cuda/RadixSort.cuh>
#include <Cuda/Agent.cuh>
#include <Cuda/ContinuousCollision.cuh>
#include <Cuda/CudaSimulator.cuh>
#include <Test/InsertionSortCudaTest.cuh>
#include <stdio.h>
#include <iostream>
#include <memory>
#define checkCudaErrors(call) \
do { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("CUDA error at %s %d: %s\n", __FILE__, __LINE__, \
cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
} while (0)
__device__
int add(int a, int b) {
return a + b;
}
int serialReduce(int* ints, int size) {
int sum = 0;
for (int i = 0; i < size; ++i) {
sum += ints[i];
}
return sum;
}
void reducePlayground() {
printf("\nBegin reducePlayground\n");
constexpr int kSize = 1024 * 1024 * 32;
int* in = (int*) malloc(kSize *sizeof(int));
for (int i = 0; i < kSize; ++i) {
in[i] = 1;
}
int* d_in;
int* d_out;
cudaMalloc(&d_in, kSize * sizeof(int));
cudaMalloc(&d_out, kSize * sizeof(int));
cudaMemcpy(d_in, in, kSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_out, 0, kSize * sizeof(int));
{
Timer timer("Reduce GPU");
int result = Reduce::reduce<int, add>(d_in, d_out, kSize);
printf("\nGPU result: %d\n", result);
}
{
Timer timer("Reduce SER");
int result = serialReduce(in, kSize);
printf("\nSER result: %d\n", result);
}
free(in);
cudaFree(d_in);
cudaFree(d_out);
printf("\nEnd reducePlayground\n\n");
}
void checkScanErrors(int* input, int * output, int* d_out, int size) {
cudaMemcpy(output, d_out, size * sizeof(int), cudaMemcpyDeviceToHost);
int expected = 0;
int numErrors = 0;
for (int i = 0; i < size; ++i) {
expected += input[i];
int actual = output[i];
if (expected != actual) {
//printf("Mismatch i %d exp %d act %d\n", i, expected, actual);
numErrors += 1;
}
}
if (numErrors > 0) {
printf("Num scan errors %d\n", numErrors);
}
}
void scanPlayground() {
printf("\nBegin scanPlayground\n");
constexpr int kSize = 1024 * 1024 * 4;
int* input = (int*) malloc(kSize * sizeof(int));
int* output = (int*) malloc(kSize * sizeof(int));
for (int i = 0; i < kSize; ++i) {
input[i] = 1;
}
int* d_data;
checkCudaErrors(cudaMalloc(&d_data, kSize * sizeof(int) * 2));
cudaMemcpy(d_data, input, kSize * sizeof(int), cudaMemcpyHostToDevice);
{
Timer timer("GPU Scan");
for (int i = 0; i < 10; ++i) {
Scan::scan<int, add>(d_data, kSize);
checkScanErrors(input, output, d_data, kSize);
}
}
free(input);
free(output);
cudaFree(d_data);
printf("\nEnd scanPlayground\n\n");
}
void radixSortPlayground() {
printf("\nBegin radixSortPlayground\n");
constexpr int kSize = 1024 * 32;
unsigned int* input = (unsigned int*) malloc(kSize * sizeof(unsigned int));
unsigned int* output = (unsigned int*) malloc(kSize * sizeof(unsigned int));
for (int i = 0; i < kSize; ++i) {
input[i] = 100 - (i % 100);
output[i] = 0;
}
unsigned int* d_a;
unsigned int* d_b;
uint4* d_flags;
cudaMalloc(&d_a, kSize * sizeof(unsigned int));
cudaMalloc(&d_b, kSize * sizeof(unsigned int));
cudaMalloc(&d_flags, kSize * sizeof(uint4) * 2);
cudaMemcpy(d_a, input, kSize * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemset(d_b, 0, kSize * sizeof(unsigned int));
unsigned int* sorted;
{
Timer timer("GPU Radix Sort");
sorted = RadixSort::sort<unsigned int>(d_a, d_b, d_flags, kSize);
}
cudaMemcpy(output, sorted, kSize * sizeof(unsigned int), cudaMemcpyDeviceToHost);
int numErrors = 0;
for (int i = 1; i < kSize; ++i) {
unsigned int left = output[i - 1];
unsigned int right = output[i];
if (left > right) {
printf("Radix Sort Mismatch at Index %d Left %d Right %d\n", i, left, right);
numErrors += 1;
}
}
printf("Radix Sort numErrors %d\n", numErrors);
free(input);
free(output);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_flags);
printf("\nEnd radixSortPlayground\n");
}
void extractResultsFromCudaSimulator(CudaAgent* agents, float3* positions, size_t size) {
float3* h_positions = (float3*) malloc(size * sizeof(float3));
cudaMemcpy(h_positions, positions, size * sizeof(float3), cudaMemcpyDeviceToHost);
for (size_t i = 0; i < size; ++i) {
//float3 v = h_positions[i];
//std::cout << "i " << i << " " << v.x << " " << v.y << " " << v.z << "\n";
}
free(h_positions);
}
void cudaSimulator() {
printf("\nBegin Cuda Simulator\n");
size_t xDim = 512;
size_t numElements = 32 * xDim;
CudaAgent* agents = (CudaAgent*) malloc(numElements * sizeof(CudaAgent));
for (size_t i = 0; i < numElements; ++i) {
float fi = (float) i;
agents[i] = CudaAgent{float3{fi, fi, fi,}, float3{fi + 100, fi + 100, fi + 100}};
}
CudaAgent* d_agents;
cudaMalloc(&d_agents, numElements * sizeof(CudaAgent));
cudaMemcpy(d_agents, agents, numElements * sizeof(CudaAgent), cudaMemcpyHostToDevice);
float3* d_positions;
cudaMalloc(&d_positions, numElements * sizeof(float3));
{
Timer timer("Cuda Simulator");
CudaSimulator::simulate(d_agents, d_positions, numElements);
extractResultsFromCudaSimulator(d_agents, d_positions, numElements);
}
free(agents);
cudaFree(d_agents);
cudaFree(d_positions);
printf("\nEnd Cuda Simulator\n");
}
// TODO
// For some mysterious reason, reduce and scan are non deterministic and suffer from errors when threadsPerBlock is not 1024
int main() {
srand(time(NULL));
return Application::create()->run();
//cudaSimulator();
//reducePlayground();
//scanPlayground();
//radixSortPlayground();
}
|
15587e0380929e7f32a5fcd03217fa86878e8fab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Mary Barker
Homework 2
Vector addition on GPU that allows for more than 1024 elements
In particular, it can do 2 different parallel memory setups:
(1) each element in the vector assigned a thread, and the number of
blocks assigned accordingly,
(2) 2 blocks, 1024 threads each, and the algorithm iterates until
each element in the vector has been reached
to compile: nvcc BarkerHW2.cu
OUTPUTS:
with more than 2 blocks:
Time in milliseconds= 0.068000000000000
Last Values are A[4999] = 9998.000000000000000 B[4999] = 4999.000000000000000 C[4999] = 14997.000000000000000
with only 2 blocks:
Time in milliseconds= 0.073000000000000
Last Values are A[4999] = 9998.000000000000000 B[4999] = 4999.000000000000000 C[4999] = 14997.000000000000000
*/
#include <sys/time.h>
#include <stdio.h>
//Length of vectors to be added.
#define N 5000
bool two_blocks = true;
int num_iters;
dim3 dimBlock, dimGrid;
float *A_CPU, *B_CPU, *C_CPU; //CPU pointers
float *A_GPU, *B_GPU, *C_GPU; //GPU pointers
void SetupCudaDevices()
{
dimBlock.x = 1024;
dimBlock.y = 1;
dimBlock.z = 1;
if (two_blocks)
{
dimGrid.x = 2;
dimGrid.y = 1;
dimGrid.z = 1;
num_iters = (N - 1) / (dimGrid.x * dimBlock.x) + 1;
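//(N - 1) / threads + 1 is integer ceil(N / threads): each thread then
//handles num_iters strided elements inside the kernel.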
}
else
{
num_iters = 1;
dimGrid.x = (N - 1) / dimBlock.x + 1;
dimGrid.y = 1;
dimGrid.z = 1;
}
}
void AllocateMemory()
{
//Allocate Device (GPU) Memory, & allocates the value of the specific pointer/array
hipMalloc(&A_GPU,N*sizeof(float));
hipMalloc(&B_GPU,N*sizeof(float));
hipMalloc(&C_GPU,N*sizeof(float));
//Allocate Host (CPU) Memory
A_CPU = (float*)malloc(N*sizeof(float));
B_CPU = (float*)malloc(N*sizeof(float));
C_CPU = (float*)malloc(N*sizeof(float));
}
//Loads values into vectors that we will add.
void Initialize()
{
int i;
for(i = 0; i < N; i++)
{
A_CPU[i] = (float)2*i;
B_CPU[i] = (float)i;
}
}
//Cleaning up memory after we are finished.
void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free
{
free(A_CPU); free(B_CPU); free(C_CPU);
hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU);
}
//This is the kernel. It is the function that will run on the GPU.
//It adds vectors A and B then stores result in vector C
__global__ void Addition(float *A, float *B, float *C, int n, int num_iterations_over_blocks)
{
int id;
for(int i = 0; i < num_iterations_over_blocks; i++)
{
id = i * (blockDim.x * gridDim.x) + blockDim.x * blockIdx.x + threadIdx.x;
if(id < n) C[id] = A[id] + B[id];
}
}
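//Design note: setup (2) is effectively a grid-stride loop -- the fixed
//2 x 1024 threads sweep the array in steps of blockDim.x * gridDim.x,
//so any N is covered without growing the grid.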
int main()
{
int i;
timeval start, end;
//setup parallel structure
SetupCudaDevices();
//Partitioning off the memory that you will be using.
AllocateMemory();
//Loading up values to be added.
Initialize();
//Starting the timer
gettimeofday(&start, NULL);
//Copy Memory from CPU to GPU
hipMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice);
//Calling the Kernel (GPU) function.
hipLaunchKernelGGL(( Addition), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, B_GPU, C_GPU, N, num_iters);
//Copy Memory from GPU to CPU
hipMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), hipMemcpyDeviceToHost);
//Wait for the async copies and the kernel to finish before stopping the timer
hipDeviceSynchronize();
//Stopping the timer
gettimeofday(&end, NULL);
//Calculating the total time used in the addition and converting it to milliseconds.
float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
//Displaying the time
printf("Time in milliseconds= %.15f\n", (time/1000.0));
// Displaying vector info you will want to comment out the vector print line when your
//vector becomes big. This is just to make sure everything is running correctly.
for(i = 0; i < N; i++)
{
//printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]);
}
//Displaying the last value of the addition for a check when all vector display has been commented out.
printf("Last Values are A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", N-1, A_CPU[N-1], N-1, B_CPU[N-1], N-1, C_CPU[N-1]);
//You're done so cleanup your mess.
CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU);
return(0);
}
| 15587e0380929e7f32a5fcd03217fa86878e8fab.cu | /*
Mary Barker
Homework 2
Vector addition on GPU that allows for more than 1024 elements
In particular, it can do 2 different parallel memory setups:
(1) each element in the vector assigned a thread, and the number of
blocks assigned accordingly,
(2) 2 blocks, 1024 threads each, and the algorithm iterates until
each element in the vector has been reached
to compile: nvcc BarkerHW2.cu
OUTPUTS:
with more than 2 blocks:
Time in milliseconds= 0.068000000000000
Last Values are A[4999] = 9998.000000000000000 B[4999] = 4999.000000000000000 C[4999] = 14997.000000000000000
with only 2 blocks:
Time in milliseconds= 0.073000000000000
Last Values are A[4999] = 9998.000000000000000 B[4999] = 4999.000000000000000 C[4999] = 14997.000000000000000
*/
#include <sys/time.h>
#include <stdio.h>
//Length of vectors to be added.
#define N 5000
bool two_blocks = true;
int num_iters;
dim3 dimBlock, dimGrid;
float *A_CPU, *B_CPU, *C_CPU; //CPU pointers
float *A_GPU, *B_GPU, *C_GPU; //GPU pointers
void SetupCudaDevices()
{
dimBlock.x = 1024;
dimBlock.y = 1;
dimBlock.z = 1;
if (two_blocks)
{
dimGrid.x = 2;
dimGrid.y = 1;
dimGrid.z = 1;
num_iters = (N - 1) / (dimGrid.x * dimBlock.x) + 1;
}
else
{
num_iters = 1;
dimGrid.x = (N - 1) / dimBlock.x + 1;
dimGrid.y = 1;
dimGrid.z = 1;
}
}
void AllocateMemory()
{
//Allocate Device (GPU) Memory, & allocates the value of the specific pointer/array
cudaMalloc(&A_GPU,N*sizeof(float));
cudaMalloc(&B_GPU,N*sizeof(float));
cudaMalloc(&C_GPU,N*sizeof(float));
//Allocate Host (CPU) Memory
A_CPU = (float*)malloc(N*sizeof(float));
B_CPU = (float*)malloc(N*sizeof(float));
C_CPU = (float*)malloc(N*sizeof(float));
}
//Loads values into vectors that we will add.
void Initialize()
{
int i;
for(i = 0; i < N; i++)
{
A_CPU[i] = (float)2*i;
B_CPU[i] = (float)i;
}
}
//Cleaning up memory after we are finished.
void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free
{
free(A_CPU); free(B_CPU); free(C_CPU);
cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU);
}
//This is the kernel. It is the function that will run on the GPU.
//It adds vectors A and B then stores result in vector C
__global__ void Addition(float *A, float *B, float *C, int n, int num_iterations_over_blocks)
{
int id;
for(int i = 0; i < num_iterations_over_blocks; i++)
{
id = i * (blockDim.x * gridDim.x) + blockDim.x * blockIdx.x + threadIdx.x;
if(id < n) C[id] = A[id] + B[id];
}
}
int main()
{
int i;
timeval start, end;
//setup parallel structure
SetupCudaDevices();
//Partitioning off the memory that you will be using.
AllocateMemory();
//Loading up values to be added.
Initialize();
//Starting the timer
gettimeofday(&start, NULL);
//Copy Memory from CPU to GPU
cudaMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
//Calling the Kernel (GPU) function.
Addition<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, C_GPU, N, num_iters);
//Copy Memory from GPU to CPU
cudaMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
//Wait for the async copies and the kernel to finish before stopping the timer
cudaDeviceSynchronize();
//Stopping the timer
gettimeofday(&end, NULL);
//Calculating the total time used in the addition and converting it to milliseconds.
float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
//Displaying the time
printf("Time in milliseconds= %.15f\n", (time/1000.0));
// Displaying vector info you will want to comment out the vector print line when your
//vector becomes big. This is just to make sure everything is running correctly.
for(i = 0; i < N; i++)
{
//printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]);
}
//Displaying the last value of the addition for a check when all vector display has been commented out.
printf("Last Values are A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", N-1, A_CPU[N-1], N-1, B_CPU[N-1], N-1, C_CPU[N-1]);
//You're done so cleanup your mess.
CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU);
return(0);
}
|
ea064fb4c8af7649c1a7718cdfffc8889bc0d4c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0,
VECTOR *frc, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
float k2 = k;
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
k = -2 * k * dtheta / sinf(theta);
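// Harmonic angle potential: E = k * (theta - theta0)^2, so dE/dtheta = 2*k*dtheta;
// the 1/sin(theta) factor comes from d(theta)/d(costheta) = -1/sin(theta)
// when back-propagating through theta = acos(costheta).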
float common_factor_cross = k * rij_1_rkj_1;
float common_factor_self = k * costheta;
VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
fi = -fi - fk;
atomicAdd(&frc[atom_j].x, fi.x);
atomicAdd(&frc[atom_j].y, fi.y);
atomicAdd(&frc[atom_j].z, fi.z);
atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta);
}
}
void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *frc_f, float *ene, hipStream_t stream) {
hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(3. * atom_numbers) / 128)), dim3(128), 0, stream, 3 * atom_numbers, frc_f, 0.);
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
hipLaunchKernelGGL(( AngleForceWithAtomEnergyKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream,
angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene);
return;
}
void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *frc_f, float *ene, hipStream_t stream);
| ea064fb4c8af7649c1a7718cdfffc8889bc0d4c1.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0,
VECTOR *frc, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
float k2 = k;
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
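    // Harmonic angle energy: E = k * dtheta^2, so dE/dtheta = 2 * k * dtheta.
    // theta = acos(costheta) gives d(theta)/d(costheta) = -1 / sin(theta),
    // hence the factor below is dE/d(costheta) = -2 * k * dtheta / sin(theta).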
k = -2 * k * dtheta / sinf(theta);
float common_factor_cross = k * rij_1_rkj_1;
float common_factor_self = k * costheta;
VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
fi = -fi - fk;
atomicAdd(&frc[atom_j].x, fi.x);
atomicAdd(&frc[atom_j].y, fi.y);
atomicAdd(&frc[atom_j].z, fi.z);
atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta);
}
}
void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *frc_f, float *ene, cudaStream_t stream) {
Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(3 * atom_numbers, frc_f, 0.);
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene);
return;
}
void AngleForceWithAtomEnergy(int angle_numbers, int atom_numbers, const int *uint_crd_f, const float *scaler_f,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *frc_f, float *ene, cudaStream_t stream);
|
7ca836c738946826580c09e12733034042459094.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "entry_convolution_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
namespace nnforge
{
namespace cuda
{
__global__ void entry_convolution_upd_kernel(
float * __restrict output,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int input_neuron_count,
int output_neuron_count,
int entry_count)
{
int neuron_id = blockIdx.x;
int total_thread_id = blockIdx.y * blockDim.x + threadIdx.x;
int feature_map_id = total_thread_id >> 5;
int lane_id = total_thread_id & 31;
int entry_id = blockIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
const float * in_base1 = input + entry_id * 2 * input_neuron_count + neuron_id;
const float * in_base2 = in_base1 + input_neuron_count;
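				// Each output feature map f is a 1-D convolution over the two entries'
				// feature maps: out[f] = sum over (i, j) with i + j == f of in1[i] * in2[j];
				// base_input_index1/2 select the first valid (i, j) pair on that diagonal.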
int base_input_index1 = 0;
int base_input_index2 = feature_map_id;
if (feature_map_id > (input_feature_map_count - 1))
{
base_input_index1 = feature_map_id - (input_feature_map_count - 1);
base_input_index2 = (input_feature_map_count - 1);
}
int iteration_count = min(input_feature_map_count - base_input_index1, base_input_index2 + 1);
float sum = 0.0F;
#pragma unroll 4
for(int i = lane_id; i < iteration_count; i += 32)
sum += in_base1[(base_input_index1 + i) * neuron_count_per_feature_map] * in_base2[(base_input_index2 - i) * neuron_count_per_feature_map];
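				// Warp-level tree reduction: fold the 32 per-lane partial sums into lane 0.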
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sum += __shfl_down(sum, tx);
#else
sum += __shfl_down_sync(0xFFFFFFFF, sum, tx);
#endif
#endif
if (lane_id == 0)
output[entry_id * output_neuron_count + feature_map_id * neuron_count_per_feature_map + neuron_id] = sum;
}
}
template<bool add_update_to_destination>
__global__ void entry_convolution_backprop_upd_kernel(
float * __restrict input_errors,
const float * __restrict input_neurons,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int input_neuron_count,
int output_neuron_count,
int entry_count)
{
int neuron_id = blockIdx.x;
int total_thread_id = blockIdx.y * blockDim.x + threadIdx.x;
int input_feature_map_id = total_thread_id >> 5;
int lane_id = total_thread_id & 31;
int entry_id = blockIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (input_feature_map_id < input_feature_map_count) && (entry_id < entry_count))
{
const float * in_base1 = input_neurons + entry_id * 2 * input_neuron_count + neuron_id;
const float * in_base2 = in_base1 + input_neuron_count;
const float * out_err_base = output_errors + entry_id * output_neuron_count + input_feature_map_id * neuron_count_per_feature_map + neuron_id;
float sum1 = 0.0F;
float sum2 = 0.0F;
#pragma unroll 4
for(int i = lane_id; i < input_feature_map_count; i += 32)
{
float out_err = out_err_base[i * neuron_count_per_feature_map];
sum1 += in_base2[i * neuron_count_per_feature_map] * out_err;
sum2 += in_base1[i * neuron_count_per_feature_map] * out_err;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sum1 += __shfl_down(sum1, tx);
sum2 += __shfl_down(sum2, tx);
#else
sum1 += __shfl_down_sync(0xFFFFFFFF, sum1, tx);
sum2 += __shfl_down_sync(0xFFFFFFFF, sum2, tx);
#endif
#endif
}
if (lane_id == 0)
{
float * in_err_base1 = input_errors + entry_id * 2 * input_neuron_count + input_feature_map_id * neuron_count_per_feature_map + neuron_id;
float * in_err_base2 = in_err_base1 + input_neuron_count;
if (add_update_to_destination)
{
*in_err_base1 += sum1;
*in_err_base2 += sum2;
}
else
{
*in_err_base1 = sum1;
*in_err_base2 = sum2;
}
}
}
}
void entry_convolution_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
int warps_per_threadblock = 8;
int threadblock_size = warps_per_threadblock * 32;
int threadblocks_to_cover_all_feature_maps = (output_configuration_specific.feature_map_count + warps_per_threadblock - 1) / warps_per_threadblock;
hipLaunchKernelGGL(( entry_convolution_upd_kernel), dim3(dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count)), dim3(threadblock_size), 0, stream_id,
*output_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
}
void entry_convolution_layer_updater_cuda::enqueue_backward_data_propagation(
hipStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
int warps_per_threadblock = 8;
int threadblock_size = warps_per_threadblock * 32;
int threadblocks_to_cover_all_feature_maps = (input_configuration_specific_list[0].feature_map_count + warps_per_threadblock - 1) / warps_per_threadblock;
if (add_update_to_destination)
hipLaunchKernelGGL(( entry_convolution_backprop_upd_kernel<true>), dim3(dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count)), dim3(threadblock_size), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
else
hipLaunchKernelGGL(( entry_convolution_backprop_upd_kernel<false>), dim3(dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count)), dim3(threadblock_size), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
}
bool entry_convolution_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return true;
}
bool entry_convolution_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
}
}
| 7ca836c738946826580c09e12733034042459094.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "entry_convolution_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
namespace nnforge
{
namespace cuda
{
__global__ void entry_convolution_upd_kernel(
float * __restrict output,
const float * __restrict input,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int input_neuron_count,
int output_neuron_count,
int entry_count)
{
int neuron_id = blockIdx.x;
int total_thread_id = blockIdx.y * blockDim.x + threadIdx.x;
int feature_map_id = total_thread_id >> 5;
int lane_id = total_thread_id & 31;
int entry_id = blockIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (feature_map_id < output_feature_map_count) && (entry_id < entry_count))
{
const float * in_base1 = input + entry_id * 2 * input_neuron_count + neuron_id;
const float * in_base2 = in_base1 + input_neuron_count;
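				// Each output feature map f is a 1-D convolution over the two entries'
				// feature maps: out[f] = sum over (i, j) with i + j == f of in1[i] * in2[j];
				// base_input_index1/2 select the first valid (i, j) pair on that diagonal.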
int base_input_index1 = 0;
int base_input_index2 = feature_map_id;
if (feature_map_id > (input_feature_map_count - 1))
{
base_input_index1 = feature_map_id - (input_feature_map_count - 1);
base_input_index2 = (input_feature_map_count - 1);
}
int iteration_count = min(input_feature_map_count - base_input_index1, base_input_index2 + 1);
float sum = 0.0F;
#pragma unroll 4
for(int i = lane_id; i < iteration_count; i += 32)
sum += in_base1[(base_input_index1 + i) * neuron_count_per_feature_map] * in_base2[(base_input_index2 - i) * neuron_count_per_feature_map];
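				// Warp-level tree reduction: fold the 32 per-lane partial sums into lane 0.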
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sum += __shfl_down(sum, tx);
#else
sum += __shfl_down_sync(0xFFFFFFFF, sum, tx);
#endif
#endif
if (lane_id == 0)
output[entry_id * output_neuron_count + feature_map_id * neuron_count_per_feature_map + neuron_id] = sum;
}
}
template<bool add_update_to_destination>
__global__ void entry_convolution_backprop_upd_kernel(
float * __restrict input_errors,
const float * __restrict input_neurons,
const float * __restrict output_errors,
int neuron_count_per_feature_map,
int input_feature_map_count,
int output_feature_map_count,
int input_neuron_count,
int output_neuron_count,
int entry_count)
{
int neuron_id = blockIdx.x;
int total_thread_id = blockIdx.y * blockDim.x + threadIdx.x;
int input_feature_map_id = total_thread_id >> 5;
int lane_id = total_thread_id & 31;
int entry_id = blockIdx.z;
if ((neuron_id < neuron_count_per_feature_map) && (input_feature_map_id < input_feature_map_count) && (entry_id < entry_count))
{
const float * in_base1 = input_neurons + entry_id * 2 * input_neuron_count + neuron_id;
const float * in_base2 = in_base1 + input_neuron_count;
const float * out_err_base = output_errors + entry_id * output_neuron_count + input_feature_map_id * neuron_count_per_feature_map + neuron_id;
float sum1 = 0.0F;
float sum2 = 0.0F;
#pragma unroll 4
for(int i = lane_id; i < input_feature_map_count; i += 32)
{
float out_err = out_err_base[i * neuron_count_per_feature_map];
sum1 += in_base2[i * neuron_count_per_feature_map] * out_err;
sum2 += in_base1[i * neuron_count_per_feature_map] * out_err;
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sum1 += __shfl_down(sum1, tx);
sum2 += __shfl_down(sum2, tx);
#else
sum1 += __shfl_down_sync(0xFFFFFFFF, sum1, tx);
sum2 += __shfl_down_sync(0xFFFFFFFF, sum2, tx);
#endif
#endif
}
if (lane_id == 0)
{
float * in_err_base1 = input_errors + entry_id * 2 * input_neuron_count + input_feature_map_id * neuron_count_per_feature_map + neuron_id;
float * in_err_base2 = in_err_base1 + input_neuron_count;
if (add_update_to_destination)
{
*in_err_base1 += sum1;
*in_err_base2 += sum2;
}
else
{
*in_err_base1 = sum1;
*in_err_base2 = sum2;
}
}
}
}
void entry_convolution_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
int warps_per_threadblock = 8;
int threadblock_size = warps_per_threadblock * 32;
int threadblocks_to_cover_all_feature_maps = (output_configuration_specific.feature_map_count + warps_per_threadblock - 1) / warps_per_threadblock;
entry_convolution_upd_kernel<<<dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count), threadblock_size, 0, stream_id>>>(
*output_buffer,
*input_buffers[0],
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
}
void entry_convolution_layer_updater_cuda::enqueue_backward_data_propagation(
cudaStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
int warps_per_threadblock = 8;
int threadblock_size = warps_per_threadblock * 32;
int threadblocks_to_cover_all_feature_maps = (input_configuration_specific_list[0].feature_map_count + warps_per_threadblock - 1) / warps_per_threadblock;
if (add_update_to_destination)
entry_convolution_backprop_upd_kernel<true><<<dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count), threadblock_size, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
else
entry_convolution_backprop_upd_kernel<false><<<dim3(output_elem_count_per_feature_map, threadblocks_to_cover_all_feature_maps, entry_count), threadblock_size, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*output_errors_buffer,
output_elem_count_per_feature_map,
input_configuration_specific_list[0].feature_map_count,
output_configuration_specific.feature_map_count,
input_elem_count_per_entry_list[0],
output_elem_count_per_entry,
entry_count);
}
bool entry_convolution_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return true;
}
bool entry_convolution_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
}
}
|
339e761cfd2b0516b9dd98ddd93de85b645b17e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/NumericLimits.cuh>
#include <THH/THH.h>
#include <THH/THHGeneral.h>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + ::log(sum)) {}
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
: logsum(max_log_sum_exp) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
const int max_threads = 1024;
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < (max_block_size/2)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = ::max(block_size, static_cast<uint64_t>(32));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + ::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
__syncwarp(mask);
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
AccumT* reducVal1,
AccumT val1,
const Reduction1<AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
AccumT val2,
const Reduction2<AccumT>& r2,
AccumT defaultVal2)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val1;
smem[blockDim.x + threadIdx.x] = val2;
__syncthreads();
AccumT warpVal1 = defaultVal1;
AccumT warpVal2 = defaultVal2;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal1 = r1(warpVal1, smem[lane * 32 + i]);
warpVal2 = r2(warpVal2, smem[lane * 32 + i + blockDim.x]);
}
__syncwarp(mask);
smem[lane] = warpVal1;
smem[lane + blockDim.x] = warpVal2;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal1 = defaultVal1;
AccumT blockVal2 = defaultVal2;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal1 = r1(blockVal1, smem[i]);
blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
}
smem[0] = blockVal1;
smem[blockDim.x] = blockVal2;
}
// Sync and broadcast
__syncthreads();
*reducVal1 = smem[0];
*reducVal2 = smem[blockDim.x];
__syncthreads();
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
  // Handle the misaligned head elements one at a time so the main loop can use aligned vector loads
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
T* data,
int size,
AccumT* reducVal1,
const Reduction1<T, AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
const Reduction2<T, AccumT>& r2,
AccumT defaultVal2)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal1 = defaultVal1;
AccumT threadVal2 = defaultVal2;
int offset = threadIdx.x;
  // Handle the misaligned head elements one at a time so the main loop can use aligned vector loads
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal1 = r1(threadVal1, v[j]);
threadVal2 = r2(threadVal2, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x) {
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
*reducVal1 = threadVal1;
*reducVal2 = threadVal2;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
accscalar_t *losses,
outscalar_t *max_log_sum_exp,
scalar_t *input,
int64_t *labels,
int64_t classes,
const float smoothing)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
//output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
int64_t label = labels[blockIdx.x];
// find the max and sum
accscalar_t threadMax, threadSum, max_k, sum_k;
ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes,
&threadMax, MaxFloat<scalar_t, accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&threadSum, AddFloat<scalar_t, accscalar_t>(),
static_cast<accscalar_t>(0));
blockReduce<Max, Add, accscalar_t>(
sdata,
&max_k, threadMax, Max<accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&sum_k, threadSum, Add<accscalar_t>(),
static_cast<accscalar_t>(0));
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
// calculate per element loss with label smoothing
// reserve max + log_sum_exp for bprop
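  // With label smoothing s: loss = (1 - s) * NLL(label) + s * mean_c(-log p_c).
  // Since -log p_c = (max_k + log(sumAll)) - x_c, the mean over classes is
  // (max_k + log(sumAll)) - sum_k / classes, which yields the expression below.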
if (threadIdx.x == 0) {
accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
losses[blockIdx.x] = (max_k + ::log(sumAll) - sum_k / classes) \
* smoothing - log_prob * (1 - smoothing);
max_log_sum_exp[blockIdx.x] = max_k + ::log(sumAll);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
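  // Per-class gradient: d(loss)/d(logit_c) = gradOutput *
  //   (softmax_c - (1 - smoothing) * [c == label] - smoothing / classes),
  // where softmax_c = exp(logit_c - coeff) and coeff = max + log_sum_exp.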
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
accscalar_t tmpLogits[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput * (
::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
(offset + j * blockDim.x == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>((offset == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
  // Handle the misaligned head elements one at a time so the main loop can use aligned vector loads/stores
if(shift > 0){
logits -= shift;
gradInput -= shift;
classes += shift;
if(threadIdx.x >= shift){
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
classes -= blockDim.x;
gradInput += blockDim.x;
logits += blockDim.x;
shift -= blockDim.x;
}
int last = classes % (ILP * blockDim.x);
typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
// input
scalar_t v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
// output
scalar_t r[ILP];
LoadT* result = reinterpret_cast<LoadT*>(&r);
for (; offset * ILP < (classes - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
r[j] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(v[j]) - coeff) -
static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
}
offset = classes - last + threadIdx.x;
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
gradInput += blockIdx.x * classes;
logits += blockIdx.x * classes;
// Do vectorized load/store when input/output have same alignment
const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
if (shift == shift_){
aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
else {
apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const float smoothing,
const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
, dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
}
);
THCudaCheck(hipGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits_,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing,
bool half_to_float) {
const int64_t dim = 1;
Tensor gI = at::empty_like(logits_);
if (grad_loss.numel() == 0) {
return gI;
}
auto grad = grad_loss.contiguous();
auto logits = logits_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
if (grad.dim() == 0) grad = grad.view(1);
AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
int64_t outer_size = 1;
int64_t dim_size = logits.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= logits.size(i);
for (int64_t i = dim + 1; i < logits.dim(); ++i)
inner_size *= logits.size(i);
// See descriptions of kernels above.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
using accscalar_t = acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<scalar_t_0>(),
grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<accscalar_t>(),
grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
}
);
THCudaCheck(hipGetLastError());
return gI;
}
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, smoothing, half_to_float);
}
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing) {
bool half_to_float = grad_loss.type().scalarType() != logits.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad_loss.type().scalarType() == ScalarType::Float && logits.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
| 339e761cfd2b0516b9dd98ddd93de85b645b17e8.cu | /**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THC.h>
#include <THC/THCGeneral.h>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + std::log(sum)) {}
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
: logsum(max_log_sum_exp) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
const int max_threads = 1024;
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < (max_block_size/2)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(32));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal = r(warpVal, smem[lane * 32 + i]);
}
__syncwarp(mask);
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
AccumT* reducVal1,
AccumT val1,
const Reduction1<AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
AccumT val2,
const Reduction2<AccumT>& r2,
AccumT defaultVal2)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val1;
smem[blockDim.x + threadIdx.x] = val2;
__syncthreads();
AccumT warpVal1 = defaultVal1;
AccumT warpVal2 = defaultVal2;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / 32)) - 1;
if (threadIdx.x < 32) {
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32) {
#pragma unroll
for (int i = 0; i < 32; ++i) {
warpVal1 = r1(warpVal1, smem[lane * 32 + i]);
warpVal2 = r2(warpVal2, smem[lane * 32 + i + blockDim.x]);
}
__syncwarp(mask);
smem[lane] = warpVal1;
smem[lane + blockDim.x] = warpVal2;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal1 = defaultVal1;
AccumT blockVal2 = defaultVal2;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / 32; ++i) {
blockVal1 = r1(blockVal1, smem[i]);
blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
}
smem[0] = blockVal1;
smem[blockDim.x] = blockVal2;
}
// Sync and broadcast
__syncthreads();
*reducVal1 = smem[0];
*reducVal2 = smem[blockDim.x];
__syncthreads();
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
  // Handle the misaligned head elements one at a time so the main loop can use aligned vector loads
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
T* data,
int size,
AccumT* reducVal1,
const Reduction1<T, AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
const Reduction2<T, AccumT>& r2,
AccumT defaultVal2)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal1 = defaultVal1;
AccumT threadVal2 = defaultVal2;
int offset = threadIdx.x;
  // Handle the misaligned head elements one at a time so the main loop can use aligned vector loads
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal1 = r1(threadVal1, v[j]);
threadVal2 = r2(threadVal2, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x) {
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
*reducVal1 = threadVal1;
*reducVal2 = threadVal2;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
accscalar_t *losses,
outscalar_t *max_log_sum_exp,
scalar_t *input,
int64_t *labels,
int64_t classes,
const float smoothing)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
//output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
int64_t label = labels[blockIdx.x];
// find the max and sum
accscalar_t threadMax, threadSum, max_k, sum_k;
ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes,
&threadMax, MaxFloat<scalar_t, accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&threadSum, AddFloat<scalar_t, accscalar_t>(),
static_cast<accscalar_t>(0));
blockReduce<Max, Add, accscalar_t>(
sdata,
&max_k, threadMax, Max<accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&sum_k, threadSum, Add<accscalar_t>(),
static_cast<accscalar_t>(0));
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
// calculate per element loss with label smoothing
// reserve max + log_sum_exp for bprop
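  // With label smoothing s: loss = (1 - s) * NLL(label) + s * mean_c(-log p_c).
  // Since -log p_c = (max_k + log(sumAll)) - x_c, the mean over classes is
  // (max_k + log(sumAll)) - sum_k / classes, which yields the expression below.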
if (threadIdx.x == 0) {
accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
losses[blockIdx.x] = (max_k + std::log(sumAll) - sum_k / classes) \
* smoothing - log_prob * (1 - smoothing);
max_log_sum_exp[blockIdx.x] = max_k + std::log(sumAll);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
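// Note: coeff = max + log(sum(exp(x - max))) = logsumexp(x), so the
// std::exp(logit - coeff) terms below are exactly softmax(logit); the
// gradient is gradOutput * (softmax - onehot * (1 - smoothing)
// - smoothing / classes).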
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
accscalar_t tmpLogits[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput * (
std::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
(offset + j * blockDim.x == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>((offset == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
// handle a misaligned head: shift the pointers back and do one scalar pass
if(shift > 0){
logits -= shift;
gradInput -= shift;
classes += shift;
if(threadIdx.x >= shift){
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
classes -= blockDim.x;
gradInput += blockDim.x;
logits += blockDim.x;
shift -= blockDim.x;
}
int last = classes % (ILP * blockDim.x);
typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
// input
scalar_t v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
// output
scalar_t r[ILP];
LoadT* result = reinterpret_cast<LoadT*>(&r);
for (; offset * ILP < (classes - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
r[j] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(v[j]) - coeff) -
static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
}
offset = classes - last + threadIdx.x;
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
gradInput += blockIdx.x * classes;
logits += blockIdx.x * classes;
// Do vectorized load/store when input/output have same alignment
const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
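// `shift` counts how many scalar_t elements each pointer sits past an
// ALIGN_BYTES boundary; equal shifts mean one shared scalar prologue can
// re-align both pointers before the vectorized LoadT path.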
if (shift == shift_){
aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
else {
apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const float smoothing,
const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
} else {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
}
);
THCudaCheck(cudaGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits_,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing,
bool half_to_float) {
const int64_t dim = 1;
Tensor gI = at::empty_like(logits_);
if (grad_loss.numel() == 0) {
return gI;
}
auto grad = grad_loss.contiguous();
auto logits = logits_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
if (grad.dim() == 0) grad = grad.view(1);
AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
int64_t outer_size = 1;
int64_t dim_size = logits.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= logits.size(i);
for (int64_t i = dim + 1; i < logits.dim(); ++i)
inner_size *= logits.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
using accscalar_t = acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<scalar_t_0>(),
grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
} else {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<accscalar_t>(),
grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
}
);
THCudaCheck(cudaGetLastError());
return gI;
}
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, smoothing, half_to_float);
}
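// Minimal host-side usage sketch (illustrative, not from the original
// source), assuming this file is built as a PyTorch CUDA extension with the
// ATen headers already included above. Shapes and the smoothing value are
// arbitrary, and the function is never called here.
static void softmax_xentropy_usage_example() {
auto opts = at::TensorOptions().device(at::kCUDA);
auto input = at::randn({8, 1000}, opts.dtype(at::kHalf));
auto labels = at::randint(1000, {8}, opts.dtype(at::kLong));
// half_to_float = true: fp16 logits, fp32 losses and max_log_sum_exp.
auto outs = softmax_xentropy_cuda(input, labels, /*smoothing=*/0.1f,
/*half_to_float=*/true);
at::Tensor losses = outs[0], max_log_sum_exp = outs[1];
(void)losses; (void)max_log_sum_exp;
}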
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing) {
bool half_to_float = grad_loss.type().scalarType() != logits.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad_loss.type().scalarType() == ScalarType::Float && logits.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
|
6646ef3e2dadeb04b8a926f309c953ccff75ea56.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void upSample(
float* input, float* output, int num_input_rows, int num_input_cols);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) < precision);
}
return;
}
int main(int argc, char** argv) {
if (argc < 3) {
printf("Usag ./upsample <num of input rows> <num of input columns>");
return -1;
}
int num_input_rows = atoi(argv[1]);
int num_input_cols = atoi(argv[2]);
printf("Running the up sampling for an input matrix %d x %d\n",
num_input_rows, num_input_cols);
int num_output_rows = num_input_rows * 2;
int num_output_cols = num_input_cols * 2;
float* host_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* host_output = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
RandFloatArray(host_input, num_input_rows * num_input_cols);
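// Ground-truth layout note: the output is written as four contiguous
// "planes" of num_input_rows * num_input_cols elements each, selected by
// kx * 2 + ky, rather than as an interleaved 2R x 2C matrix. The column
// sweep assumes num_input_cols is a multiple of NUM_THREADS (cta_config.h).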
for (int i = 0; i < num_input_rows; ++i) {
for (int j = 0; j < num_input_cols; j += NUM_THREADS) {
for (int kx = 0; kx < 2; ++kx) {
for (int ky = 0; ky < 2; ++ky) {
for (int k = 0; k < NUM_THREADS; ++k) {
host_output[(kx * 2 + ky) * num_input_rows * num_input_cols
+ i * num_input_cols + (j + k)] = host_input[
i * num_input_cols + j + (ky * NUM_THREADS + k) / 2];
}
}
}
}
}
printf("Completed ground truth computation!\n");
float* device_input;
float* device_output;
CUDA_CHECK(hipMalloc((void**) &device_input,
num_input_rows * num_input_cols * sizeof(float)));
CUDA_CHECK(hipMalloc((void**) &device_output,
num_output_rows * num_output_cols * sizeof(float)));
float* results = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
CUDA_CHECK(hipMemcpy(device_input, host_input,
num_input_rows * num_input_cols * sizeof(float),
hipMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
hipLaunchKernelGGL(( upSample), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, device_input, device_output,
num_input_rows, num_input_cols);
hipDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(hipMemcpy(results, device_output,
num_output_rows * num_output_cols * sizeof(float),
hipMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, num_output_rows * num_output_cols);
printf("Correctness Check: Acceptedd!\n");
free(host_input);
free(host_output);
free(results);
CUDA_CHECK(hipFree(device_input));
CUDA_CHECK(hipFree(device_output));
return 0;
}
| 6646ef3e2dadeb04b8a926f309c953ccff75ea56.cu | #include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void upSample(
float* input, float* output, int num_input_rows, int num_input_cols);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) < precision);
}
return;
}
int main(int argc, char** argv) {
if (argc < 3) {
printf("Usag ./upsample <num of input rows> <num of input columns>");
return -1;
}
int num_input_rows = atoi(argv[1]);
int num_input_cols = atoi(argv[2]);
printf("Running the up sampling for an input matrix %d x %d\n",
num_input_rows, num_input_cols);
int num_output_rows = num_input_rows * 2;
int num_output_cols = num_input_cols * 2;
float* host_input = (float*) malloc(
num_input_rows * num_input_cols * sizeof(float));
float* host_output = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
RandFloatArray(host_input, num_input_rows * num_input_cols);
for (int i = 0; i < num_input_rows; ++i) {
for (int j = 0; j < num_input_cols; j += NUM_THREADS) {
for (int kx = 0; kx < 2; ++kx) {
for (int ky = 0; ky < 2; ++ky) {
for (int k = 0; k < NUM_THREADS; ++k) {
host_output[(kx * 2 + ky) * num_input_rows * num_input_cols
+ i * num_input_cols + (j + k)] = host_input[
i * num_input_cols + j + (ky * NUM_THREADS + k) / 2];
}
}
}
}
}
printf("Completed ground truth computation!\n");
float* device_input;
float* device_output;
CUDA_CHECK(cudaMalloc((void**) &device_input,
num_input_rows * num_input_cols * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**) &device_output,
num_output_rows * num_output_cols * sizeof(float)));
float* results = (float*) malloc(
num_output_rows * num_output_cols * sizeof(float));
CUDA_CHECK(cudaMemcpy(device_input, host_input,
num_input_rows * num_input_cols * sizeof(float),
cudaMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
upSample<<<NUM_BLOCKS, NUM_THREADS>>>(device_input, device_output,
num_input_rows, num_input_cols);
cudaDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(cudaMemcpy(results, device_output,
num_output_rows * num_output_cols * sizeof(float),
cudaMemcpyDeviceToHost));
AssertArrayEqual(host_output, results, num_output_rows * num_output_cols);
printf("Correctness Check: Acceptedd!\n");
free(host_input);
free(host_output);
free(results);
CUDA_CHECK(cudaFree(device_input));
CUDA_CHECK(cudaFree(device_output));
return 0;
}
|
ae91ab0388d7068fed53daccf1fafb3ecc1b948f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "lamusfft.cuh"
#include "kernels_lam.cu"
#include "shift.hip"
lamusfft::lamusfft(size_t n0, size_t n1, size_t n2, size_t det, size_t ntheta, float phi, float gamma, float eps, size_t ngpus)
: n0(n0), n1(n1), n2(n2), det(det), ntheta(ntheta), phi(phi), gamma(gamma), ngpus(ngpus) {
mu0 = -log(eps) / (2 * n0 * n0);
mu1 = -log(eps) / (2 * n1 * n1);
mu2 = -log(eps) / (2 * n2 * n2);
m0 = ceil(2 * n0 * 1 / PI * sqrt(-mu0 * log(eps) + (mu0 * n0) * (mu0 * n0) / 4));
m1 = ceil(2 * n1 * 1 / PI * sqrt(-mu1 * log(eps) + (mu1 * n1) * (mu1 * n1) / 4));
m2 = ceil(2 * n2 * 1 / PI * sqrt(-mu2 * log(eps) + (mu2 * n2) * (mu2 * n2) / 4));
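// mu_i sets the width of the Gaussian spreading kernel for the accuracy
// target eps, and m_i = ceil(2*n_i/pi * sqrt(-mu_i*log(eps)
// + (mu_i*n_i)^2/4)) is the resulting interpolation radius per axis.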
fprintf(stderr,"interp radius in USFFT: %d\n",m0);
hipMalloc((void **)&f, n0 * n1 * n2 * sizeof(float2));
hipMalloc((void **)&g, det * det * ntheta * sizeof(float2));
hipMalloc((void **)&fdee,
(2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
hipMalloc((void **)&x, det * det * ntheta * sizeof(float));
hipMalloc((void **)&y, det * det * ntheta * sizeof(float));
hipMalloc((void **)&z, det * det * ntheta * sizeof(float));
hipMalloc((void **)&theta, ntheta * sizeof(float));
int ffts[3];
int idist;
int inembed[3];
// fft 3d
ffts[0] = 2 * n2;
ffts[1] = 2 * n1;
ffts[2] = 2 * n0;
idist = (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)* (2 * n2 + 2 * m2);
inembed[0] = 2 * n2 + 2 * m2; // Note the order is reverse!
inembed[1] = 2 * n1 + 2 * m1;
inembed[2] = 2 * n0 + 2 * m0;
hipfftResult a = hipfftPlanMany(&plan3d, 3, ffts, inembed, 1, idist, inembed, 1, idist,
HIPFFT_C2C, 1);
fprintf(stderr,"%d",a);
// fft 2d
ffts[0] = det;
ffts[1] = det;
idist = det*det;
inembed[0] = det;
inembed[1] = det;
a = hipfftPlanMany(&plan2d, 2, ffts, inembed, 1, idist, inembed, 1, idist,
HIPFFT_C2C, ntheta);
fprintf(stderr,"%d",a);
streams = new hipStream_t[ngpus];
for (int i=0;i<ngpus;i++)
hipStreamCreate(&streams[i]) ;
BS3d = dim3(16, 16, 4);
GS3d0 = dim3(ceil(det / (float)BS3d.x), ceil(det / (float)BS3d.y),
ceil(ntheta / (float)BS3d.z));
GS3d00 = dim3(ceil(det / (float)BS3d.x), ceil(det / (float)BS3d.y),
ceil(ntheta/ngpus / (float)BS3d.z));
GS3d1 = dim3(ceil(n0 / (float)BS3d.x), ceil(n1 / (float)BS3d.y),
ceil(n2 / (float)BS3d.z));
GS3d2 = dim3(ceil(2*n0 / (float)BS3d.x),
ceil(2*n1 / (float)BS3d.y),
ceil(2*n2 / (float)BS3d.z));
GS3d3 = dim3(ceil((2 * n0 + 2 * m0) / (float)BS3d.x),
ceil((2 * n1 + 2 * m1) / (float)BS3d.y),
ceil((2 * n2 + 2 * m2) / (float)BS3d.z));
}
// destructor, memory deallocation
lamusfft::~lamusfft() { free(); }
void lamusfft::free() {
if (!is_free) {
hipFree(f);
hipFree(g);
hipFree(fdee);
hipFree(x);
hipFree(y);
hipFree(z);
hipfftDestroy(plan3d);
hipfftDestroy(plan2d);
is_free = true;
}
}
void lamusfft::fwd(size_t g_, size_t f_, size_t theta_) {
hipMemcpy(f, (float2 *)f_, n0 * n1 * n2 * sizeof(float2), hipMemcpyDefault);
//f = (float2 *)f_;
//g = (float2 *)g_;
hipMemcpy(theta, (float *)theta_, ntheta * sizeof(float), hipMemcpyDefault);
hipMemset(fdee, 0, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
hipLaunchKernelGGL(( takexyz) , dim3(GS3d0), dim3(BS3d), 0, 0, x, y, z, theta, phi, gamma, det, ntheta);
hipLaunchKernelGGL(( divker) , dim3(GS3d1), dim3(BS3d), 0, 0, fdee, f, mu0, mu1, mu2, n0, n1, n2, m0, m1,m2, TOMO_FWD);
hipLaunchKernelGGL(( fftshiftc3d) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
hipfftExecC2C(plan3d, (hipfftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)].x,
(hipfftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)].x, HIPFFT_FORWARD);
hipLaunchKernelGGL(( fftshiftc3d) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
hipLaunchKernelGGL(( wrap) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, n0, n1, n2, m0, m1, m2, TOMO_FWD);
/*for (int i=0;i<ngpus;i++)
{
hipSetDevice(i) ;
int st = i*ntheta/ngpus*det*det;
hipMemPrefetchAsync(&g[st],ntheta/ngpus*det*det*sizeof(float2),i,streams[i]);
hipMemPrefetchAsync(&x[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(&y[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(&z[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),i,streams[i]);
gather <<<GS3d00, BS3d, 0, streams[i]>>> (&g[st], fdee, &x[st], &y[st], &z[st], m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta/ngpus, TOMO_FWD);
}
hipDeviceSynchronize();
hipMemPrefetchAsync(g,ntheta*det*det*sizeof(float),0,streams[0]);
hipSetDevice(0) ; */
hipLaunchKernelGGL(( gather) , dim3(GS3d0), dim3(BS3d), 0, 0, g, fdee, x, y, z, m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta, TOMO_FWD);
hipLaunchKernelGGL(( fftshiftc2d) , dim3(GS3d0), dim3(BS3d), 0, 0, g, det, ntheta);
hipfftExecC2C(plan2d, (hipfftComplex *)g, (hipfftComplex *)g, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( fftshiftc2d) , dim3(GS3d0), dim3(BS3d), 0, 0, g, det, ntheta);
hipMemcpy((float2 *)g_, g, det * det * ntheta * sizeof(float2), hipMemcpyDefault);
}
void lamusfft::adj(size_t f_, size_t g_, size_t theta_) {
//f = (float2 *)f_;
//g = (float2 *)g_;
hipMemcpy(g, (float2 *)g_, det * det * ntheta * sizeof(float2), hipMemcpyDefault);
hipMemset(fdee, 0, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
hipLaunchKernelGGL(( takexyz) , dim3(GS3d0), dim3(BS3d), 0, 0, x, y, z, theta, phi, gamma, det, ntheta);
hipLaunchKernelGGL(( fftshiftc2d) , dim3(GS3d0), dim3(BS3d), 0, 0, g, det, ntheta);
hipfftExecC2C(plan2d, (hipfftComplex *)g, (hipfftComplex *)g, HIPFFT_FORWARD);
hipLaunchKernelGGL(( fftshiftc2d) , dim3(GS3d0), dim3(BS3d), 0, 0, g, det, ntheta);
/*
for (int i=0;i<ngpus;i++)
{
hipSetDevice(i) ;
printf("Device %d\n",i);
int st = i*ntheta/ngpus*det*det;
hipMemPrefetchAsync(&g[st],ntheta/ngpus*det*det*sizeof(float2),i,streams[i]);
hipMemPrefetchAsync(&x[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(&y[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(&z[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
hipMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),i,streams[i]);
gather <<<GS3d00, BS3d,0,streams[i]>>> (&g[st], fdee, &x[st], &y[st], &z[st], m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta/ngpus, TOMO_ADJ);
}
hipDeviceSynchronize();
hipMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),0,streams[0]);
hipSetDevice(0) ;*/
hipLaunchKernelGGL(( gather) , dim3(GS3d0), dim3(BS3d), 0, 0, g, fdee, x, y, z, m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta, TOMO_ADJ);
hipLaunchKernelGGL(( wrap) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, n0, n1, n2, m0, m1, m2, TOMO_ADJ);
hipLaunchKernelGGL(( fftshiftc3d) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
hipfftExecC2C(plan3d, (hipfftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)],
(hipfftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)], HIPFFT_BACKWARD);
hipLaunchKernelGGL(( fftshiftc3d) , dim3(GS3d3), dim3(BS3d), 0, 0, fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
hipLaunchKernelGGL(( divker) , dim3(GS3d1), dim3(BS3d), 0, 0, fdee, f, mu0, mu1, mu2, n0, n1, n2, m0,m1,m2, TOMO_ADJ);
hipMemcpy((float2 *)f_, f, n0 * n1 * n2 * sizeof(float2),
hipMemcpyDefault);
} | ae91ab0388d7068fed53daccf1fafb3ecc1b948f.cu | #include <stdio.h>
#include "lamusfft.cuh"
#include "kernels_lam.cu"
#include "shift.cu"
lamusfft::lamusfft(size_t n0, size_t n1, size_t n2, size_t det, size_t ntheta, float phi, float gamma, float eps, size_t ngpus)
: n0(n0), n1(n1), n2(n2), det(det), ntheta(ntheta), phi(phi), gamma(gamma), ngpus(ngpus) {
mu0 = -log(eps) / (2 * n0 * n0);
mu1 = -log(eps) / (2 * n1 * n1);
mu2 = -log(eps) / (2 * n2 * n2);
m0 = ceil(2 * n0 * 1 / PI * sqrt(-mu0 * log(eps) + (mu0 * n0) * (mu0 * n0) / 4));
m1 = ceil(2 * n1 * 1 / PI * sqrt(-mu1 * log(eps) + (mu1 * n1) * (mu1 * n1) / 4));
m2 = ceil(2 * n2 * 1 / PI * sqrt(-mu2 * log(eps) + (mu2 * n2) * (mu2 * n2) / 4));
fprintf(stderr,"interp radius in USFFT: %d\n",m0);
cudaMalloc((void **)&f, n0 * n1 * n2 * sizeof(float2));
cudaMalloc((void **)&g, det * det * ntheta * sizeof(float2));
cudaMalloc((void **)&fdee,
(2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
cudaMalloc((void **)&x, det * det * ntheta * sizeof(float));
cudaMalloc((void **)&y, det * det * ntheta * sizeof(float));
cudaMalloc((void **)&z, det * det * ntheta * sizeof(float));
cudaMalloc((void **)&theta, ntheta * sizeof(float));
int ffts[3];
int idist;
int inembed[3];
// fft 3d
ffts[0] = 2 * n2;
ffts[1] = 2 * n1;
ffts[2] = 2 * n0;
idist = (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)* (2 * n2 + 2 * m2);
inembed[0] = 2 * n2 + 2 * m2; // Note the order is reverse!
inembed[1] = 2 * n1 + 2 * m1;
inembed[2] = 2 * n0 + 2 * m0;
cufftResult a = cufftPlanMany(&plan3d, 3, ffts, inembed, 1, idist, inembed, 1, idist,
CUFFT_C2C, 1);
fprintf(stderr,"%d",a);
// fft 2d
ffts[0] = det;
ffts[1] = det;
idist = det*det;
inembed[0] = det;
inembed[1] = det;
a = cufftPlanMany(&plan2d, 2, ffts, inembed, 1, idist, inembed, 1, idist,
CUFFT_C2C, ntheta);
fprintf(stderr,"%d",a);
streams = new cudaStream_t[ngpus];
for (int i=0;i<ngpus;i++)
cudaStreamCreate(&streams[i]) ;
BS3d = dim3(16, 16, 4);
GS3d0 = dim3(ceil(det / (float)BS3d.x), ceil(det / (float)BS3d.y),
ceil(ntheta / (float)BS3d.z));
GS3d00 = dim3(ceil(det / (float)BS3d.x), ceil(det / (float)BS3d.y),
ceil(ntheta/ngpus / (float)BS3d.z));
GS3d1 = dim3(ceil(n0 / (float)BS3d.x), ceil(n1 / (float)BS3d.y),
ceil(n2 / (float)BS3d.z));
GS3d2 = dim3(ceil(2*n0 / (float)BS3d.x),
ceil(2*n1 / (float)BS3d.y),
ceil(2*n2 / (float)BS3d.z));
GS3d3 = dim3(ceil((2 * n0 + 2 * m0) / (float)BS3d.x),
ceil((2 * n1 + 2 * m1) / (float)BS3d.y),
ceil((2 * n2 + 2 * m2) / (float)BS3d.z));
}
// destructor, memory deallocation
lamusfft::~lamusfft() { free(); }
void lamusfft::free() {
if (!is_free) {
cudaFree(f);
cudaFree(g);
cudaFree(fdee);
cudaFree(x);
cudaFree(y);
cudaFree(z);
cufftDestroy(plan3d);
cufftDestroy(plan2d);
is_free = true;
}
}
void lamusfft::fwd(size_t g_, size_t f_, size_t theta_) {
cudaMemcpy(f, (float2 *)f_, n0 * n1 * n2 * sizeof(float2), cudaMemcpyDefault);
//f = (float2 *)f_;
//g = (float2 *)g_;
cudaMemcpy(theta, (float *)theta_, ntheta * sizeof(float), cudaMemcpyDefault);
cudaMemset(fdee, 0, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
takexyz <<<GS3d0, BS3d>>> (x, y, z, theta, phi, gamma, det, ntheta);
divker <<<GS3d1, BS3d>>> (fdee, f, mu0, mu1, mu2, n0, n1, n2, m0, m1,m2, TOMO_FWD);
fftshiftc3d <<<GS3d3, BS3d>>> (fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
cufftExecC2C(plan3d, (cufftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)].x,
(cufftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)].x, CUFFT_FORWARD);
fftshiftc3d <<<GS3d3, BS3d>>> (fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
wrap <<<GS3d3, BS3d>>> (fdee, n0, n1, n2, m0, m1, m2, TOMO_FWD);
/*for (int i=0;i<ngpus;i++)
{
cudaSetDevice(i) ;
int st = i*ntheta/ngpus*det*det;
cudaMemPrefetchAsync(&g[st],ntheta/ngpus*det*det*sizeof(float2),i,streams[i]);
cudaMemPrefetchAsync(&x[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(&y[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(&z[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),i,streams[i]);
gather <<<GS3d00, BS3d, 0, streams[i]>>> (&g[st], fdee, &x[st], &y[st], &z[st], m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta/ngpus, TOMO_FWD);
}
cudaDeviceSynchronize();
cudaMemPrefetchAsync(g,ntheta*det*det*sizeof(float),0,streams[0]);
cudaSetDevice(0) ; */
gather <<<GS3d0, BS3d>>> (g, fdee, x, y, z, m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta, TOMO_FWD);
fftshiftc2d <<<GS3d0, BS3d>>> (g, det, ntheta);
cufftExecC2C(plan2d, (cufftComplex *)g, (cufftComplex *)g, CUFFT_INVERSE);
fftshiftc2d <<<GS3d0, BS3d>>> (g, det, ntheta);
cudaMemcpy((float2 *)g_, g, det * det * ntheta * sizeof(float2), cudaMemcpyDefault);
}
void lamusfft::adj(size_t f_, size_t g_, size_t theta_) {
//f = (float2 *)f_;
//g = (float2 *)g_;
cudaMemcpy(g, (float2 *)g_, det * det * ntheta * sizeof(float2), cudaMemcpyDefault);
cudaMemset(fdee, 0, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2));
takexyz <<<GS3d0, BS3d>>> (x, y, z, theta, phi, gamma, det, ntheta);
fftshiftc2d <<<GS3d0, BS3d>>> (g, det, ntheta);
cufftExecC2C(plan2d, (cufftComplex *)g, (cufftComplex *)g, CUFFT_FORWARD);
fftshiftc2d <<<GS3d0, BS3d>>> (g, det, ntheta);
/*
for (int i=0;i<ngpus;i++)
{
cudaSetDevice(i) ;
printf("Device %d\n",i);
int st = i*ntheta/ngpus*det*det;
cudaMemPrefetchAsync(&g[st],ntheta/ngpus*det*det*sizeof(float2),i,streams[i]);
cudaMemPrefetchAsync(&x[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(&y[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(&z[st],ntheta/ngpus*det*det*sizeof(float),i,streams[i]);
cudaMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),i,streams[i]);
gather <<<GS3d00, BS3d,0,streams[i]>>> (&g[st], fdee, &x[st], &y[st], &z[st], m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta/ngpus, TOMO_ADJ);
}
cudaDeviceSynchronize();
cudaMemPrefetchAsync(fdee, (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1) * (2 * n2 + 2 * m2) * sizeof(float2),0,streams[0]);
cudaSetDevice(0) ;*/
gather <<<GS3d0, BS3d>>> (g, fdee, x, y, z, m0, m1, m2, mu0, mu1, mu2, n0, n1, n2, det, ntheta, TOMO_ADJ);
wrap <<<GS3d3, BS3d>>> (fdee, n0, n1, n2, m0, m1, m2, TOMO_ADJ);
fftshiftc3d <<<GS3d3, BS3d>>> (fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
cufftExecC2C(plan3d, (cufftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)],
(cufftComplex *)&fdee[m0 + m1 * (2 * n0 + 2 * m0) + m2 * (2 * n0 + 2 * m0) * (2 * n1 + 2 * m1)], CUFFT_INVERSE);
fftshiftc3d <<<GS3d3, BS3d>>> (fdee, 2 * n0 + 2 * m0, 2 * n1 +2 * m1, 2 * n2 +2 * m2);
divker <<<GS3d1, BS3d>>> (fdee, f, mu0, mu1, mu2, n0, n1, n2, m0,m1,m2, TOMO_ADJ);
cudaMemcpy((float2 *)f_, f, n0 * n1 * n2 * sizeof(float2),
cudaMemcpyDefault);
} |
af15c9f77e8ddabdadcd8090019d674c9cf76044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// Tiled 2D convolution kernel thread specification
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * TILE_SIZE + ty;
int col_o = blockIdx.x * TILE_SIZE + tx;
int n = KERNEL_SIZE/2;
int row_i=row_o-n;
int col_i = col_o - n;
__shared__ float N_s[BLOCK_SIZE][BLOCK_SIZE];
if((row_i >= 0) && (row_i < N.height) &&
(col_i >= 0) && (col_i < N.width) ) {
N_s[ty][tx] = N.elements[row_i*N.width + col_i];
}
else{
N_s[ty][tx] = 0.0f;
}
__syncthreads();
float output=0.0f;
if(ty < TILE_SIZE && tx < TILE_SIZE){
for(int i = 0; i < KERNEL_SIZE; i++) {
for(int j = 0; j < KERNEL_SIZE; j++) {
output += Mc[i*KERNEL_SIZE+j] * N_s[i+ty][j+tx];
}
}
//__syncthreads();
if(row_o < P.height && col_o < P.width)
P.elements[row_o * P.width + col_o] = output;
}
}
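// Illustrative host-side launch sketch (not part of the original file),
// assuming 2Dconvolution.h defines TILE_SIZE, KERNEL_SIZE,
// BLOCK_SIZE = TILE_SIZE + KERNEL_SIZE - 1, and the constant-memory filter
// array Mc used above:
//
// dim3 block(BLOCK_SIZE, BLOCK_SIZE);
// dim3 grid((P.width + TILE_SIZE - 1) / TILE_SIZE,
// (P.height + TILE_SIZE - 1) / TILE_SIZE);
// hipLaunchKernelGGL(ConvolutionKernel, grid, block, 0, 0, N, P);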
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
| af15c9f77e8ddabdadcd8090019d674c9cf76044.cu | #ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// Tiled 2D convolution kernel thread specification
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * TILE_SIZE + ty;
int col_o = blockIdx.x * TILE_SIZE + tx;
int n = KERNEL_SIZE/2;
int row_i=row_o-n;
int col_i = col_o - n;
__shared__ float N_s[BLOCK_SIZE][BLOCK_SIZE];
if((row_i >= 0) && (row_i < N.height) &&
(col_i >= 0) && (col_i < N.width) ) {
N_s[ty][tx] = N.elements[row_i*N.width + col_i];
}
else{
N_s[ty][tx] = 0.0f;
}
__syncthreads();
float output=0.0f;
if(ty < TILE_SIZE && tx < TILE_SIZE){
for(int i = 0; i < KERNEL_SIZE; i++) {
for(int j = 0; j < KERNEL_SIZE; j++) {
output += Mc[i*KERNEL_SIZE+j] * N_s[i+ty][j+tx];
}
}
//__syncthreads();
if(row_o < P.height && col_o < P.width)
P.elements[row_o * P.width + col_o] = output;
}
}
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
|
b5607f8fa760c4b3b96646a1a8db368217771af0.hip | // !!! This is a file automatically generated by hipify!!!
#include "Timer.h"
#define TIMERSIZE 200
int timerIndex;
int arrayTop;
int zeroID;
int oneID;
struct timeval One_time, Zero_time;
int * timerID;
int * timerCallNum;
double * timerElapsedTime;
hipEvent_t start, stop;
void timerStart()
{
//size_t sizeTimer = TIMERSIZE * sizeof(char*);
size_t sizeTimerElapsedTime = TIMERSIZE * sizeof(double);
size_t sizeTimerCallNum = TIMERSIZE * sizeof(int);
timerElapsedTime = (double *)malloc(sizeTimerElapsedTime);
timerCallNum = (int *)malloc(sizeTimerCallNum);
//timerName = (char **)malloc(sizeTimer);
timerID = (int *)malloc(sizeTimerCallNum);
timerIndex = -1;
arrayTop = -1;
//for cuda event method
hipEventCreate(&start);
hipEventCreate(&stop);
}
void timerEnd()
{
for (int i = 0; i <= arrayTop; i++)
{
printf("Function No: %d, Timer No: %d, Time: %e s, Loop No: %d\n",
i, timerID[i], timerElapsedTime[i], timerCallNum[i]);
}
free(timerElapsedTime);
free(timerCallNum);
free(timerID);
hipEventDestroy(start);
hipEventDestroy(stop);
}
void timeZero(int value)
{
zeroID = value;
int indexFlag = 0;
for (int i = 0; i <= arrayTop; i++)
{
if (timerID[i] == zeroID)
{
timerIndex = i;
indexFlag = 1;
break;
}
}
if (!indexFlag)
{
arrayTop++;
timerID[arrayTop] = zeroID;
timerElapsedTime[arrayTop] = 0.0;
timerCallNum[arrayTop] = 0;
timerIndex = arrayTop;
}
hipDeviceSynchronize();
gettimeofday(&Zero_time, NULL);
}
void timeOne(int value)
{
hipDeviceSynchronize();
gettimeofday(&One_time, NULL);
oneID = value;
double elapsedTime = (One_time.tv_sec - Zero_time.tv_sec) + (One_time.tv_usec - Zero_time.tv_usec) * 1.0e-6;
if (oneID != zeroID)
{
printf("Error in timer: oneID is not equal to zeroID, oneID = %d, zeroID = %d\n", oneID, zeroID);
exit(1);
}
timerElapsedTime[timerIndex] += elapsedTime;
timerCallNum[timerIndex]++;
}
void timerGPUStart()
{
size_t sizeTimerElapsedTime = TIMERSIZE * sizeof(double);
size_t sizeTimerCallNum = TIMERSIZE * sizeof(int);
timerElapsedTime = (double *)malloc(sizeTimerElapsedTime);
timerCallNum = (int *)malloc(sizeTimerCallNum);
timerID = (int *)malloc(sizeTimerCallNum);
timerIndex = -1;
arrayTop = -1;
//for cuda event method
hipEventCreate(&start);
hipEventCreate(&stop);
}
void timeGPUZero(int value)
{
zeroID = value;
int indexFlag = 0;
for (int i = 0; i <= arrayTop; i++)
{
if (timerID[i] == zeroID)
{
timerIndex = i;
indexFlag = 1;
break;
}
}
if (!indexFlag)
{
arrayTop++;
timerID[arrayTop] = zeroID;
timerElapsedTime[arrayTop] = 0.0;
timerCallNum[arrayTop] = 0;
timerIndex = arrayTop;
}
//The cuda event is only recorded for stream 0.
hipEventRecord(start, 0);
}
void timeGPUOne(int value)
{
hipEventRecord(stop, 0);
hipEventSynchronize( stop );
oneID = value;
if (oneID != zeroID)
{
printf("Error in timer: oneID is not equal to zeroID, oneID = %d, zeroID = %d", oneID, zeroID);
exit(1);
}
float ELAPSEDTIME;
hipEventElapsedTime(&ELAPSEDTIME, start, stop);
ELAPSEDTIME /=1000.0; // from ms to s
timerElapsedTime[timerIndex] += ELAPSEDTIME;
timerCallNum[timerIndex]++;
}
void timerGPUEnd()
{
FILE * outputFile = fopen("timeRecord", "w");
for (int i = 0; i <= arrayTop; i++)
{
//printf("Function No: %d, Algorithm: %d, Time: %e s, Loop No: %d\n",
// i, timerID[i], timerElapsedTime[i], timerCallNum[i]);
printf("Function No: %d, Time: %e s, Loop No: %d\n",
i, timerElapsedTime[i], timerCallNum[i]);
//fprintf(outputFile, "%d %f\n", timerID[i], timerElapsedTime[i]);
fprintf(outputFile, "Function No. %d Performance: %f\n", i, timerElapsedTime[i]);
}
free(timerElapsedTime);
free(timerCallNum);
free(timerID);
//delete the cuda event
hipEventDestroy(start);
hipEventDestroy(stop);
fclose(outputFile);
}
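// Minimal usage sketch (illustrative, not part of the original file): wrap
// the region to be timed with matching Zero/One calls sharing an arbitrary
// integer ID. The function is never called here.
static void timerGPUUsageExample()
{
timerGPUStart();
timeGPUZero(/*value=*/0); // record the start event for timer ID 0
// ... launch the kernels to be timed here ...
timeGPUOne(/*value=*/0); // record stop; accumulates elapsed time for ID 0
timerGPUEnd(); // prints per-ID totals and writes "timeRecord"
}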
| b5607f8fa760c4b3b96646a1a8db368217771af0.cu | #include "Timer.h"
#define TIMERSIZE 200
int timerIndex;
int arrayTop;
int zeroID;
int oneID;
struct timeval One_time, Zero_time;
int * timerID;
int * timerCallNum;
double * timerElapsedTime;
cudaEvent_t start, stop;
void timerStart()
{
//size_t sizeTimer = TIMERSIZE * sizeof(char*);
size_t sizeTimerElapsedTime = TIMERSIZE * sizeof(double);
size_t sizeTimerCallNum = TIMERSIZE * sizeof(int);
timerElapsedTime = (double *)malloc(sizeTimerElapsedTime);
timerCallNum = (int *)malloc(sizeTimerCallNum);
//timerName = (char **)malloc(sizeTimer);
timerID = (int *)malloc(sizeTimerCallNum);
timerIndex = -1;
arrayTop = -1;
//for cuda event method
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
void timerEnd()
{
for (int i = 0; i <= arrayTop; i++)
{
printf("Function No: %d, Timer No: %d, Time: %e s, Loop No: %d\n",
i, timerID[i], timerElapsedTime[i], timerCallNum[i]);
}
free(timerElapsedTime);
free(timerCallNum);
free(timerID);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void timeZero(int value)
{
zeroID = value;
int indexFlag = 0;
for (int i = 0; i <= arrayTop; i++)
{
if (timerID[i] == zeroID)
{
timerIndex = i;
indexFlag = 1;
break;
}
}
if (!indexFlag)
{
arrayTop++;
timerID[arrayTop] = zeroID;
timerElapsedTime[arrayTop] = 0.0;
timerCallNum[arrayTop] = 0;
timerIndex = arrayTop;
}
cudaDeviceSynchronize();
gettimeofday(&Zero_time, NULL);
}
void timeOne(int value)
{
cudaDeviceSynchronize();
gettimeofday(&One_time, NULL);
oneID = value;
double elapsedTime = (One_time.tv_sec - Zero_time.tv_sec) + (One_time.tv_usec - Zero_time.tv_usec) * 1.0e-6;
if (oneID != zeroID)
{
printf("Error in timer: oneID is not equal to zeroID, oneID = %d, zeroID = %d\n", oneID, zeroID);
exit(1);
}
timerElapsedTime[timerIndex] += elapsedTime;
timerCallNum[timerIndex]++;
}
void timerGPUStart()
{
size_t sizeTimerElapsedTime = TIMERSIZE * sizeof(double);
size_t sizeTimerCallNum = TIMERSIZE * sizeof(int);
timerElapsedTime = (double *)malloc(sizeTimerElapsedTime);
timerCallNum = (int *)malloc(sizeTimerCallNum);
timerID = (int *)malloc(sizeTimerCallNum);
timerIndex = -1;
arrayTop = -1;
//for cuda event method
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
void timeGPUZero(int value)
{
zeroID = value;
int indexFlag = 0;
for (int i = 0; i <= arrayTop; i++)
{
if (timerID[i] == zeroID)
{
timerIndex = i;
indexFlag = 1;
break;
}
}
if (!indexFlag)
{
arrayTop++;
timerID[arrayTop] = zeroID;
timerElapsedTime[arrayTop] = 0.0;
timerCallNum[arrayTop] = 0;
timerIndex = arrayTop;
}
//The cuda event is only recorded for stream 0.
cudaEventRecord(start, 0);
}
void timeGPUOne(int value)
{
cudaEventRecord(stop, 0);
cudaEventSynchronize( stop );
oneID = value;
if (oneID != zeroID)
{
printf("Error in timer: oneID is not equal to zeroID, oneID = %d, zeroID = %d", oneID, zeroID);
exit(1);
}
float ELAPSEDTIME;
cudaEventElapsedTime(&ELAPSEDTIME, start, stop);
ELAPSEDTIME /=1000.0; // from ms to s
timerElapsedTime[timerIndex] += ELAPSEDTIME;
timerCallNum[timerIndex]++;
}
void timerGPUEnd()
{
FILE * outputFile = fopen("timeRecord", "w");
for (int i = 0; i <= arrayTop; i++)
{
//printf("Function No: %d, Algorithm: %d, Time: %e s, Loop No: %d\n",
// i, timerID[i], timerElapsedTime[i], timerCallNum[i]);
printf("Function No: %d, Time: %e s, Loop No: %d\n",
i, timerElapsedTime[i], timerCallNum[i]);
//fprintf(outputFile, "%d %f\n", timerID[i], timerElapsedTime[i]);
fprintf(outputFile, "Function No. %d Performance: %f\n", i, timerElapsedTime[i]);
}
free(timerElapsedTime);
free(timerCallNum);
free(timerID);
//delete the cuda event
cudaEventDestroy(start);
cudaEventDestroy(stop);
fclose(outputFile);
}
|
5ddb1c5912994639cde98b59d34def2d32634681.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zmdot.cu normal z -> s, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
#define PRECISION_s
// initialize arrays with zero
__global__ void
magma_sgpumemzero(
float * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<k; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_sdot_kernel(
int Gs,
int n,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_sblockdot_kernel(
int Gs,
int n,
int k,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else{
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_sreduce_kernel_fast( int Gs,
int n,
float * vtmp,
float * vtmp2 ){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel_fast(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc(
int n,
int k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
hipLaunchKernelGGL(( magma_sblockdot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, k, v, r, d1 );
}
else {
hipLaunchKernelGGL(( magma_sdot_kernel), dim3(Gs), dim3(Bs), Ms, 0, Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_sgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
magma_sgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
//magmablas_slaset( MagmaUpperLower, n, k, d1, n );
//magmablas_slaset( MagmaUpperLower, n, k, d2, n );
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_sblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_sreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_s
********************************************************************/
extern "C" magma_int_t
magma_sgemvmdot(
int n,
int k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (4 here) - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
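// Minimal usage sketch (illustrative, not from the original source):
// computing skp[j] = <v_j, r> for k = 8 vectors of length n via the chunked
// driver above. d1 and d2 are caller-provided device workspaces; given the
// internal chunk_size of 4, at least 4 * n elements each is a safe sizing
// assumption here.
//
// magmaFloat_ptr v, r, d1, d2, skp; // device buffers allocated elsewhere
// magma_sgemvmdot( n, 8, v, r, d1, d2, skp, queue );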
| 5ddb1c5912994639cde98b59d34def2d32634681.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zmdot.cu normal z -> s, Fri Jan 30 19:00:29 2015
@author Hartwig Anzt
*/
#include "common_magma.h"
#define BLOCK_SIZE 256
#define PRECISION_s
// initialize arrays with zero
__global__ void
magma_sgpumemzero(
float * d,
int n,
int k )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
for( int j=0; j<k; j++)
d[ i+j*n ] = MAGMA_S_MAKE( 0.0, 0.0 );
}
}
// dot product
__global__ void
magma_sdot_kernel(
int Gs,
int n,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_S_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
__global__ void
magma_sblockdot_kernel(
int Gs,
int n,
int k,
float * v,
float * r,
float * vtmp)
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i)
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else{
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] =MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_S_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
__global__ void
magma_sreduce_kernel_fast( int Gs,
int n,
float * vtmp,
float * vtmp2 ){
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_S_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
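/*
    Note (explanatory): this "fast" variant folds two classic reduction
    optimizations into one kernel -- every thread first accumulates a
    strided range of the input, adding two elements per loop trip, and
    only afterwards runs the shared-memory tree collapse. This is why
    the host code below launches it with half the grid ( Gs_next.x/2 )
    and half the threads ( Bs.x/2 ).
*/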
// accelerated block reduction for multiple vectors
__global__ void
magma_sblockreduce_kernel_fast(
int Gs,
int n,
int k,
float * vtmp,
float * vtmp2 )
{
extern __shared__ float temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_S_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_S_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
                input vector r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_smdotc(
int n,
int k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
// set queue for old dense routines
magma_queue_t orig_queue;
magmablasGetKernelStream( &orig_queue );
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( float ); // k vecs
magmaFloat_ptr aux1 = d1, aux2 = d2;
int b = 1;
if (k>1) {
magma_sblockdot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_sdot_kernel<<<Gs, Bs, Ms>>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_sgpumemzero<<<Gs, Bs, 0>>>( d1, n*k,1 );
magma_sgpumemzero<<<Gs, Bs, 0>>>( d2, n*k,1 );
//magmablas_slaset( MagmaUpperLower, n, k, d1, n );
//magmablas_slaset( MagmaUpperLower, n, k, d2, n );
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
magma_sblockreduce_kernel<<< Gs_next.x, Bs.x, Ms >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
*/
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_sreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
for( int j=0; j<k; j++) {
magma_scopyvector( 1, aux1+j*n, 1, skp+j, 1 );
}
magmablasSetKernelStream( orig_queue );
return MAGMA_SUCCESS;
}
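/*
    Hedged sketch (not MAGMA code): the d1/d2 buffer handling above is a
    ping-pong reduction -- every pass shrinks the number of partial sums
    per vector by the block size, alternating which workspace is read
    and which is written until one value per vector remains.
    Schematically, with a hypothetical launch_reduce() standing in for
    the kernel launches:

        float *in = d1, *out = d2;
        while ( len > 1 ) {
            int out_len = ( len + 255 ) / 256;  // one partial sum per block
            launch_reduce( out, in, len );      // hypothetical launcher
            len = out_len;
            float *tmp = in; in = out; out = tmp;
        }
*/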
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
    It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloat_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloat_ptr
                input vector r
@param[in]
d1 magmaFloat_ptr
workspace
@param[in]
d2 magmaFloat_ptr
workspace
@param[out]
skp magmaFloat_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
    @ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgemvmdot(
int n,
int k,
magmaFloat_ptr v,
magmaFloat_ptr r,
magmaFloat_ptr d1,
magmaFloat_ptr d2,
magmaFloat_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
    // process in chunks of chunk_size (4 here) - has to be adapted to hardware and precision
while( rows_left > (chunk_size) ) {
magma_smdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest
magma_smdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
|
b8d5e82064260161a437047bbb503988b912296c.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorMathReduce.hip>
#include <THH/THHGenerateLongType.h>
| b8d5e82064260161a437047bbb503988b912296c.cu | #include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorMathReduce.cu>
#include <THC/THCGenerateLongType.h>
|
ca650d8951ab55e39fafddc8d14923389c39c0a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-4-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
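/*
  For orientation (illustrative only): the generated kernel0_* launched
  above fuse __side0Len time steps per kernel call. A naive one-step
  version of the same 7-point star stencil, without temporal blocking,
  would look roughly like this sketch:
*/
__global__ void star3d1r_naive(const double *in, double *out, int dimsize)
{
  int k = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  int i = blockIdx.z * blockDim.z + threadIdx.z;
  // skip the halo of radius BENCH_RAD == 1
  if (i < 1 || j < 1 || k < 1 ||
      i >= dimsize - 1 || j >= dimsize - 1 || k >= dimsize - 1)
    return;
  size_t d = (size_t)dimsize;
  size_t idx = ((size_t)i * d + j) * d + k;
  out[idx] = 0.2500 * in[idx]
           + 0.1248 * in[idx - d*d] + 0.1249 * in[idx + d*d]
           + 0.1250 * in[idx - d]   + 0.1251 * in[idx + d]
           + 0.1252 * in[idx - 1]   + 0.1253 * in[idx + 1];
}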
| ca650d8951ab55e39fafddc8d14923389c39c0a0.cu | #include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-4-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
0fb7968e54f6966e4b04cf5ed25e570e340888e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
class FloatToHalfCUDA : public Operator<CUDAContext> {
public:
FloatToHalfCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~FloatToHalfCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ReshapeLike(X);
hipLaunchKernelGGL(( FloatToHalfKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), X.data<float>(),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
DISABLE_COPY_AND_ASSIGN(FloatToHalfCUDA);
};
class HalfToFloatCUDA : public Operator<CUDAContext> {
public:
HalfToFloatCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~HalfToFloatCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ReshapeLike(X);
hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
Y->mutable_data<float>());
return true;
}
DISABLE_COPY_AND_ASSIGN(HalfToFloatCUDA);
};
namespace {
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfCUDA);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatCUDA);
OPERATOR_SCHEMA(FloatToHalf).NumInputs(1).NumOutputs(1);
OPERATOR_SCHEMA(HalfToFloat).NumInputs(1).NumOutputs(1);
class GetFloatToHalfGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"HalfToFloat", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(FloatToHalf, GetFloatToHalfGradient);
class GetHalfToFloatGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"FloatToHalf", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(HalfToFloat, GetHalfToFloatGradient);
} // namespace
} // namespace caffe2
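/*
  For reference (a sketch, not the actual Caffe2 macro): CUDA_1D_KERNEL_LOOP
  used above expands to a grid-stride loop of roughly this shape, so each
  thread handles several elements whenever N exceeds the number of launched
  threads. The half type and __float2half come from the includes above.
*/
__global__ void FloatToHalfSketch(const int N, const float* X, half* Y) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
       i += blockDim.x * gridDim.x) {
    Y[i] = __float2half(X[i]);
  }
}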
#endif // CAFFE_HAS_CUDA_FP16
| 0fb7968e54f6966e4b04cf5ed25e570e340888e6.cu | #include "caffe2/core/common_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
}
class FloatToHalfCUDA : public Operator<CUDAContext> {
public:
FloatToHalfCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~FloatToHalfCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ReshapeLike(X);
FloatToHalfKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), X.data<float>(),
reinterpret_cast<half*>(Y->mutable_data<float16>()));
return true;
}
DISABLE_COPY_AND_ASSIGN(FloatToHalfCUDA);
};
class HalfToFloatCUDA : public Operator<CUDAContext> {
public:
HalfToFloatCUDA(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {}
~HalfToFloatCUDA() {}
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0);
Y->ReshapeLike(X);
HalfToFloatKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
X.size(), reinterpret_cast<const half*>(X.data<float16>()),
Y->mutable_data<float>());
return true;
}
DISABLE_COPY_AND_ASSIGN(HalfToFloatCUDA);
};
namespace {
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfCUDA);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatCUDA);
OPERATOR_SCHEMA(FloatToHalf).NumInputs(1).NumOutputs(1);
OPERATOR_SCHEMA(HalfToFloat).NumInputs(1).NumOutputs(1);
class GetFloatToHalfGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"HalfToFloat", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(FloatToHalf, GetFloatToHalfGradient);
class GetHalfToFloatGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return SingleGradientDef(
"FloatToHalf", "",
vector<string>{GO(0)},
vector<string>{GI(0)});
}
};
REGISTER_GRADIENT(HalfToFloat, GetHalfToFloatGradient);
} // namespace
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
|
600400dc8e4d3ac34af5cd6b8de162fc3acbc6bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <LevelSet.h>
#include <cmath>
int main(int argc, char *argv[])
{
LevelSet data(false,"../src/test/test_data/sphere334",false);
std::string type = "x";
float isovalue = 0.;
//input filename (minus extension)
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i],"-v") == 0) {
data.verbose_ = true;
} else if (strcmp(argv[i],"-i") == 0) {
if (i+1 >= argc) break;
data.filename_ = std::string(argv[i+1]);
if (data.filename_.substr(data.filename_.size()-5,5) == ".node")
data.filename_ = data.filename_.substr(0,data.filename_.size() - 5);
if (data.filename_.substr(data.filename_.size()-4,4) == ".ele")
data.filename_ = data.filename_.substr(0,data.filename_.size() - 4);
i++;
} else if (strcmp(argv[i],"-n") == 0) {
if (i+1 >= argc) break;
data.numSteps_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-t") == 0) {
if (i+1 >= argc) break;
data.timeStep_ = atof(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-s") == 0) {
if (i+1 >= argc) break;
data.insideIterations_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-d") == 0) {
if (i+1 >= argc) break;
data.sideLengths_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-p") == 0) {
if (i+1 >= argc) break;
data.partitionType_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i], "-o") == 0) {
if (i + 1 >= argc) break;
isovalue = atof(argv[i + 1]);
i++;
} else if (strcmp(argv[i],"-m") == 0) {
if (i+1 >= argc) break;
data.metisSize_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-b") == 0) {
if (i+1 >= argc) break;
data.blockSize_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-w") == 0) {
if (i+1 >= argc) break;
data.bandwidth_ = atof(argv[i+1]);
i++;
} else if (strcmp(argv[i], "-y") == 0) {
if (i + 1 >= argc) break;
type = std::string(argv[++i]);
} else if (strcmp(argv[i],"-h") == 0) {
std::cout << "Usage: ./Example1 [OPTIONS]" << std::endl;
std::cout << " -h Print this help message." << std::endl;
std::cout << " -v Print verbose runtime information." << std::endl;
std::cout << " -i FILENAME Use this input tet mesh (node/ele)." << std::endl;
std::cout << " -n NSTEPS # of steps to take of TIMESTEP amount." << std::endl;
std::cout << " -t TIMESTEP Duration of a timestep." << std::endl;
std::cout << " -s INSIDE_NITER # of inside iterations." << std::endl;
std::cout << " -d NSIDE # of sides for Square partition type." << std::endl;
std::cout << " -p PARTITION_TYPE 1 for Square, otherwise it is METIS." << std::endl;
std::cout << " -b NUM_BLOCKS # of blocks for Square partition type." << std::endl;
std::cout << " -m METIS_SIZE The size for METIS partition type." << std::endl;
std::cout << " -w BANDWIDTH The Bandwidth for the algorithm." << std::endl;
std::cout << " -y EXAMPLE_TYPE Example type: 'center', 'revolve', 'x', 'curvature'" << std::endl;
std::cout << " -o ISOVALUE The isovalue for curvature." << std::endl;
exit(0);
}
}
if (type == "center" || type == "revolve" || type == "curvature") {
//find the center, max from center
data.initializeMesh();
point center(0, 0, 0);
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
center = center + data.tetMesh_->vertices[i];
}
center = center / static_cast<float>(data.tetMesh_->vertices.size());
float max = 0.;
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
point p = data.tetMesh_->vertices[i] - center;
float mag = len(p);
max = ::max(max, mag);
}
//initialize values of verts
std::vector<float> vals;
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
point p = data.tetMesh_->vertices[i] - center;
double mag = len(p);
if (type == "revolve") {
//get the angle with (+/-1,0,0)
float val = p[0];
if (val < 0.) val *= -1.;
float theta = std::acos(val / std::sqrt(p[0] * p[0] + p[1] * p[1]));
if (p[1] < 0.f) theta *= -1.f;
vals.push_back(10.f * theta);
} else {
if (type == "center") {
vals.push_back(mag - max / 2.);
} else {
vals.push_back(::max(std::abs(p[0]),
::max(std::abs(p[1]), std::abs(p[2]))) - isovalue);
}
}
}
//initialize advection to be away from the center.
std::vector<point> adv;
for (size_t i = 0; i < data.tetMesh_->tets.size(); i++) {
point p = (data.tetMesh_->vertices[data.tetMesh_->tets[i][0]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][1]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][2]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][3]])
/ 4.f - center;
float mag = len(p);
mag /= max / 20.f;
if (type == "revolve") {
//only care about XY plane angle
//get the tangent to the central circle
point p2 = p;
p2[2] = 0.f;
point p3 = p2 CROSS point(0, 0, 1);
adv.push_back(p3 * len(p2) / (100.f * len (p3)));
} else {
if (type == "center") {
adv.push_back(p / mag / mag);
} else {
adv.push_back(point(0.f,0.f,0.f));
}
}
}
data.initializeVertices(vals);
data.initializeAdvection(adv);
}
data.solveLevelSet();
data.writeVTK();
return 0;
}
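/*
  Worked detail (explanatory): in the "revolve" case above, the advection
  field is tangent to horizontal circles about the z axis. Projecting
  p = (x, y, z) into the XY plane gives p2 = (x, y, 0), and
      p3 = p2 x (0, 0, 1) = (y, -x, 0),
  which is perpendicular to p2 with len(p3) == len(p2); scaling by
  len(p2) / (100 * len(p3)) therefore yields a tangent whose magnitude
  grows with the circle radius.
*/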
| 600400dc8e4d3ac34af5cd6b8de162fc3acbc6bd.cu | #include <LevelSet.h>
#include <cmath>
int main(int argc, char *argv[])
{
LevelSet data(false,"../src/test/test_data/sphere334",false);
std::string type = "x";
float isovalue = 0.;
//input filename (minus extension)
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i],"-v") == 0) {
data.verbose_ = true;
} else if (strcmp(argv[i],"-i") == 0) {
if (i+1 >= argc) break;
data.filename_ = std::string(argv[i+1]);
if (data.filename_.substr(data.filename_.size()-5,5) == ".node")
data.filename_ = data.filename_.substr(0,data.filename_.size() - 5);
if (data.filename_.substr(data.filename_.size()-4,4) == ".ele")
data.filename_ = data.filename_.substr(0,data.filename_.size() - 4);
i++;
} else if (strcmp(argv[i],"-n") == 0) {
if (i+1 >= argc) break;
data.numSteps_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-t") == 0) {
if (i+1 >= argc) break;
data.timeStep_ = atof(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-s") == 0) {
if (i+1 >= argc) break;
data.insideIterations_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-d") == 0) {
if (i+1 >= argc) break;
data.sideLengths_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-p") == 0) {
if (i+1 >= argc) break;
data.partitionType_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i], "-o") == 0) {
if (i + 1 >= argc) break;
isovalue = atof(argv[i + 1]);
i++;
} else if (strcmp(argv[i],"-m") == 0) {
if (i+1 >= argc) break;
data.metisSize_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-b") == 0) {
if (i+1 >= argc) break;
data.blockSize_ = atoi(argv[i+1]);
i++;
} else if (strcmp(argv[i],"-w") == 0) {
if (i+1 >= argc) break;
data.bandwidth_ = atof(argv[i+1]);
i++;
} else if (strcmp(argv[i], "-y") == 0) {
if (i + 1 >= argc) break;
type = std::string(argv[++i]);
} else if (strcmp(argv[i],"-h") == 0) {
std::cout << "Usage: ./Example1 [OPTIONS]" << std::endl;
std::cout << " -h Print this help message." << std::endl;
std::cout << " -v Print verbose runtime information." << std::endl;
std::cout << " -i FILENAME Use this input tet mesh (node/ele)." << std::endl;
std::cout << " -n NSTEPS # of steps to take of TIMESTEP amount." << std::endl;
std::cout << " -t TIMESTEP Duration of a timestep." << std::endl;
std::cout << " -s INSIDE_NITER # of inside iterations." << std::endl;
std::cout << " -d NSIDE # of sides for Square partition type." << std::endl;
std::cout << " -p PARTITION_TYPE 1 for Square, otherwise is it METIS." << std::endl;
std::cout << " -b NUM_BLOCKS # of blocks for Square partition type." << std::endl;
std::cout << " -m METIS_SIZE The size for METIS partiation type." << std::endl;
std::cout << " -w BANDWIDTH The Bandwidth for the algorithm." << std::endl;
std::cout << " -y EXAMPLE_TYPE Example type: 'center', 'revolve', 'x', 'curvature'" << std::endl;
std::cout << " -o ISOVALUE The isovalue for curvature." << std::endl;
exit(0);
}
}
if (type == "center" || type == "revolve" || type == "curvature") {
//find the center, max from center
data.initializeMesh();
point center(0, 0, 0);
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
center = center + data.tetMesh_->vertices[i];
}
center = center / static_cast<float>(data.tetMesh_->vertices.size());
float max = 0.;
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
point p = data.tetMesh_->vertices[i] - center;
float mag = len(p);
max = std::max(max, mag);
}
//initialize values of verts
std::vector<float> vals;
for (size_t i = 0; i < data.tetMesh_->vertices.size(); i++) {
point p = data.tetMesh_->vertices[i] - center;
double mag = len(p);
if (type == "revolve") {
//get the angle with (+/-1,0,0)
float val = p[0];
if (val < 0.) val *= -1.;
float theta = std::acos(val / std::sqrt(p[0] * p[0] + p[1] * p[1]));
if (p[1] < 0.f) theta *= -1.f;
vals.push_back(10.f * theta);
} else {
if (type == "center") {
vals.push_back(mag - max / 2.);
} else {
vals.push_back(std::max(std::abs(p[0]),
std::max(std::abs(p[1]), std::abs(p[2]))) - isovalue);
}
}
}
//initialize advection to be away from the center.
std::vector<point> adv;
for (size_t i = 0; i < data.tetMesh_->tets.size(); i++) {
point p = (data.tetMesh_->vertices[data.tetMesh_->tets[i][0]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][1]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][2]] +
data.tetMesh_->vertices[data.tetMesh_->tets[i][3]])
/ 4.f - center;
float mag = len(p);
mag /= max / 20.f;
if (type == "revolve") {
//only care about XY plane angle
//get the tangent to the central circle
point p2 = p;
p2[2] = 0.f;
point p3 = p2 CROSS point(0, 0, 1);
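//(crossing with +z maps (x, y, 0) to (y, -x, 0), i.e. rotates the radial
//vector by -90 degrees in the XY plane, giving a tangential direction)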
adv.push_back(p3 * len(p2) / (100.f * len(p3)));
} else {
if (type == "center") {
adv.push_back(p / mag / mag);
} else {
adv.push_back(point(0.f,0.f,0.f));
}
}
}
data.initializeVertices(vals);
data.initializeAdvection(adv);
}
data.solveLevelSet();
data.writeVTK();
return 0;
}
|
a557fb74d334e83da7f5005be5097903c8649ad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved.
#include "PxPhysX.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "cloth/PxClothTypes.h"
namespace physx
{
// interleaved format must match that used by RendererClothShape
struct Vertex
{
PxVec3 position;
PxVec3 normal;
};
namespace
{
__device__ inline void PxAtomicFloatAdd(float* dest, float x)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(dest, x);
#else
union bits { float f; unsigned int i; };
bits oldVal, newVal;
do
{
// emulate atomic float add on 1.1 arch
oldVal.f = *dest;
newVal.f = oldVal.f + x;
}
while (atomicCAS((unsigned int*)dest, oldVal.i, newVal.i) != oldVal.i);
#endif
}
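// The CAS loop above is the standard lock-free read-modify-write pattern:
// snapshot the float through its bit pattern, compute the new value, and
// retry whenever another thread updated *dest in between, so each addition
// is applied exactly once. Accumulation order (and hence floating-point
// rounding) is nondeterministic across threads.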
__device__ void PxAtomicVec3Add(PxVec3& dest, PxVec3 inc)
{
PxAtomicFloatAdd(&dest.x, inc.x);
PxAtomicFloatAdd(&dest.y, inc.y);
PxAtomicFloatAdd(&dest.z, inc.z);
}
}
extern "C" __global__ void computeSmoothNormals(
const PxClothParticle* particles,
const PxU16* indices,
Vertex* vertices,
PxU32 numTris,
PxU32 numParticles)
{
// zero old normals
for (PxU32 i=threadIdx.x; i < numParticles; i += blockDim.x)
vertices[i].normal = PxVec3(0.0f);
__syncthreads();
for (PxU32 i=threadIdx.x; i < numTris; i += blockDim.x)
{
PxU16 a = indices[i*3];
PxU16 b = indices[i*3+1];
PxU16 c = indices[i*3+2];
// calculate face normal
PxVec3 e1 = particles[b].pos-particles[a].pos;
PxVec3 e2 = particles[c].pos-particles[a].pos;
PxVec3 n = e2.cross(e1);
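// n is left unnormalized: its magnitude is twice the triangle's area, so
// accumulating it per vertex yields area-weighted smooth normals.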
PxAtomicVec3Add(vertices[a].normal, n);
PxAtomicVec3Add(vertices[b].normal, n);
PxAtomicVec3Add(vertices[c].normal, n);
}
__syncthreads();
// update vertex buffer
for (PxU32 i=threadIdx.x; i < numParticles; i += blockDim.x)
{
vertices[i].position = particles[i].pos;
vertices[i].normal = vertices[i].normal.getNormalized();
}
}
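// Note: the kernel indexes work by threadIdx.x only and synchronizes the
// whole particle set with __syncthreads(), so it appears to be written for
// a single-block launch, e.g. (hypothetical configuration):
//   hipLaunchKernelGGL((computeSmoothNormals), dim3(1), dim3(256), 0, 0,
//                      particles, indices, vertices, numTris, numParticles);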
} | a557fb74d334e83da7f5005be5097903c8649ad6.cu | // This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2013 NVIDIA Corporation. All rights reserved.
#include "PxPhysX.h"
#include "PxVec4.h"
#include "PxVec3.h"
#include "PxVec2.h"
#include "cloth/PxClothTypes.h"
namespace physx
{
// interleaved format must match that used by RendererClothShape
struct Vertex
{
PxVec3 position;
PxVec3 normal;
};
namespace
{
__device__ inline void PxAtomicFloatAdd(float* dest, float x)
{
#if __CUDA_ARCH__ >= 200
atomicAdd(dest, x);
#else
union bits { float f; unsigned int i; };
bits oldVal, newVal;
do
{
// emulate atomic float add on 1.1 arch
oldVal.f = *dest;
newVal.f = oldVal.f + x;
}
while (atomicCAS((unsigned int*)dest, oldVal.i, newVal.i) != oldVal.i);
#endif
}
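// The CAS loop above is the standard lock-free read-modify-write pattern:
// snapshot the float through its bit pattern, compute the new value, and
// retry whenever another thread updated *dest in between, so each addition
// is applied exactly once. Accumulation order (and hence floating-point
// rounding) is nondeterministic across threads.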
__device__ void PxAtomicVec3Add(PxVec3& dest, PxVec3 inc)
{
PxAtomicFloatAdd(&dest.x, inc.x);
PxAtomicFloatAdd(&dest.y, inc.y);
PxAtomicFloatAdd(&dest.z, inc.z);
}
}
extern "C" __global__ void computeSmoothNormals(
const PxClothParticle* particles,
const PxU16* indices,
Vertex* vertices,
PxU32 numTris,
PxU32 numParticles)
{
// zero old normals
for (PxU32 i=threadIdx.x; i < numParticles; i += blockDim.x)
vertices[i].normal = PxVec3(0.0f);
__syncthreads();
for (PxU32 i=threadIdx.x; i < numTris; i += blockDim.x)
{
PxU16 a = indices[i*3];
PxU16 b = indices[i*3+1];
PxU16 c = indices[i*3+2];
// calculate face normal
PxVec3 e1 = particles[b].pos-particles[a].pos;
PxVec3 e2 = particles[c].pos-particles[a].pos;
PxVec3 n = e2.cross(e1);
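// n is left unnormalized: its magnitude is twice the triangle's area, so
// accumulating it per vertex yields area-weighted smooth normals.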
PxAtomicVec3Add(vertices[a].normal, n);
PxAtomicVec3Add(vertices[b].normal, n);
PxAtomicVec3Add(vertices[c].normal, n);
}
__syncthreads();
// update vertex buffer
for (PxU32 i=threadIdx.x; i < numParticles; i += blockDim.x)
{
vertices[i].position = particles[i].pos;
vertices[i].normal = vertices[i].normal.getNormalized();
}
}
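// Note: the kernel indexes work by threadIdx.x only and synchronizes the
// whole particle set with __syncthreads(), so it appears to be written for
// a single-block launch, e.g. (hypothetical configuration):
//   computeSmoothNormals<<<1, 256>>>(particles, indices, vertices,
//                                    numTris, numParticles);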
} |
34d54d92e363f1e889cf39971e23d9b4dd528df5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Row-wise Discrete Cosine Transform (DCT three)
* DCT_III_Row
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_III_Row(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_III_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
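// The kernel below computes the row-wise DCT-III as the matrix product
// C = A * B with N = numAColumns, where (orthonormal convention)
//   B[n][k] = sqrt(2/N) * w(n) * cos(pi * n * (2k + 1) / (2N)),
// with w(0) = 1/sqrt(2) and w(n) = 1 otherwise. Tiles of B are generated
// on the fly in shared memory instead of being read from global memory.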
template <unsigned int TILE_DIM > __global__ void DCTIII_Row_Kernel(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cosf(((2 * Col + 1) / (2.0 * numAColumns))*PI_d*(threadIdx.y + k*TILE_DIM))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / numAColumns); }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTRowThree(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get the current device and query its properties
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
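// Compute capability < 2.0 (pre-Fermi) caps blocks at 512 threads, too few
// for a 32x32 tile, so fall back to 16x16 tiles there.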
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Row_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Row_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 34d54d92e363f1e889cf39971e23d9b4dd528df5.cu | /*
* Discrete Cosine Transform in row wise (DCT three)
* DCT_III_Row
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_III_Row(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_III_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
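// The kernel below computes the row-wise DCT-III as the matrix product
// C = A * B with N = numAColumns, where (orthonormal convention)
//   B[n][k] = sqrt(2/N) * w(n) * cos(pi * n * (2k + 1) / (2N)),
// with w(0) = 1/sqrt(2) and w(n) = 1 otherwise. Tiles of B are generated
// on the fly in shared memory instead of being read from global memory.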
template <unsigned int TILE_DIM > __global__ void DCTIII_Row_Kernel(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = cosf(((2 * Col + 1) / (2.0 * numAColumns))*PI_d*(threadIdx.y + k*TILE_DIM))*sqrt(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1)))*sqrt(2.0 / numAColumns); }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTRowThree(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get the current device and query its properties
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
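// Compute capability < 2.0 (pre-Fermi) caps blocks at 512 threads, too few
// for a 32x32 tile, so fall back to 16x16 tiles there.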
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Row_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTIII_Row_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
e297ed17c6d349e2a3a948e9f517bde75244d8fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about a pixel.
struct Pixel {
float z;
int64_t idx; // idx of face
float dist; // abs distance of pixel to face
float3 bary;
};
__device__ bool operator<(const Pixel& a, const Pixel& b) {
return a.z < b.z || (a.z == b.z && a.idx < b.idx);
}
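// Pixels are ordered front-to-back by depth, with the face index breaking
// ties so that sorting coincident faces gives a deterministic result.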
// Get the xyz coordinates of the three vertices for the face given by the
// index face_idx into face_verts.
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
const float* face_verts,
int face_idx) {
const float x0 = face_verts[face_idx * 9 + 0];
const float y0 = face_verts[face_idx * 9 + 1];
const float z0 = face_verts[face_idx * 9 + 2];
const float x1 = face_verts[face_idx * 9 + 3];
const float y1 = face_verts[face_idx * 9 + 4];
const float z1 = face_verts[face_idx * 9 + 5];
const float x2 = face_verts[face_idx * 9 + 6];
const float y2 = face_verts[face_idx * 9 + 7];
const float z2 = face_verts[face_idx * 9 + 8];
const float3 v0xyz = make_float3(x0, y0, z0);
const float3 v1xyz = make_float3(x1, y1, z1);
const float3 v2xyz = make_float3(x2, y2, z2);
return thrust::make_tuple(v0xyz, v1xyz, v2xyz);
}
// Get the min/max x/y/z values for the face given by vertices v0, v1, v2.
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
const float xmin = FloatMin3(v0.x, v1.x, v2.x);
const float ymin = FloatMin3(v0.y, v1.y, v2.y);
const float zmin = FloatMin3(v0.z, v1.z, v2.z);
const float xmax = FloatMax3(v0.x, v1.x, v2.x);
const float ymax = FloatMax3(v0.y, v1.y, v2.y);
const float zmax = FloatMax3(v0.z, v1.z, v2.z);
return thrust::make_tuple(
make_float2(xmin, xmax),
make_float2(ymin, ymax),
make_float2(zmin, zmax));
}
// Check if the point (px, py) lies outside the face bounding box face_bbox.
// Return true if the point is outside.
__device__ bool CheckPointOutsideBoundingBox(
float3 v0,
float3 v1,
float3 v2,
float blur_radius,
float2 pxy) {
const auto bbox = GetFaceBoundingBox(v0, v1, v2);
const float2 xlims = thrust::get<0>(bbox);
const float2 ylims = thrust::get<1>(bbox);
const float2 zlims = thrust::get<2>(bbox);
const float x_min = xlims.x - blur_radius;
const float y_min = ylims.x - blur_radius;
const float x_max = xlims.y + blur_radius;
const float y_max = ylims.y + blur_radius;
// Faces with at least one vertex behind the camera won't render correctly
// and should be removed or clipped before calling the rasterizer
const bool z_invalid = zlims.x < kEpsilon;
// Check if the current point is outside the triangle bounding box.
return (
pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min ||
z_invalid);
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-ordered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
const float* face_verts, // (F, 3, 3)
const int64_t* clipped_faces_neighbor_idx, // (F,)
const int face_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
FaceQ& q,
const float blur_radius,
const float2 pxy, // Coordinates of the pixel
const int K,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
const float3 v0 = thrust::get<0>(v012);
const float3 v1 = thrust::get<1>(v012);
const float3 v2 = thrust::get<2>(v012);
// Only need xy for barycentric coordinates and distance calculations.
const float2 v0xy = make_float2(v0.x, v0.y);
const float2 v1xy = make_float2(v1.x, v1.y);
const float2 v2xy = make_float2(v2.x, v2.y);
// Perform checks and skip if:
// 1. the face is behind the camera
// 2. the face is facing away from the camera
// 3. the face has very small face area
// 4. the pixel is outside the face bbox
const float zmax = FloatMax3(v0.z, v1.z, v2.z);
const bool outside_bbox = CheckPointOutsideBoundingBox(
v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
// Check if the face is visible to the camera.
const bool back_face = face_area < 0.0;
const bool zero_face_area =
(face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
    zero_face_area) {
return;
}
// Calculate barycentric coords and euclidean dist to triangle.
const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
const float3 p_bary = !perspective_correct
? p_bary0
: BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
const float3 p_bary_clip =
!clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
const float pz =
p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
if (pz < 0) {
return; // Face is behind the image plane.
}
// Get abs squared distance
const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
// Use the unclipped bary coordinates to determine if the point is inside the
// face.
const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
const float signed_dist = inside ? -dist : dist;
// Check if pixel is outside blur region
if (!inside && dist >= blur_radius) {
return;
}
// Handle the case where a face (f) partially behind the image plane is
// clipped to a quadrilateral and then split into two faces (t1, t2). In this
// case we:
// 1. Find the index of the neighboring face (e.g. for t1 need index of t2)
// 2. Check if the neighboring face (t2) is already in the top K faces
// 3. If yes, compare the distance of the pixel to t1 with the distance to t2.
// 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces.
const int neighbor_idx = clipped_faces_neighbor_idx[face_idx];
int neighbor_idx_top_k = -1;
// Check if neighboring face is already in the top K.
// -1 is the fill value in clipped_faces_neighbor_idx
if (neighbor_idx != -1) {
// Only need to loop until q_size.
for (int i = 0; i < q_size; i++) {
if (q[i].idx == neighbor_idx) {
neighbor_idx_top_k = i;
break;
}
}
}
// If neighbor idx is not -1 then it is in the top K struct.
if (neighbor_idx_top_k != -1) {
// If dist of current face is less than neighbor then overwrite the
// neighbor face values in the top K struct.
float neighbor_dist = abs(q[neighbor_idx_top_k].dist);
if (dist < neighbor_dist) {
// Overwrite the neighbor face values
q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip};
// If pz > q_max then overwrite the max values and index of the max.
// q_size stays the same.
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = neighbor_idx_top_k;
}
}
} else {
// Handle as a normal face
if (q_size < K) {
// Just insert it.
q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max.
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizeMeshesNaiveCudaKernel(
const float* face_verts,
const int64_t* mesh_to_face_first_idx,
const int64_t* num_faces_per_mesh,
const int64_t* clipped_faces_neighbor_idx,
const float blur_radius,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces,
const int N,
const int H,
const int W,
const int K,
int64_t* face_idxs,
float* zbuf,
float* pix_dists,
float* bary) {
// Simple version: One thread per output pixel
int num_threads = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < N * H * W; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (H * W); // batch index.
const int pix_idx = i % (H * W);
// Reverse ordering of X and Y axes
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
// screen coordinates to ndc coordinates of pixel.
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
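// (PixToNonSquareNdc, defined in rasterization_utils.cuh, maps a pixel
// index to PyTorch3D's non-square NDC convention: the shorter image side
// spans [-1, 1] and the longer side a proportionally larger range.)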
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
Pixel q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the faces.
const int64_t face_start_idx = mesh_to_face_first_idx[n];
const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
// Loop through the faces in the mesh.
for (int f = face_start_idx; f < face_stop_idx; ++f) {
// Check if the pixel pxy is inside the face bounding box and if it is,
// update q, q_size, q_max_z and q_max_idx in place.
CheckPixelInsideFace(
face_verts,
clipped_faces_neighbor_idx,
f,
q_size,
q_max_z,
q_max_idx,
q,
blur_radius,
pxy,
K,
perspective_correct,
clip_barycentric_coords,
cull_backfaces);
}
// TODO: make sorting an option as only top k is needed, not sorted values.
BubbleSort(q, q_size);
int idx = n * H * W * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
face_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist;
bary[(idx + k) * 3 + 0] = q[k].bary.x;
bary[(idx + k) * 3 + 1] = q[k].bary.y;
bary[(idx + k) * 3 + 2] = q[k].bary.z;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
const at::Tensor& face_verts,
const at::Tensor& mesh_to_faces_packed_first_idx,
const at::Tensor& num_faces_per_mesh,
const at::Tensor& clipped_faces_neighbor_idx,
const std::tuple<int, int> image_size,
const float blur_radius,
const int num_closest,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
TORCH_CHECK(
face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
face_verts.size(2) == 3,
"face_verts must have dimensions (num_faces, 3, 3)");
TORCH_CHECK(
num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
"num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx");
TORCH_CHECK(
clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
"clipped_faces_neighbor_idx must have save size first dimension as face_verts");
if (num_closest > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
mesh_to_faces_packed_first_idx_t{
mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3},
clipped_faces_neighbor_idx_t{
clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4};
at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
at::checkAllSameGPU(
c,
{face_verts_t,
mesh_to_faces_packed_first_idx_t,
num_faces_per_mesh_t,
clipped_faces_neighbor_idx_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int N = num_faces_per_mesh.size(0); // batch size.
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int K = num_closest;
auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
auto float_opts = face_verts.options().dtype(at::kFloat);
at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
if (face_idxs.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream,
face_verts.contiguous().data_ptr<float>(),
mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
blur_radius,
perspective_correct,
clip_barycentric_coords,
cull_backfaces,
N,
H,
W,
K,
face_idxs.data_ptr<int64_t>(),
zbuf.data_ptr<float>(),
pix_dists.data_ptr<float>(),
bary.data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
__global__ void RasterizeMeshesBackwardCudaKernel(
const float* face_verts, // (F, 3, 3)
const int64_t* pix_to_face, // (N, H, W, K)
const bool perspective_correct,
const bool clip_barycentric_coords,
const int N,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_bary, // (N, H, W, K, 3)
const float* grad_dists, // (N, H, W, K)
float* grad_face_verts) { // (F, 3, 3)
// Parallelize over each pixel in images of
// size H * W, for each image in the batch of size N.
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
// Convert linear index to 3D index
const int n = t_i / (H * W); // batch index.
const int pix_idx = t_i % (H * W);
// Reverse ordering of X and Y axes.
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
// Loop over all the faces for this pixel.
for (int k = 0; k < K; k++) {
// Index into (N, H, W, K, :) grad tensors
// pixel index + top k index
int i = n * H * W * K + pix_idx * K + k;
const int f = pix_to_face[i];
if (f < 0) {
continue; // padded face.
}
// Get xyz coordinates of the three face vertices.
const auto v012 = GetSingleFaceVerts(face_verts, f);
const float3 v0 = thrust::get<0>(v012);
const float3 v1 = thrust::get<1>(v012);
const float3 v2 = thrust::get<2>(v012);
// Only need xy for barycentric coordinate and distance calculations.
const float2 v0xy = make_float2(v0.x, v0.y);
const float2 v1xy = make_float2(v1.x, v1.y);
const float2 v2xy = make_float2(v2.x, v2.y);
// Get upstream gradients for the face.
const float grad_dist_upstream = grad_dists[i];
const float grad_zbuf_upstream = grad_zbuf[i];
const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
const float3 grad_bary_upstream = make_float3(
grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
const float3 b_pp = !perspective_correct
? b_w
: BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
const float3 b_w_clip =
!clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
const float sign = inside ? -1.0f : 1.0f;
auto grad_dist_f = PointTriangleDistanceBackward(
pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
// Upstream gradient for barycentric coords from zbuf calculation:
// zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
// Therefore
// d_zbuf/d_bary_w0 = z0
// d_zbuf/d_bary_w1 = z1
// d_zbuf/d_bary_w2 = z2
const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
// Total upstream barycentric gradients are the sum of
// external upstream gradients and contribution from zbuf.
const float3 grad_bary_f_sum =
(grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
float3 grad_bary0 = grad_bary_f_sum;
if (clip_barycentric_coords) {
grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
}
float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
if (perspective_correct) {
auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
b_w, v0.z, v1.z, v2.z, grad_bary0);
grad_bary0 = thrust::get<0>(perspective_grads);
dz0_persp = thrust::get<1>(perspective_grads);
dz1_persp = thrust::get<2>(perspective_grads);
dz2_persp = thrust::get<3>(perspective_grads);
}
auto grad_bary_f =
BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
atomicAdd(
grad_face_verts + f * 9 + 2,
grad_zbuf_upstream * b_w_clip.x + dz0_persp);
atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
atomicAdd(
grad_face_verts + f * 9 + 5,
grad_zbuf_upstream * b_w_clip.y + dz1_persp);
atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
atomicAdd(
grad_face_verts + f * 9 + 8,
grad_zbuf_upstream * b_w_clip.z + dz2_persp);
}
}
}
at::Tensor RasterizeMeshesBackwardCuda(
const at::Tensor& face_verts, // (F, 3, 3)
const at::Tensor& pix_to_face, // (N, H, W, K)
const at::Tensor& grad_zbuf, // (N, H, W, K)
const at::Tensor& grad_bary, // (N, H, W, K, 3)
const at::Tensor& grad_dists, // (N, H, W, K)
const bool perspective_correct,
const bool clip_barycentric_coords) {
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
pix_to_face_t{pix_to_face, "pix_to_face", 2},
grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
grad_bary_t{grad_bary, "grad_bary", 4},
grad_dists_t{grad_dists, "grad_dists", 5};
at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
at::checkAllSameGPU(
c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
at::checkAllSameType(
c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int F = face_verts.size(0);
const int N = pix_to_face.size(0);
const int H = pix_to_face.size(1);
const int W = pix_to_face.size(2);
const int K = pix_to_face.size(3);
at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
if (grad_face_verts.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_face_verts;
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream,
face_verts.contiguous().data_ptr<float>(),
pix_to_face.contiguous().data_ptr<int64_t>(),
perspective_correct,
clip_barycentric_coords,
N,
H,
W,
K,
grad_zbuf.contiguous().data_ptr<float>(),
grad_bary.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_face_verts.data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return grad_face_verts;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizeMeshesFineCudaKernel(
const float* face_verts, // (F, 3, 3)
const int32_t* bin_faces, // (N, BH, BW, T)
const int64_t* clipped_faces_neighbor_idx, // (F,)
const float blur_radius,
const int bin_size,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces,
const int N,
const int BH,
const int BW,
const int M,
const int H,
const int W,
const int K,
int64_t* face_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists, // (N, H, W, K)
float* bary // (N, H, W, K, 3)
) {
// This can be more than H * W if H or W are not divisible by bin_size.
int num_pixels = N * BH * BW * bin_size * bin_size;
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from faces and bin_faces.
int i = pid;
const int n = i / (BH * BW * bin_size * bin_size);
i %= BH * BW * bin_size * bin_size;
// bin index y
const int by = i / (BW * bin_size * bin_size);
i %= BW * bin_size * bin_size;
// bin index x
const int bx = i / (bin_size * bin_size);
// pixel within the bin
i %= bin_size * bin_size;
// Pixel x, y indices
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
if (yi >= H || xi >= W)
continue;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
// This part looks like the naive rasterization kernel, except we use
// bin_faces to only look at a subset of faces already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pixel q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; m++) {
const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
if (f < 0) {
continue; // bin_faces uses -1 as a sentinel value.
}
// Check if the pixel pxy is inside the face bounding box and if it is,
// update q, q_size, q_max_z and q_max_idx in place.
CheckPixelInsideFace(
face_verts,
clipped_faces_neighbor_idx,
f,
q_size,
q_max_z,
q_max_idx,
q,
blur_radius,
pxy,
K,
perspective_correct,
clip_barycentric_coords,
cull_backfaces);
}
// Now we've looked at all the faces for this bin, so we can write
// output for the current pixel.
// TODO: make sorting an option as only top k is needed, not sorted values.
BubbleSort(q, q_size);
// Reverse ordering of the X and Y axes so that
// in the image +Y is pointing up and +X is pointing left.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
for (int k = 0; k < q_size; k++) {
face_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist;
bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
const at::Tensor& face_verts,
const at::Tensor& bin_faces,
const at::Tensor& clipped_faces_neighbor_idx,
const std::tuple<int, int> image_size,
const float blur_radius,
const int bin_size,
const int faces_per_pixel,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
TORCH_CHECK(
face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
face_verts.size(2) == 3,
"face_verts must have dimensions (num_faces, 3, 3)");
TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
TORCH_CHECK(
clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
"clipped_faces_neighbor_idx must have the same first dimension as face_verts");
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
bin_faces_t{bin_faces, "bin_faces", 2},
clipped_faces_neighbor_idx_t{
clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3};
at::CheckedFrom c = "RasterizeMeshesFineCuda";
at::checkAllSameGPU(
c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// bin_faces shape (N, BH, BW, M)
const int N = bin_faces.size(0);
const int BH = bin_faces.size(1);
const int BW = bin_faces.size(2);
const int M = bin_faces.size(3);
const int K = faces_per_pixel;
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 150");
}
auto long_opts = bin_faces.options().dtype(at::kLong);
auto float_opts = face_verts.options().dtype(at::kFloat);
at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
if (face_idxs.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream,
face_verts.contiguous().data_ptr<float>(),
bin_faces.contiguous().data_ptr<int32_t>(),
clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
blur_radius,
bin_size,
perspective_correct,
clip_barycentric_coords,
cull_backfaces,
N,
BH,
BW,
M,
H,
W,
K,
face_idxs.data_ptr<int64_t>(),
zbuf.data_ptr<float>(),
pix_dists.data_ptr<float>(),
bary.data_ptr<float>());
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
| e297ed17c6d349e2a3a948e9f517bde75244d8fe.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <math.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <tuple>
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh"
namespace {
// A structure for holding details about a pixel.
struct Pixel {
float z;
int64_t idx; // idx of face
float dist; // abs distance of pixel to face
float3 bary;
};
__device__ bool operator<(const Pixel& a, const Pixel& b) {
return a.z < b.z || (a.z == b.z && a.idx < b.idx);
}
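// Pixels are ordered front-to-back by depth, with the face index breaking
// ties so that sorting coincident faces gives a deterministic result.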
// Get the xyz coordinates of the three vertices for the face given by the
// index face_idx into face_verts.
__device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts(
const float* face_verts,
int face_idx) {
const float x0 = face_verts[face_idx * 9 + 0];
const float y0 = face_verts[face_idx * 9 + 1];
const float z0 = face_verts[face_idx * 9 + 2];
const float x1 = face_verts[face_idx * 9 + 3];
const float y1 = face_verts[face_idx * 9 + 4];
const float z1 = face_verts[face_idx * 9 + 5];
const float x2 = face_verts[face_idx * 9 + 6];
const float y2 = face_verts[face_idx * 9 + 7];
const float z2 = face_verts[face_idx * 9 + 8];
const float3 v0xyz = make_float3(x0, y0, z0);
const float3 v1xyz = make_float3(x1, y1, z1);
const float3 v2xyz = make_float3(x2, y2, z2);
return thrust::make_tuple(v0xyz, v1xyz, v2xyz);
}
// Get the min/max x/y/z values for the face given by vertices v0, v1, v2.
__device__ thrust::tuple<float2, float2, float2>
GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) {
const float xmin = FloatMin3(v0.x, v1.x, v2.x);
const float ymin = FloatMin3(v0.y, v1.y, v2.y);
const float zmin = FloatMin3(v0.z, v1.z, v2.z);
const float xmax = FloatMax3(v0.x, v1.x, v2.x);
const float ymax = FloatMax3(v0.y, v1.y, v2.y);
const float zmax = FloatMax3(v0.z, v1.z, v2.z);
return thrust::make_tuple(
make_float2(xmin, xmax),
make_float2(ymin, ymax),
make_float2(zmin, zmax));
}
// Check if the point (px, py) lies outside the face bounding box face_bbox.
// Return true if the point is outside.
__device__ bool CheckPointOutsideBoundingBox(
float3 v0,
float3 v1,
float3 v2,
float blur_radius,
float2 pxy) {
const auto bbox = GetFaceBoundingBox(v0, v1, v2);
const float2 xlims = thrust::get<0>(bbox);
const float2 ylims = thrust::get<1>(bbox);
const float2 zlims = thrust::get<2>(bbox);
const float x_min = xlims.x - blur_radius;
const float y_min = ylims.x - blur_radius;
const float x_max = xlims.y + blur_radius;
const float y_max = ylims.y + blur_radius;
// Faces with at least one vertex behind the camera won't render correctly
// and should be removed or clipped before calling the rasterizer
const bool z_invalid = zlims.x < kEpsilon;
// Check if the current point is outside the triangle bounding box.
return (
pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min ||
z_invalid);
}
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-ordered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
const float* face_verts, // (F, 3, 3)
const int64_t* clipped_faces_neighbor_idx, // (F,)
const int face_idx,
int& q_size,
float& q_max_z,
int& q_max_idx,
FaceQ& q,
const float blur_radius,
const float2 pxy, // Coordinates of the pixel
const int K,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
const float3 v0 = thrust::get<0>(v012);
const float3 v1 = thrust::get<1>(v012);
const float3 v2 = thrust::get<2>(v012);
// Only need xy for barycentric coordinates and distance calculations.
const float2 v0xy = make_float2(v0.x, v0.y);
const float2 v1xy = make_float2(v1.x, v1.y);
const float2 v2xy = make_float2(v2.x, v2.y);
// Perform checks and skip if:
// 1. the face is behind the camera
// 2. the face is facing away from the camera
// 3. the face has very small face area
// 4. the pixel is outside the face bbox
const float zmax = FloatMax3(v0.z, v1.z, v2.z);
const bool outside_bbox = CheckPointOutsideBoundingBox(
v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
// Check if the face is visible to the camera.
const bool back_face = face_area < 0.0;
const bool zero_face_area =
(face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);
if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
    zero_face_area) {
return;
}
// Calculate barycentric coords and euclidean dist to triangle.
const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
const float3 p_bary = !perspective_correct
? p_bary0
: BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
const float3 p_bary_clip =
!clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);
const float pz =
p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;
if (pz < 0) {
return; // Face is behind the image plane.
}
// Get abs squared distance
const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);
// Use the unclipped bary coordinates to determine if the point is inside the
// face.
const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
const float signed_dist = inside ? -dist : dist;
// Check if pixel is outside blur region
if (!inside && dist >= blur_radius) {
return;
}
// Handle the case where a face (f) partially behind the image plane is
// clipped to a quadrilateral and then split into two faces (t1, t2). In this
// case we:
// 1. Find the index of the neighboring face (e.g. for t1 need index of t2)
// 2. Check if the neighboring face (t2) is already in the top K faces
// 3. If yes, compare the distance of the pixel to t1 with the distance to t2.
// 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces.
const int neighbor_idx = clipped_faces_neighbor_idx[face_idx];
int neighbor_idx_top_k = -1;
// Check if neighboring face is already in the top K.
// -1 is the fill value in clipped_faces_neighbor_idx
if (neighbor_idx != -1) {
// Only need to loop until q_size.
for (int i = 0; i < q_size; i++) {
if (q[i].idx == neighbor_idx) {
neighbor_idx_top_k = i;
break;
}
}
}
// If neighbor idx is not -1 then it is in the top K struct.
if (neighbor_idx_top_k != -1) {
// If dist of current face is less than neighbor then overwrite the
// neighbor face values in the top K struct.
float neighbor_dist = abs(q[neighbor_idx_top_k].dist);
if (dist < neighbor_dist) {
// Overwrite the neighbor face values
q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip};
// If pz > q_max then overwrite the max values and index of the max.
// q_size stays the same.
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = neighbor_idx_top_k;
}
}
} else {
// Handle as a normal face
if (q_size < K) {
// Just insert it.
q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
if (pz > q_max_z) {
q_max_z = pz;
q_max_idx = q_size;
}
q_size++;
} else if (pz < q_max_z) {
// Overwrite the old max, and find the new max.
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
q_max_z = pz;
for (int i = 0; i < K; i++) {
if (q[i].z > q_max_z) {
q_max_z = q[i].z;
q_max_idx = i;
}
}
}
}
}
} // namespace
// ****************************************************************************
// * NAIVE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizeMeshesNaiveCudaKernel(
const float* face_verts,
const int64_t* mesh_to_face_first_idx,
const int64_t* num_faces_per_mesh,
const int64_t* clipped_faces_neighbor_idx,
const float blur_radius,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces,
const int N,
const int H,
const int W,
const int K,
int64_t* face_idxs,
float* zbuf,
float* pix_dists,
float* bary) {
// Simple version: One thread per output pixel
int num_threads = gridDim.x * blockDim.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
for (int i = tid; i < N * H * W; i += num_threads) {
// Convert linear index to 3D index
const int n = i / (H * W); // batch index.
const int pix_idx = i % (H * W);
// Reverse ordering of X and Y axes
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
// screen coordinates to ndc coordinates of pixel.
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
// For keeping track of the K closest points we want a data structure
// that (1) gives O(1) access to the closest point for easy comparisons,
// and (2) allows insertion of new elements. In the CPU version we use
// std::priority_queue; then (2) is O(log K). We can't use STL
// containers in CUDA; we could roll our own max heap in an array, but
// that would likely have a lot of warp divergence so we do something
// simpler instead: keep the elements in an unsorted array, but keep
// track of the max value and the index of the max value. Then (1) is
// still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
// this should be fast enough for our purposes.
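    // Example with K = 3: if q holds z-values {0.9, 0.2, 0.5}, then
    // q_max_z = 0.9 and q_max_idx = 0; overwriting q[0] with a closer face
    // takes one O(K) scan to re-establish q_max_z = 0.5, q_max_idx = 2.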
Pixel q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
// Using the batch index of the thread get the start and stop
// indices for the faces.
const int64_t face_start_idx = mesh_to_face_first_idx[n];
const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];
// Loop through the faces in the mesh.
for (int f = face_start_idx; f < face_stop_idx; ++f) {
// Check if the pixel pxy is inside the face bounding box and if it is,
// update q, q_size, q_max_z and q_max_idx in place.
CheckPixelInsideFace(
face_verts,
clipped_faces_neighbor_idx,
f,
q_size,
q_max_z,
q_max_idx,
q,
blur_radius,
pxy,
K,
perspective_correct,
clip_barycentric_coords,
cull_backfaces);
}
// TODO: make sorting an option as only top k is needed, not sorted values.
BubbleSort(q, q_size);
int idx = n * H * W * K + pix_idx * K;
for (int k = 0; k < q_size; ++k) {
face_idxs[idx + k] = q[k].idx;
zbuf[idx + k] = q[k].z;
pix_dists[idx + k] = q[k].dist;
bary[(idx + k) * 3 + 0] = q[k].bary.x;
bary[(idx + k) * 3 + 1] = q[k].bary.y;
bary[(idx + k) * 3 + 2] = q[k].bary.z;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
const at::Tensor& face_verts,
const at::Tensor& mesh_to_faces_packed_first_idx,
const at::Tensor& num_faces_per_mesh,
const at::Tensor& clipped_faces_neighbor_idx,
const std::tuple<int, int> image_size,
const float blur_radius,
const int num_closest,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
TORCH_CHECK(
face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
face_verts.size(2) == 3,
"face_verts must have dimensions (num_faces, 3, 3)");
TORCH_CHECK(
num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
"num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx");
TORCH_CHECK(
clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
"clipped_faces_neighbor_idx must have save size first dimension as face_verts");
if (num_closest > kMaxPointsPerPixel) {
std::stringstream ss;
ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
AT_ERROR(ss.str());
}
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
mesh_to_faces_packed_first_idx_t{
mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3},
clipped_faces_neighbor_idx_t{
clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4};
at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
at::checkAllSameGPU(
c,
{face_verts_t,
mesh_to_faces_packed_first_idx_t,
num_faces_per_mesh_t,
clipped_faces_neighbor_idx_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(face_verts.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int N = num_faces_per_mesh.size(0); // batch size.
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int K = num_closest;
auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
auto float_opts = face_verts.options().dtype(at::kFloat);
at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
if (face_idxs.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
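  // Fixed-size launch (1024 blocks x 64 threads); the kernel grid-strides
  // over all N * H * W pixels, so the launch size need not match the pixel
  // count exactly.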
RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>(
face_verts.contiguous().data_ptr<float>(),
mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
blur_radius,
perspective_correct,
clip_barycentric_coords,
cull_backfaces,
N,
H,
W,
K,
face_idxs.data_ptr<int64_t>(),
zbuf.data_ptr<float>(),
pix_dists.data_ptr<float>(),
bary.data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
// ****************************************************************************
// * BACKWARD PASS *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
__global__ void RasterizeMeshesBackwardCudaKernel(
const float* face_verts, // (F, 3, 3)
const int64_t* pix_to_face, // (N, H, W, K)
const bool perspective_correct,
const bool clip_barycentric_coords,
const int N,
const int H,
const int W,
const int K,
const float* grad_zbuf, // (N, H, W, K)
const float* grad_bary, // (N, H, W, K, 3)
const float* grad_dists, // (N, H, W, K)
float* grad_face_verts) { // (F, 3, 3)
// Parallelize over each pixel in images of
// size H * W, for each image in the batch of size N.
const int num_threads = gridDim.x * blockDim.x;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
// Convert linear index to 3D index
const int n = t_i / (H * W); // batch index.
const int pix_idx = t_i % (H * W);
// Reverse ordering of X and Y axes.
const int yi = H - 1 - pix_idx / W;
const int xi = W - 1 - pix_idx % W;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
// Loop over all the faces for this pixel.
for (int k = 0; k < K; k++) {
// Index into (N, H, W, K, :) grad tensors
// pixel index + top k index
int i = n * H * W * K + pix_idx * K + k;
const int f = pix_to_face[i];
if (f < 0) {
continue; // padded face.
}
// Get xyz coordinates of the three face vertices.
const auto v012 = GetSingleFaceVerts(face_verts, f);
const float3 v0 = thrust::get<0>(v012);
const float3 v1 = thrust::get<1>(v012);
const float3 v2 = thrust::get<2>(v012);
      // Only need xy for barycentric coordinate and distance calculations.
const float2 v0xy = make_float2(v0.x, v0.y);
const float2 v1xy = make_float2(v1.x, v1.y);
const float2 v2xy = make_float2(v2.x, v2.y);
// Get upstream gradients for the face.
const float grad_dist_upstream = grad_dists[i];
const float grad_zbuf_upstream = grad_zbuf[i];
const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
const float3 grad_bary_upstream = make_float3(
grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);
const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
const float3 b_pp = !perspective_correct
? b_w
: BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);
const float3 b_w_clip =
!clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);
const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
const float sign = inside ? -1.0f : 1.0f;
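      // The forward pass stored signed_dist = -dist for pixels inside the
      // face, so the upstream distance gradient is flipped for inside pixels.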
auto grad_dist_f = PointTriangleDistanceBackward(
pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);
// Upstream gradient for barycentric coords from zbuf calculation:
// zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
// Therefore
// d_zbuf/d_bary_w0 = z0
// d_zbuf/d_bary_w1 = z1
// d_zbuf/d_bary_w2 = z2
const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);
// Total upstream barycentric gradients are the sum of
// external upstream gradients and contribution from zbuf.
const float3 grad_bary_f_sum =
(grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);
float3 grad_bary0 = grad_bary_f_sum;
if (clip_barycentric_coords) {
grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
}
float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
if (perspective_correct) {
auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
b_w, v0.z, v1.z, v2.z, grad_bary0);
grad_bary0 = thrust::get<0>(perspective_grads);
dz0_persp = thrust::get<1>(perspective_grads);
dz1_persp = thrust::get<2>(perspective_grads);
dz2_persp = thrust::get<3>(perspective_grads);
}
auto grad_bary_f =
BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);
atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
atomicAdd(
grad_face_verts + f * 9 + 2,
grad_zbuf_upstream * b_w_clip.x + dz0_persp);
atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
atomicAdd(
grad_face_verts + f * 9 + 5,
grad_zbuf_upstream * b_w_clip.y + dz1_persp);
atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
atomicAdd(
grad_face_verts + f * 9 + 8,
grad_zbuf_upstream * b_w_clip.z + dz2_persp);
}
}
}
at::Tensor RasterizeMeshesBackwardCuda(
const at::Tensor& face_verts, // (F, 3, 3)
const at::Tensor& pix_to_face, // (N, H, W, K)
const at::Tensor& grad_zbuf, // (N, H, W, K)
const at::Tensor& grad_bary, // (N, H, W, K, 3)
const at::Tensor& grad_dists, // (N, H, W, K)
const bool perspective_correct,
const bool clip_barycentric_coords) {
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
pix_to_face_t{pix_to_face, "pix_to_face", 2},
grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
grad_bary_t{grad_bary, "grad_bary", 4},
grad_dists_t{grad_dists, "grad_dists", 5};
at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
at::checkAllSameGPU(
c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
at::checkAllSameType(
c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(face_verts.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int F = face_verts.size(0);
const int N = pix_to_face.size(0);
const int H = pix_to_face.size(1);
const int W = pix_to_face.size(2);
const int K = pix_to_face.size(3);
at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());
if (grad_face_verts.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_face_verts;
}
const size_t blocks = 1024;
const size_t threads = 64;
RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>(
face_verts.contiguous().data_ptr<float>(),
pix_to_face.contiguous().data_ptr<int64_t>(),
perspective_correct,
clip_barycentric_coords,
N,
H,
W,
K,
grad_zbuf.contiguous().data_ptr<float>(),
grad_bary.contiguous().data_ptr<float>(),
grad_dists.contiguous().data_ptr<float>(),
grad_face_verts.data_ptr<float>());
AT_CUDA_CHECK(cudaGetLastError());
return grad_face_verts;
}
// ****************************************************************************
// * FINE RASTERIZATION *
// ****************************************************************************
__global__ void RasterizeMeshesFineCudaKernel(
const float* face_verts, // (F, 3, 3)
const int32_t* bin_faces, // (N, BH, BW, T)
const int64_t* clipped_faces_neighbor_idx, // (F,)
const float blur_radius,
const int bin_size,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces,
const int N,
const int BH,
const int BW,
const int M,
const int H,
const int W,
const int K,
int64_t* face_idxs, // (N, H, W, K)
float* zbuf, // (N, H, W, K)
float* pix_dists, // (N, H, W, K)
float* bary // (N, H, W, K, 3)
) {
// This can be more than H * W if H or W are not divisible by bin_size.
int num_pixels = N * BH * BW * bin_size * bin_size;
int num_threads = gridDim.x * blockDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for (int pid = tid; pid < num_pixels; pid += num_threads) {
// Convert linear index into bin and pixel indices. We make the within
// block pixel ids move the fastest, so that adjacent threads will fall
// into the same bin; this should give them coalesced memory reads when
// they read from faces and bin_faces.
int i = pid;
const int n = i / (BH * BW * bin_size * bin_size);
i %= BH * BW * bin_size * bin_size;
// bin index y
const int by = i / (BW * bin_size * bin_size);
i %= BW * bin_size * bin_size;
    // bin index x
const int bx = i / (bin_size * bin_size);
// pixel within the bin
i %= bin_size * bin_size;
// Pixel x, y indices
const int yi = i / bin_size + by * bin_size;
const int xi = i % bin_size + bx * bin_size;
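    // e.g. N = 1, BH = 2, BW = 4, bin_size = 2: pid = 21 decomposes to
    // by = 1, bx = 1, in-bin pixel 1, i.e. (yi, xi) = (2, 3).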
if (yi >= H || xi >= W)
continue;
const float xf = PixToNonSquareNdc(xi, W, H);
const float yf = PixToNonSquareNdc(yi, H, W);
const float2 pxy = make_float2(xf, yf);
// This part looks like the naive rasterization kernel, except we use
// bin_faces to only look at a subset of faces already known to fall
// in this bin. TODO abstract out this logic into some data structure
// that is shared by both kernels?
Pixel q[kMaxPointsPerPixel];
int q_size = 0;
float q_max_z = -1000;
int q_max_idx = -1;
for (int m = 0; m < M; m++) {
const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
if (f < 0) {
      continue; // bin_faces uses -1 as a sentinel value.
}
// Check if the pixel pxy is inside the face bounding box and if it is,
// update q, q_size, q_max_z and q_max_idx in place.
CheckPixelInsideFace(
face_verts,
clipped_faces_neighbor_idx,
f,
q_size,
q_max_z,
q_max_idx,
q,
blur_radius,
pxy,
K,
perspective_correct,
clip_barycentric_coords,
cull_backfaces);
}
// Now we've looked at all the faces for this bin, so we can write
// output for the current pixel.
// TODO: make sorting an option as only top k is needed, not sorted values.
BubbleSort(q, q_size);
    // Reverse the ordering of the X and Y axes so that in the image
    // +Y points up and +X points left.
const int yidx = H - 1 - yi;
const int xidx = W - 1 - xi;
const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
for (int k = 0; k < q_size; k++) {
face_idxs[pix_idx + k] = q[k].idx;
zbuf[pix_idx + k] = q[k].z;
pix_dists[pix_idx + k] = q[k].dist;
bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
}
}
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
const at::Tensor& face_verts,
const at::Tensor& bin_faces,
const at::Tensor& clipped_faces_neighbor_idx,
const std::tuple<int, int> image_size,
const float blur_radius,
const int bin_size,
const int faces_per_pixel,
const bool perspective_correct,
const bool clip_barycentric_coords,
const bool cull_backfaces) {
TORCH_CHECK(
face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
face_verts.size(2) == 3,
"face_verts must have dimensions (num_faces, 3, 3)");
TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
TORCH_CHECK(
clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
"clipped_faces_neighbor_idx must have the same first dimension as face_verts");
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
bin_faces_t{bin_faces, "bin_faces", 2},
clipped_faces_neighbor_idx_t{
clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3};
at::CheckedFrom c = "RasterizeMeshesFineCuda";
at::checkAllSameGPU(
c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(face_verts.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// bin_faces shape (N, BH, BW, M)
const int N = bin_faces.size(0);
const int BH = bin_faces.size(1);
const int BW = bin_faces.size(2);
const int M = bin_faces.size(3);
const int K = faces_per_pixel;
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
if (K > kMaxPointsPerPixel) {
AT_ERROR("Must have num_closest <= 150");
}
auto long_opts = bin_faces.options().dtype(at::kLong);
auto float_opts = face_verts.options().dtype(at::kFloat);
at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);
if (face_idxs.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
const size_t blocks = 1024;
const size_t threads = 64;
RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>(
face_verts.contiguous().data_ptr<float>(),
bin_faces.contiguous().data_ptr<int32_t>(),
clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
blur_radius,
bin_size,
perspective_correct,
clip_barycentric_coords,
cull_backfaces,
N,
BH,
BW,
M,
H,
W,
K,
face_idxs.data_ptr<int64_t>(),
zbuf.data_ptr<float>(),
pix_dists.data_ptr<float>(),
bary.data_ptr<float>());
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}
|
703423701992f2eec7f21c6f8f93f299f8ac47a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace testing {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_s __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_d __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[2];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
_pp_var_h[tid_] = 0.20000000000000001;
_pp_var_d[tid_] = 0.29999999999999999;
_pp_var_s[tid_] = 1.0-_pp_var_d[tid_]-_pp_var_h[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    auto idx_ = blockIdx.y;
    if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
arb_value_type t_8_, t_5_, t_3_, t_2_, t_1_, t_0_, a_8_, a_7_, a_5_, a_4_, t_4_, a_3_, a_2_, a_1_, alpha1, alpha2, t_7_, beta1, t_6_, a_6_, beta2, a_0_;
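        // The t_*_ terms below appear to be Gaussian-elimination
        // intermediates for the 3x3 steady-state linear system of this
        // kinetic scheme; s, d and h are recovered as ratios of pivots.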
a_0_ = 0.;
alpha1 = 2.0;
beta1 = 0.59999999999999998;
alpha2 = 3.0;
beta2 = 0.69999999999999996;
a_1_ = 1.0;
a_2_ = 1.0;
a_3_ = 1.0;
a_4_ = 1.0;
a_5_ = -1.0*alpha2;
a_6_ = -1.0* -beta2;
a_7_ = -beta1;
a_8_ = alpha1;
t_0_ = a_7_*a_1_;
t_1_ = a_7_*a_3_-a_2_*a_8_;
t_2_ = a_7_*a_4_-a_2_*a_0_;
t_3_ = a_5_*t_1_-t_0_*a_6_;
t_4_ = a_5_*t_2_-t_0_*a_0_;
t_5_ = t_3_*a_5_;
t_6_ = t_3_*a_0_-a_6_*t_4_;
t_7_ = t_3_*a_7_;
t_8_ = t_3_*a_0_-a_8_*t_4_;
_pp_var_d[tid_] = t_6_/t_5_;
_pp_var_h[tid_] = t_8_/t_7_;
_pp_var_s[tid_] = t_4_/t_3_;
}
}
} // namespace
void mechanism_test0_kin_steadystate_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
    hipLaunchKernelGGL(( multiply), dim3(grid_dim, 3), dim3(block_dim), 0, 0, *p);
}
void mechanism_test0_kin_steadystate_gpu_compute_currents_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
void mechanism_test0_kin_steadystate_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace testing
| 703423701992f2eec7f21c6f8f93f299f8ac47a4.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace testing {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_s __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_d __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[2];\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
_pp_var_h[tid_] = 0.20000000000000001;
_pp_var_d[tid_] = 0.29999999999999999;
_pp_var_s[tid_] = 1.0-_pp_var_d[tid_]-_pp_var_h[tid_];
}
}
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    auto idx_ = blockIdx.y;
    if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
arb_value_type t_8_, t_5_, t_3_, t_2_, t_1_, t_0_, a_8_, a_7_, a_5_, a_4_, t_4_, a_3_, a_2_, a_1_, alpha1, alpha2, t_7_, beta1, t_6_, a_6_, beta2, a_0_;
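        // The t_*_ terms below appear to be Gaussian-elimination
        // intermediates for the 3x3 steady-state linear system of this
        // kinetic scheme; s, d and h are recovered as ratios of pivots.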
a_0_ = 0.;
alpha1 = 2.0;
beta1 = 0.59999999999999998;
alpha2 = 3.0;
beta2 = 0.69999999999999996;
a_1_ = 1.0;
a_2_ = 1.0;
a_3_ = 1.0;
a_4_ = 1.0;
a_5_ = -1.0*alpha2;
a_6_ = -1.0* -beta2;
a_7_ = -beta1;
a_8_ = alpha1;
t_0_ = a_7_*a_1_;
t_1_ = a_7_*a_3_-a_2_*a_8_;
t_2_ = a_7_*a_4_-a_2_*a_0_;
t_3_ = a_5_*t_1_-t_0_*a_6_;
t_4_ = a_5_*t_2_-t_0_*a_0_;
t_5_ = t_3_*a_5_;
t_6_ = t_3_*a_0_-a_6_*t_4_;
t_7_ = t_3_*a_7_;
t_8_ = t_3_*a_0_-a_8_*t_4_;
_pp_var_d[tid_] = t_6_/t_5_;
_pp_var_h[tid_] = t_8_/t_7_;
_pp_var_s[tid_] = t_4_/t_3_;
}
}
} // namespace
void mechanism_test0_kin_steadystate_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
multiply<<<dim3{grid_dim, 3}, block_dim>>>(*p);
}
void mechanism_test0_kin_steadystate_gpu_compute_currents_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
void mechanism_test0_kin_steadystate_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_test0_kin_steadystate_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace testing
|
d3f04ac5e7863ff66e4ec5da8277ba7b15c36108.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "activations.h"
#include "activation_templates.h"
#include "hip/hip_runtime.h"
// > Mixed precision kernels (templated)
// Activation kernels
template<typename T>
__device__ T linear_activate_kernel(T x) {
return x;
}
template<typename T>
__device__ T logistic_activate_kernel(T x) {
return 1 / (1 + exp(-x));
}
__device__ half_device logistic_activate_kernel(half_device x) {
return half_device(1) / (half_device(1) + hexp(-x));
}
template<typename T>
__device__ T loggy_activate_kernel(T x) {
return 2. / (1. + exp(-x)) - 1;
}
__device__ half_device loggy_activate_kernel(half_device x) {
return half_device(2.) / (half_device(1.) + hexp(-x)) - half_device(1);
}
template<typename T>
__device__ T relu_activate_kernel(T x) {
return x * T(x > T(0));
}
template<typename T>
__device__ T elu_activate_kernel(T x) {
return (x >= 0)*x + (x < 0) * (exp(x) - 1);
}
__device__ half_device elu_activate_kernel(half_device x) {
return half_device(x >= half_device(0))*x + half_device(x < half_device(0)) * (hexp(x) - half_device(1));
}
template<typename T>
__device__ T selu_activate_kernel(T x) {
return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f * (exp(x) - 1);
}
__device__ half_device selu_activate_kernel(half_device x) {
return half_device(x >= half_device(0)) * half_device(1.0507f) * x + half_device(x < half_device(0))*half_device(1.0507f*1.6732f) * (hexp(x) - half_device(1));
}
template<typename T>
__device__ T relie_activate_kernel(T x) {
return (x > T(0)) ? x : T(.01)*x;
}
template<typename T>
__device__ T ramp_activate_kernel(T x) {
return x*T(x > T(0)) + T(.1)*x;
}
template<typename T>
__device__ T leaky_activate_kernel(T x) {
return (x > T(0)) ? x : T(.1)*x;
}
template<typename T>
__device__ T tanh_activate_kernel(T x) {
return (2.0 / (1 + exp(-2*x)) - 1);
}
__device__ half_device tanh_activate_kernel(half_device x) {
return (half_device(2) / (half_device(1) + hexp(half_device(-2)*x)) - half_device(1));
}
template<typename T>
__device__ T plse_activate_kernel(T x) {
if(x < T(-4)) return T(.01f) * (x + T(4));
if(x > T(4)) return T(.01f) * (x - T(4)) + T(1);
return T(.125)*x + T(.5);
}
template<typename T>
__device__ T stair_activate_kernel(T x) {
int n = floor(x);
if (n % 2 == 0) return floor(x / 2);
else return (x - n) + floor(x / 2);
}
__device__ half_device stair_activate_kernel(half_device x) {
int n = hfloor(x);
if (n % 2 == 0) return hfloor(x / half_device(2));
else return (x - half_device(n)) + hfloor(x / half_device(2));
}
template<typename T>
__device__ T hardtan_activate_kernel(T x) {
if (x < T(-1)) return T(-1);
if (x > T(1)) return T(1);
return x;
}
template<typename T>
__device__ T lhtan_activate_kernel(T x) {
if(x < T(0)) return T(.001)*x;
if(x > T(1)) return T(.001) * (x - T(1.0)) + T(1.0);
return x;
}
template<typename T>
__device__ T activate_kernel(T x, ACTIVATION a) {
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
template<typename T>
__global__ void activate_array_kernel(T *x, int n, ACTIVATION a) {
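    // cuda_gridsize presumably returns a 2D grid when n exceeds the
    // per-dimension block limit, hence the blockIdx.y*gridDim.x term in the
    // flattened index.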
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
// Gradient kernels
template<typename T>
__device__ T linear_gradient_kernel(T x) {
return 1;
}
template<typename T>
__device__ T logistic_gradient_kernel(T x) {
return (T(1)-x)*x;
}
template<typename T>
__device__ T loggy_gradient_kernel(T x) {
T y = (x + T(1)) / T(2);
return T(2)*(T(1)-y)*y;
}
template<typename T>
__device__ T relu_gradient_kernel(T x) {
return T(x > T(0));
}
template<typename T>
__device__ T elu_gradient_kernel(T x) {
return T(x >= T(0)) + T(x < T(0))*(x + T(1));
}
template<typename T>
__device__ T selu_gradient_kernel(T x) {
return T(x >= T(0))*T(1.0507) + T(x < T(0))*(x + T(1.0507*1.6732));
}
template<typename T>
__device__ T relie_gradient_kernel(T x) {
return (x > T(0)) ? 1 : .01f;
}
template<typename T>
__device__ T ramp_gradient_kernel(T x) {
return T(x > T(0)) + T(.1f);
}
template<typename T>
__device__ T leaky_gradient_kernel(T x) {
return (x > T(0)) ? 1 : .1f;
}
template<typename T>
__device__ T tanh_gradient_kernel(T x) {
return T(1)-x*x;
}
template<typename T>
__device__ T plse_gradient_kernel(T x) {
return ((x < T(0)) || (x > T(1))) ? .01f : .125f;
}
template<typename T>
__device__ T stair_gradient_kernel(T x) {
if (floor(x) == x) return 0;
return 1;
}
__device__ half_device stair_gradient_kernel(half_device x) {
if (hfloor(x) == x) return 0;
return 1;
}
template<typename T>
__device__ T hardtan_gradient_kernel(T x) {
if ((x > T(-1)) && (x < T(1))) return 1;
return 0;
}
template<typename T>
__device__ T lhtan_gradient_kernel(T x) {
if((x > T(0)) && (x < T(1))) return 1;
return .001;
}
template<typename T>
__device__ T gradient_kernel(T x, ACTIVATION a) {
switch(a){
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
template<typename T>
__global__ void gradient_array_kernel(T *x, int n, ACTIVATION a, T *delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// > Mixed precision kernels callers (templated)
// Activations
template<typename T>
void activate_array_gpu(T *x, int n, ACTIVATION a) {
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a);
check_error(hipPeekAtLastError());
}
void activate_array_gpu(half_float::half *x, int n, ACTIVATION a) {
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, (half_device*)x, n, a);
check_error(hipPeekAtLastError());
}
template void activate_array_gpu<float>(float *x, int n, ACTIVATION a);
template void activate_array_gpu<double>(double *x, int n, ACTIVATION a);
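// Example call site (a sketch; assumes darknet's layer struct `l`):
//   activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);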
// Gradients
template<typename T>
void gradient_array_gpu(T *x, int n, ACTIVATION a, T *delta) {
hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a, delta);
check_error(hipPeekAtLastError());
}
void gradient_array_gpu(half_float::half *x, int n, ACTIVATION a, half_float::half *delta) {
hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, (half_device*)x, n, a, (half_device*)delta);
check_error(hipPeekAtLastError());
}
template void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta);
template void gradient_array_gpu(double *x, int n, ACTIVATION a, double *delta);
// > General functions
__global__ void binary_gradient_array_kernel(real_device *x, real_device *dy, int n, int s, BINARY_ACTIVATION a, real_device *dx) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
real_device x1 = x[b*s + i];
real_device x2 = x[b*s + s/2 + i];
if(id < n) {
real_device de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s/2 + i] = x1*de;
}
}
void binary_gradient_array_gpu(real *x, real *dx, int n, int size, BINARY_ACTIVATION a, real *y) {
hipLaunchKernelGGL(( binary_gradient_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, (real_device*)x, (real_device*)dx, n/2, size, a, (real_device*)y);
check_error(hipPeekAtLastError());
}
__global__ void binary_activate_array_kernel(real_device *x, int n, int s, BINARY_ACTIVATION a, real_device *y) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
real_device x1 = x[b*s + i];
real_device x2 = x[b*s + s/2 + i];
if(id < n) y[id] = x1*x2;
}
void binary_activate_array_gpu(real *x, int n, int size, BINARY_ACTIVATION a, real *y) {
hipLaunchKernelGGL(( binary_activate_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, (real_device*)x, n/2, size, a, (real_device*)y);
check_error(hipPeekAtLastError());
} | d3f04ac5e7863ff66e4ec5da8277ba7b15c36108.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "activations.h"
#include "activation_templates.h"
#include "cuda.h"
// > Mixed precision kernels (templated)
// Activation kernels
template<typename T>
__device__ T linear_activate_kernel(T x) {
return x;
}
template<typename T>
__device__ T logistic_activate_kernel(T x) {
return 1 / (1 + exp(-x));
}
__device__ half_device logistic_activate_kernel(half_device x) {
return half_device(1) / (half_device(1) + hexp(-x));
}
template<typename T>
__device__ T loggy_activate_kernel(T x) {
return 2. / (1. + exp(-x)) - 1;
}
__device__ half_device loggy_activate_kernel(half_device x) {
return half_device(2.) / (half_device(1.) + hexp(-x)) - half_device(1);
}
template<typename T>
__device__ T relu_activate_kernel(T x) {
return x * T(x > T(0));
}
template<typename T>
__device__ T elu_activate_kernel(T x) {
return (x >= 0)*x + (x < 0) * (exp(x) - 1);
}
__device__ half_device elu_activate_kernel(half_device x) {
return half_device(x >= half_device(0))*x + half_device(x < half_device(0)) * (hexp(x) - half_device(1));
}
template<typename T>
__device__ T selu_activate_kernel(T x) {
return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f * (exp(x) - 1);
}
__device__ half_device selu_activate_kernel(half_device x) {
return half_device(x >= half_device(0)) * half_device(1.0507f) * x + half_device(x < half_device(0))*half_device(1.0507f*1.6732f) * (hexp(x) - half_device(1));
}
template<typename T>
__device__ T relie_activate_kernel(T x) {
return (x > T(0)) ? x : T(.01)*x;
}
template<typename T>
__device__ T ramp_activate_kernel(T x) {
return x*T(x > T(0)) + T(.1)*x;
}
template<typename T>
__device__ T leaky_activate_kernel(T x) {
return (x > T(0)) ? x : T(.1)*x;
}
template<typename T>
__device__ T tanh_activate_kernel(T x) {
return (2.0 / (1 + exp(-2*x)) - 1);
}
__device__ half_device tanh_activate_kernel(half_device x) {
return (half_device(2) / (half_device(1) + hexp(half_device(-2)*x)) - half_device(1));
}
template<typename T>
__device__ T plse_activate_kernel(T x) {
if(x < T(-4)) return T(.01f) * (x + T(4));
if(x > T(4)) return T(.01f) * (x - T(4)) + T(1);
return T(.125)*x + T(.5);
}
template<typename T>
__device__ T stair_activate_kernel(T x) {
int n = floor(x);
if (n % 2 == 0) return floor(x / 2);
else return (x - n) + floor(x / 2);
}
__device__ half_device stair_activate_kernel(half_device x) {
int n = hfloor(x);
if (n % 2 == 0) return hfloor(x / half_device(2));
else return (x - half_device(n)) + hfloor(x / half_device(2));
}
template<typename T>
__device__ T hardtan_activate_kernel(T x) {
if (x < T(-1)) return T(-1);
if (x > T(1)) return T(1);
return x;
}
template<typename T>
__device__ T lhtan_activate_kernel(T x) {
if(x < T(0)) return T(.001)*x;
if(x > T(1)) return T(.001) * (x - T(1.0)) + T(1.0);
return x;
}
template<typename T>
__device__ T activate_kernel(T x, ACTIVATION a) {
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
template<typename T>
__global__ void activate_array_kernel(T *x, int n, ACTIVATION a) {
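    // cuda_gridsize presumably returns a 2D grid when n exceeds the
    // per-dimension block limit, hence the blockIdx.y*gridDim.x term in the
    // flattened index.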
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
// Gradient kernels
template<typename T>
__device__ T linear_gradient_kernel(T x) {
return 1;
}
template<typename T>
__device__ T logistic_gradient_kernel(T x) {
return (T(1)-x)*x;
}
template<typename T>
__device__ T loggy_gradient_kernel(T x) {
T y = (x + T(1)) / T(2);
return T(2)*(T(1)-y)*y;
}
template<typename T>
__device__ T relu_gradient_kernel(T x) {
return T(x > T(0));
}
template<typename T>
__device__ T elu_gradient_kernel(T x) {
return T(x >= T(0)) + T(x < T(0))*(x + T(1));
}
template<typename T>
__device__ T selu_gradient_kernel(T x) {
return T(x >= T(0))*T(1.0507) + T(x < T(0))*(x + T(1.0507*1.6732));
}
template<typename T>
__device__ T relie_gradient_kernel(T x) {
return (x > T(0)) ? 1 : .01f;
}
template<typename T>
__device__ T ramp_gradient_kernel(T x) {
return T(x > T(0)) + T(.1f);
}
template<typename T>
__device__ T leaky_gradient_kernel(T x) {
return (x > T(0)) ? 1 : .1f;
}
template<typename T>
__device__ T tanh_gradient_kernel(T x) {
return T(1)-x*x;
}
template<typename T>
__device__ T plse_gradient_kernel(T x) {
return ((x < T(0)) || (x > T(1))) ? .01f : .125f;
}
template<typename T>
__device__ T stair_gradient_kernel(T x) {
if (floor(x) == x) return 0;
return 1;
}
__device__ half_device stair_gradient_kernel(half_device x) {
if (hfloor(x) == x) return 0;
return 1;
}
template<typename T>
__device__ T hardtan_gradient_kernel(T x) {
if ((x > T(-1)) && (x < T(1))) return 1;
return 0;
}
template<typename T>
__device__ T lhtan_gradient_kernel(T x) {
if((x > T(0)) && (x < T(1))) return 1;
return .001;
}
template<typename T>
__device__ T gradient_kernel(T x, ACTIVATION a) {
switch(a){
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
template<typename T>
__global__ void gradient_array_kernel(T *x, int n, ACTIVATION a, T *delta) {
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// > Mixed precision kernels callers (templated)
// Activations
template<typename T>
void activate_array_gpu(T *x, int n, ACTIVATION a) {
activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
check_error(cudaPeekAtLastError());
}
void activate_array_gpu(half_float::half *x, int n, ACTIVATION a) {
activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>((half_device*)x, n, a);
check_error(cudaPeekAtLastError());
}
template void activate_array_gpu<float>(float *x, int n, ACTIVATION a);
template void activate_array_gpu<double>(double *x, int n, ACTIVATION a);
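// Example call site (a sketch; assumes darknet's layer struct `l`):
//   activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);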
// Gradients
template<typename T>
void gradient_array_gpu(T *x, int n, ACTIVATION a, T *delta) {
gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta);
check_error(cudaPeekAtLastError());
}
void gradient_array_gpu(half_float::half *x, int n, ACTIVATION a, half_float::half *delta) {
gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>((half_device*)x, n, a, (half_device*)delta);
check_error(cudaPeekAtLastError());
}
template void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta);
template void gradient_array_gpu(double *x, int n, ACTIVATION a, double *delta);
// > General functions
__global__ void binary_gradient_array_kernel(real_device *x, real_device *dy, int n, int s, BINARY_ACTIVATION a, real_device *dx) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
real_device x1 = x[b*s + i];
real_device x2 = x[b*s + s/2 + i];
if(id < n) {
real_device de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s/2 + i] = x1*de;
}
}
void binary_gradient_array_gpu(real *x, real *dx, int n, int size, BINARY_ACTIVATION a, real *y) {
binary_gradient_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>((real_device*)x, (real_device*)dx, n/2, size, a, (real_device*)y);
check_error(cudaPeekAtLastError());
}
__global__ void binary_activate_array_kernel(real_device *x, int n, int s, BINARY_ACTIVATION a, real_device *y) {
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
real_device x1 = x[b*s + i];
real_device x2 = x[b*s + s/2 + i];
if(id < n) y[id] = x1*x2;
}
void binary_activate_array_gpu(real *x, int n, int size, BINARY_ACTIVATION a, real *y) {
binary_activate_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>((real_device*)x, n/2, size, a, (real_device*)y);
check_error(cudaPeekAtLastError());
} |
e7b88aa0796271bf686e2ddbef7518115e1ad99e.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#define block_size 16
// compute the output at one output row/col
// to do so, we need to sum over the product of the corresponding input rows
template <typename scalar_t>
__global__ void matmul_viterbi_cuda_forward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ output,
int64_t* __restrict__ max_indices,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t out_row = blockIdx.x * block_size + threadIdx.x;
const int64_t out_col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(out_row < sz1 && out_col < sz3 && batch < batch_size) {
scalar_t sum = 0;
int64_t src = 0;
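        // Max-product "matmul": track the argmax k so the backward pass can
        // route gradients. Note sum starts at 0, so only strictly positive
        // products are ever recorded.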
for(int64_t k = 0; k < sz2; k++) {
// a[out_row, k]
scalar_t left = a[batch*sz1*sz2 + out_row*sz2 + k];
// b[k, out_col]
scalar_t right = b[batch*sz2*sz3 + k*sz3 + out_col];
scalar_t prod = left*right;
if(prod > sum) {
sum = prod;
src = k;
}
}
output[batch*sz1*sz3 + out_row*sz3 + out_col] = sum;
max_indices[batch*sz1*sz3 + out_row*sz3 + out_col] = src;
}
}
std::vector<torch::Tensor> matmul_viterbi_cuda_forward(
torch::Tensor a,
torch::Tensor b) {
const auto batch_size = a.size(0);
const auto sz1 = a.size(1);
const auto sz2 = a.size(2);
const auto sz3 = b.size(2);
auto output = torch::zeros({batch_size, sz1, sz3}, a.options());
auto max_indices = torch::zeros({batch_size, sz1, sz3}, torch::TensorOptions().dtype(torch::kInt64).device(a.device()));
// each thread computes value at one output cell
const dim3 dimBlock(block_size, block_size);
const dim3 dimGrid((sz1 + block_size - 1) / block_size, (sz3 + block_size - 1) / block_size, batch_size);
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_forward_cuda", ([&] {
hipLaunchKernelGGL(( matmul_viterbi_cuda_forward_kernel<scalar_t>), dim3(dimGrid), dim3(dimBlock), 0, 0,
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
return {output, max_indices};
}
template <typename scalar_t>
__global__ void matmul_viterbi_a_cuda_backward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
const scalar_t* __restrict__ output,
const int64_t* __restrict__ max_indices,
const scalar_t* __restrict__ d_output,
scalar_t* __restrict__ d_a,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t row = blockIdx.x * block_size + threadIdx.x;
const int64_t col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(row < sz1 && col < sz2 && batch < batch_size) {
// we're at a[row, col], so each out[row, k] could potentially depend on us
scalar_t sum = 0;
for(int k = 0; k < sz3; k++) {
int64_t idx = max_indices[batch*sz1*sz3 + row*sz3 + k];
if(idx == col) {
sum += d_output[batch*sz1*sz3 + row*sz3 + k] * b[batch*sz2*sz3 + col*sz3 + k];
}
}
d_a[batch*sz1*sz2 + row*sz2 + col] = sum;
}
}
template <typename scalar_t>
__global__ void matmul_viterbi_b_cuda_backward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
const scalar_t* __restrict__ output,
const int64_t* __restrict__ max_indices,
const scalar_t* __restrict__ d_output,
scalar_t* __restrict__ d_b,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t row = blockIdx.x * block_size + threadIdx.x;
const int64_t col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(row < sz2 && col < sz3 && batch < batch_size) {
// we're at b[row, col], so each out[k, col] could potentially depend on us
scalar_t sum = 0;
for(int k = 0; k < sz1; k++) {
int64_t idx = max_indices[batch*sz1*sz3 + k*sz3 + col];
if(idx == row) {
sum += d_output[batch*sz1*sz3 + k*sz3 + col] * a[batch*sz1*sz2 + k*sz2 + row];
}
}
d_b[batch*sz2*sz3 + row*sz3 + col] = sum;
}
}
std::vector<torch::Tensor> matmul_viterbi_cuda_backward(
torch::Tensor a,
torch::Tensor b,
torch::Tensor output,
torch::Tensor max_indices,
torch::Tensor d_output) {
const auto batch_size = a.size(0);
const auto sz1 = a.size(1);
const auto sz2 = a.size(2);
const auto sz3 = b.size(2);
auto d_a = torch::zeros_like(a);
auto d_b = torch::zeros_like(b);
const dim3 aDimBlock(block_size, block_size);
const dim3 aDimGrid((sz1 + block_size - 1) / block_size, (sz2 + block_size - 1) / block_size, batch_size);
const dim3 bDimBlock(block_size, block_size);
const dim3 bDimGrid((sz2 + block_size - 1) / block_size, (sz3 + block_size - 1) / block_size, batch_size);
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_a_backward_cuda", ([&] {
hipLaunchKernelGGL(( matmul_viterbi_a_cuda_backward_kernel<scalar_t>), dim3(aDimGrid), dim3(aDimBlock), 0, 0,
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
d_output.data<scalar_t>(),
d_a.data<scalar_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_b_backward_cuda", ([&] {
hipLaunchKernelGGL(( matmul_viterbi_b_cuda_backward_kernel<scalar_t>), dim3(bDimGrid), dim3(bDimBlock), 0, 0,
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
d_output.data<scalar_t>(),
d_b.data<scalar_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
return {d_a, d_b};
}
| e7b88aa0796271bf686e2ddbef7518115e1ad99e.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define block_size 16
// compute the output at one output row/col
// to do so, we need to sum over the product of the corresponding input rows
template <typename scalar_t>
__global__ void matmul_viterbi_cuda_forward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ output,
int64_t* __restrict__ max_indices,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t out_row = blockIdx.x * block_size + threadIdx.x;
const int64_t out_col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(out_row < sz1 && out_col < sz3 && batch < batch_size) {
scalar_t sum = 0;
int64_t src = 0;
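        // Max-product "matmul": track the argmax k so the backward pass can
        // route gradients. Note sum starts at 0, so only strictly positive
        // products are ever recorded.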
for(int64_t k = 0; k < sz2; k++) {
// a[out_row, k]
scalar_t left = a[batch*sz1*sz2 + out_row*sz2 + k];
// b[k, out_col]
scalar_t right = b[batch*sz2*sz3 + k*sz3 + out_col];
scalar_t prod = left*right;
if(prod > sum) {
sum = prod;
src = k;
}
}
output[batch*sz1*sz3 + out_row*sz3 + out_col] = sum;
max_indices[batch*sz1*sz3 + out_row*sz3 + out_col] = src;
}
}
std::vector<torch::Tensor> matmul_viterbi_cuda_forward(
torch::Tensor a,
torch::Tensor b) {
const auto batch_size = a.size(0);
const auto sz1 = a.size(1);
const auto sz2 = a.size(2);
const auto sz3 = b.size(2);
auto output = torch::zeros({batch_size, sz1, sz3}, a.options());
auto max_indices = torch::zeros({batch_size, sz1, sz3}, torch::TensorOptions().dtype(torch::kInt64).device(a.device()));
// each thread computes value at one output cell
const dim3 dimBlock(block_size, block_size);
const dim3 dimGrid((sz1 + block_size - 1) / block_size, (sz3 + block_size - 1) / block_size, batch_size);
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_forward_cuda", ([&] {
matmul_viterbi_cuda_forward_kernel<scalar_t><<<dimGrid, dimBlock>>>(
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
return {output, max_indices};
}
template <typename scalar_t>
__global__ void matmul_viterbi_a_cuda_backward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
const scalar_t* __restrict__ output,
const int64_t* __restrict__ max_indices,
const scalar_t* __restrict__ d_output,
scalar_t* __restrict__ d_a,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t row = blockIdx.x * block_size + threadIdx.x;
const int64_t col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(row < sz1 && col < sz2 && batch < batch_size) {
// we're at a[row, col], so each out[row, k] could potentially depend on us
scalar_t sum = 0;
for(int k = 0; k < sz3; k++) {
int64_t idx = max_indices[batch*sz1*sz3 + row*sz3 + k];
if(idx == col) {
sum += d_output[batch*sz1*sz3 + row*sz3 + k] * b[batch*sz2*sz3 + col*sz3 + k];
}
}
d_a[batch*sz1*sz2 + row*sz2 + col] = sum;
}
}
template <typename scalar_t>
__global__ void matmul_viterbi_b_cuda_backward_kernel(
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
const scalar_t* __restrict__ output,
const int64_t* __restrict__ max_indices,
const scalar_t* __restrict__ d_output,
scalar_t* __restrict__ d_b,
size_t sz1,
size_t sz2,
size_t sz3,
size_t batch_size) {
const int64_t row = blockIdx.x * block_size + threadIdx.x;
const int64_t col = blockIdx.y * block_size + threadIdx.y;
const int64_t batch = blockIdx.z;
if(row < sz2 && col < sz3 && batch < batch_size) {
// we're at b[row, col], so each out[k, col] could potentially depend on us
scalar_t sum = 0;
for(int k = 0; k < sz1; k++) {
int64_t idx = max_indices[batch*sz1*sz3 + k*sz3 + col];
if(idx == row) {
sum += d_output[batch*sz1*sz3 + k*sz3 + col] * a[batch*sz1*sz2 + k*sz2 + row];
}
}
d_b[batch*sz2*sz3 + row*sz3 + col] = sum;
}
}
std::vector<torch::Tensor> matmul_viterbi_cuda_backward(
torch::Tensor a,
torch::Tensor b,
torch::Tensor output,
torch::Tensor max_indices,
torch::Tensor d_output) {
const auto batch_size = a.size(0);
const auto sz1 = a.size(1);
const auto sz2 = a.size(2);
const auto sz3 = b.size(2);
auto d_a = torch::zeros_like(a);
auto d_b = torch::zeros_like(b);
const dim3 aDimBlock(block_size, block_size);
const dim3 aDimGrid((sz1 + block_size - 1) / block_size, (sz2 + block_size - 1) / block_size, batch_size);
const dim3 bDimBlock(block_size, block_size);
const dim3 bDimGrid((sz2 + block_size - 1) / block_size, (sz3 + block_size - 1) / block_size, batch_size);
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_a_backward_cuda", ([&] {
matmul_viterbi_a_cuda_backward_kernel<scalar_t><<<aDimGrid, aDimBlock>>>(
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
d_output.data<scalar_t>(),
d_a.data<scalar_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
AT_DISPATCH_FLOATING_TYPES(a.type(), "matmul_viterbi_b_backward_cuda", ([&] {
matmul_viterbi_b_cuda_backward_kernel<scalar_t><<<bDimGrid, bDimBlock>>>(
a.data<scalar_t>(),
b.data<scalar_t>(),
output.data<scalar_t>(),
max_indices.data<int64_t>(),
d_output.data<scalar_t>(),
d_b.data<scalar_t>(),
sz1,
sz2,
sz3,
batch_size
);
}));
return {d_a, d_b};
}
|
ab8a4654df8ae79aaf21fcaadd74afa4550f60a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "barostat.hpp"
#include "gpu_utils.cuh"
#include "fixed_point.hpp"
#include <algorithm>
#include <stdio.h>
#include <set>
#define AVOGADRO 6.0221367e23
#define BOLTZ 0.008314462618
namespace timemachine {
MonteCarloBarostat::MonteCarloBarostat(
const int N,
const double pressure, // Expected in Bar
const double temperature, // Kelvin
const std::vector<std::vector<int> > group_idxs,
const int interval,
const std::vector<BoundPotential *> bps,
const int seed) :
N_(N),
pressure_(pressure),
temperature_(temperature),
interval_(interval),
bps_(bps),
group_idxs_(group_idxs),
num_grouped_atoms_(0),
volume_scale_(0),
num_attempted_(0),
num_accepted_(0),
seed_(seed),
step_(0) {
    // let's not have another facepalm moment again...
if(temperature < 100.0) {
std::cout << "warning temperature less than 100K" << std::endl;
}
if(pressure > 10.0) {
std::cout << "warning pressure more than 10bar" << std::endl;
}
mt_ = std::mt19937(seed_);
dist_ = std::uniform_real_distribution<double>(0.0, 1.0);
//gpuErrchk(hipMalloc(&d_group_idxs_, group_idxs_.size()*sizeof(*d_group_idxs_)));
const int num_mols = group_idxs_.size();
gpuErrchk(hipMalloc(&d_x_after_, N_*3*sizeof(*d_x_after_)));
gpuErrchk(hipMalloc(&d_box_after_, 3*3*sizeof(*d_box_after_)));
gpuErrchk(hipMalloc(&d_u_before_, N_*sizeof(*d_u_before_)));
gpuErrchk(hipMalloc(&d_u_after_, N_*sizeof(*d_u_after_)));
std::set<int> group_set;
for (int i = 0; i < num_mols; i++) {
std::vector<int> atoms = group_idxs[i];
const int num_atoms = atoms.size();
num_grouped_atoms_ += num_atoms;
for(int j = 0; j < num_atoms; j++) {
int idx = atoms[j];
if (idx < 0 || idx >= N_) {
throw std::runtime_error("Grouped indices must be between 0 and N");
}
group_set.insert(idx);
}
}
// Verify that all of the group indices are unique
if (group_set.size() != num_grouped_atoms_) {
throw std::runtime_error("All grouped indices must be unique");
}
gpuErrchk(hipMalloc(&d_centroids_, num_mols*3*sizeof(*d_centroids_)));
gpuErrchk(hipMalloc(&d_atom_idxs_, num_grouped_atoms_*sizeof(*d_atom_idxs_)));
gpuErrchk(hipMalloc(&d_mol_idxs_, num_grouped_atoms_*sizeof(*d_mol_idxs_)));
gpuErrchk(hipMalloc(&d_mol_offsets_, (num_mols+1)*sizeof(*d_mol_offsets_)));
int offset = 0;
int mol_offsets[num_mols+1];
int mol_idxs[num_grouped_atoms_];
int atom_idxs[num_grouped_atoms_];
for (int i = 0; i < num_mols; i++) {
std::vector<int> atoms = group_idxs[i];
mol_offsets[i] = offset;
int num_atoms = atoms.size();
for (int j = 0; j < num_atoms; j++) {
mol_idxs[offset+j] = i;
atom_idxs[offset+j] = atoms[j];
}
offset += num_atoms;
}
mol_offsets[num_mols] = offset;
gpuErrchk(hipMemcpy(d_mol_idxs_, mol_idxs, num_grouped_atoms_*sizeof(*d_mol_idxs_), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_atom_idxs_, atom_idxs, num_grouped_atoms_*sizeof(*d_atom_idxs_), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_mol_offsets_, mol_offsets, (num_mols+1)*sizeof(*d_mol_offsets_), hipMemcpyHostToDevice));
};
MonteCarloBarostat::~MonteCarloBarostat() {
gpuErrchk(hipFree(d_x_after_));
gpuErrchk(hipFree(d_centroids_));
gpuErrchk(hipFree(d_atom_idxs_));
gpuErrchk(hipFree(d_mol_idxs_));
gpuErrchk(hipFree(d_mol_offsets_));
gpuErrchk(hipFree(d_box_after_));
gpuErrchk(hipFree(d_u_before_));
gpuErrchk(hipFree(d_u_after_));
};
void __global__ rescale_positions(
const int N, // Number of atoms to shift
double * __restrict__ coords, // Coordinates
const double length_scale,
const double * __restrict__ box, // [9]
double * __restrict__ scaled_box, // [9]
const int * __restrict__ atom_idxs, // [N]
const int * __restrict__ mol_idxs, // [N]
const int * __restrict__ mol_offsets, // [num_mols + 1]
const double * __restrict__ centroids // [num_mols * 3]
) {
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
const int atom_idx = atom_idxs[idx];
const int mol_idx = mol_idxs[idx];
double center_x = box[0*3+0] * 0.5;
double center_y = box[1*3+1] * 0.5;
double center_z = box[2*3+2] * 0.5;
const double num_atoms = static_cast<double>(mol_offsets[mol_idx+1] - mol_offsets[mol_idx]);
const double centroid_x = centroids[mol_idx*3+0] / num_atoms;
const double centroid_y = centroids[mol_idx*3+1] / num_atoms;
const double centroid_z = centroids[mol_idx*3+2] / num_atoms;
const double displacement_x = ((centroid_x - center_x) * length_scale) + center_x - centroid_x;
const double displacement_y = ((centroid_y - center_y) * length_scale) + center_y - centroid_y;
const double displacement_z = ((centroid_z - center_z) * length_scale) + center_z - centroid_z;
coords[atom_idx*3+0] += displacement_x;
coords[atom_idx*3+1] += displacement_y;
coords[atom_idx*3+2] += displacement_z;
if (atom_idx == 0) {
scaled_box[0*3+0] *= length_scale;
scaled_box[1*3+1] *= length_scale;
scaled_box[2*3+2] *= length_scale;
}
}
void __global__ find_group_centroids(
const int N, // Number of atoms to shift
const double * __restrict__ coords, // Coordinates
const int * __restrict__ atom_idxs, // [N]
const int * __restrict__ mol_idxs, // [N]
double * __restrict__ centroids // [num_molecules * 3]
) {
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
const int atom_idx = atom_idxs[idx];
const int mol_idx = mol_idxs[idx];
atomicAdd(centroids + mol_idx*3+0, coords[atom_idx*3+0]);
atomicAdd(centroids + mol_idx*3+1, coords[atom_idx*3+1]);
atomicAdd(centroids + mol_idx*3+2, coords[atom_idx*3+2]);
}
void MonteCarloBarostat::reset_counters() {
num_attempted_ = 0;
num_accepted_ = 0;
}
void MonteCarloBarostat::inplace_move(
double *d_x,
double *d_box,
const double lambda
) {
step_++;
if (step_ % interval_ != 0) {
return;
}
std::vector<double> h_box(9);
gpuErrchk(hipMemcpy(&h_box[0], d_box, 9*sizeof(*d_box), hipMemcpyDeviceToHost));
double volume = h_box[0*3+0]*h_box[1*3+1]*h_box[2*3+2];
if(volume_scale_ == 0) {
volume_scale_ = 0.01*volume;
}
const int num_molecules = group_idxs_.size();
const int tpb = 32;
const int blocks = (num_grouped_atoms_ + tpb - 1) / tpb;
// Compute the energy of the modified system.
hipStream_t stream = static_cast<hipStream_t>(0);
gpuErrchk(hipMemsetAsync(d_u_before_, 0, N_*sizeof(*d_u_before_), stream));
gpuErrchk(hipMemsetAsync(d_u_after_, 0, N_*sizeof(*d_u_after_), stream));
gpuErrchk(hipMemsetAsync(d_centroids_, 0, num_molecules*3*sizeof(*d_centroids_), stream));
for(int i=0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x,
d_box,
lambda,
nullptr,
nullptr,
nullptr,
d_u_before_,
stream // TBD: parallelize me!
);
}
gpuErrchk(hipMemcpyAsync(d_x_after_, d_x, N_*3*sizeof(*d_x), hipMemcpyDeviceToDevice, stream));
gpuErrchk(hipMemcpyAsync(d_box_after_, d_box, 3*3*sizeof(*d_box), hipMemcpyDeviceToDevice, stream));
hipLaunchKernelGGL(( find_group_centroids), dim3(blocks), dim3(tpb), 0, stream,
num_grouped_atoms_,
d_x_after_,
d_atom_idxs_,
d_mol_idxs_,
d_centroids_
);
double delta_volume = volume_scale_*2*(dist_(mt_)-0.5);
double new_volume = volume+delta_volume;
double length_scale = ::pow(new_volume/volume, 1.0/3.0);
// Scale centroids
hipLaunchKernelGGL(( rescale_positions), dim3(blocks), dim3(tpb), 0, stream,
num_grouped_atoms_,
d_x_after_,
length_scale,
d_box,
d_box_after_, // Box will be rescaled by length_scale
d_atom_idxs_,
d_mol_idxs_,
d_mol_offsets_,
d_centroids_
);
gpuErrchk(hipPeekAtLastError());
for(int i=0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x_after_,
d_box_after_,
lambda,
nullptr,
nullptr,
nullptr,
d_u_after_,
stream // TBD: parallelize me!
);
}
double pressure = pressure_*AVOGADRO*1e-25;
const double kT = BOLTZ*temperature_;
unsigned long long u_init_agg = 0;
unsigned long long u_final_agg = 0;
unsigned long long initial_energy[N_];
unsigned long long final_energy[N_];
gpuErrchk(hipMemcpyAsync(initial_energy, d_u_before_, N_*sizeof(*d_u_before_), hipMemcpyDeviceToHost, stream));
gpuErrchk(hipMemcpyAsync(final_energy, d_u_after_, N_*sizeof(*d_u_after_), hipMemcpyDeviceToHost, stream));
gpuErrchk(hipStreamSynchronize(stream));
for (int i = 0; i < N_; i++) {
u_init_agg += initial_energy[i];
u_final_agg += final_energy[i];
}
double u_init = FIXED_TO_FLOAT<double>(u_init_agg);
double u_final = FIXED_TO_FLOAT<double>(u_final_agg);
double w = u_final-u_init + pressure*delta_volume - num_molecules*kT*::log(new_volume/volume);
if (w > 0 && dist_(mt_) > ::exp(-w/kT)) {
// Reject the step: leave the coords, the box and hence the volume unchanged.
}
else {
num_accepted_++;
// Step accepted: replace the coords and box, and track the new volume so the
// volume_scale_ adjustment below sees the current box size.
gpuErrchk(hipMemcpyAsync(d_x, d_x_after_, N_*3*sizeof(*d_x), hipMemcpyDeviceToDevice, stream));
gpuErrchk(hipMemcpyAsync(d_box, d_box_after_, 3*3*sizeof(*d_box), hipMemcpyDeviceToDevice, stream));
volume = new_volume;
}
num_attempted_++;
if (num_attempted_ >= 10) {
if (num_accepted_ < 0.25*num_attempted_) {
volume_scale_ /= 1.1;
this->reset_counters();
}
else if (num_accepted_ > 0.75*num_attempted_) {
volume_scale_ = ::min(volume_scale_*1.1, volume*0.3);
this->reset_counters();
}
}
};
void MonteCarloBarostat::set_interval(const int interval){
interval_ = interval;
// Reset the step counter so the user can rely on the barostat triggering within `interval` steps
step_ = 0;
}
int MonteCarloBarostat::get_interval(){
return interval_;
}
void MonteCarloBarostat::set_pressure(const double pressure){
pressure_ = pressure;
// The system may already be equilibrated and sit far from the next
// volume_scale_ adjustment (e.g. num_attempted_ = 300, num_accepted_ = 150),
// so reset the counters whenever the target pressure changes.
this->reset_counters();
}
}
| ab8a4654df8ae79aaf21fcaadd74afa4550f60a1.cu | #include "barostat.hpp"
#include "gpu_utils.cuh"
#include "fixed_point.hpp"
#include <algorithm>
#include <stdio.h>
#include <set>
#define AVOGADRO 6.0221367e23
#define BOLTZ 0.008314462618
namespace timemachine {
MonteCarloBarostat::MonteCarloBarostat(
const int N,
const double pressure, // Expected in Bar
const double temperature, // Kelvin
const std::vector<std::vector<int> > group_idxs,
const int interval,
const std::vector<BoundPotential *> bps,
const int seed) :
N_(N),
pressure_(pressure),
temperature_(temperature),
interval_(interval),
bps_(bps),
group_idxs_(group_idxs),
num_grouped_atoms_(0),
volume_scale_(0),
num_attempted_(0),
num_accepted_(0),
seed_(seed),
step_(0) {
// lets not have another facepalm moment again...
if(temperature < 100.0) {
std::cout << "warning temperature less than 100K" << std::endl;
}
if(pressure > 10.0) {
std::cout << "warning pressure more than 10bar" << std::endl;
}
mt_ = std::mt19937(seed_);
dist_ = std::uniform_real_distribution<double>(0.0, 1.0);
//gpuErrchk(cudaMalloc(&d_group_idxs_, group_idxs_.size()*sizeof(*d_group_idxs_)));
const int num_mols = group_idxs_.size();
gpuErrchk(cudaMalloc(&d_x_after_, N_*3*sizeof(*d_x_after_)));
gpuErrchk(cudaMalloc(&d_box_after_, 3*3*sizeof(*d_box_after_)));
gpuErrchk(cudaMalloc(&d_u_before_, N_*sizeof(*d_u_before_)));
gpuErrchk(cudaMalloc(&d_u_after_, N_*sizeof(*d_u_after_)));
std::set<int> group_set;
for (int i = 0; i < num_mols; i++) {
std::vector<int> atoms = group_idxs[i];
const int num_atoms = atoms.size();
num_grouped_atoms_ += num_atoms;
for(int j = 0; j < num_atoms; j++) {
int idx = atoms[j];
if (idx < 0 || idx >= N_) {
throw std::runtime_error("Grouped indices must be between 0 and N");
}
group_set.insert(idx);
}
}
// Verify that all of the group indices are unique
if (group_set.size() != num_grouped_atoms_) {
throw std::runtime_error("All grouped indices must be unique");
}
gpuErrchk(cudaMalloc(&d_centroids_, num_mols*3*sizeof(*d_centroids_)));
gpuErrchk(cudaMalloc(&d_atom_idxs_, num_grouped_atoms_*sizeof(*d_atom_idxs_)));
gpuErrchk(cudaMalloc(&d_mol_idxs_, num_grouped_atoms_*sizeof(*d_mol_idxs_)));
gpuErrchk(cudaMalloc(&d_mol_offsets_, (num_mols+1)*sizeof(*d_mol_offsets_)));
int offset = 0;
int mol_offsets[num_mols+1];
int mol_idxs[num_grouped_atoms_];
int atom_idxs[num_grouped_atoms_];
for (int i = 0; i < num_mols; i++) {
std::vector<int> atoms = group_idxs[i];
mol_offsets[i] = offset;
int num_atoms = atoms.size();
for (int j = 0; j < num_atoms; j++) {
mol_idxs[offset+j] = i;
atom_idxs[offset+j] = atoms[j];
}
offset += num_atoms;
}
mol_offsets[num_mols] = offset;
gpuErrchk(cudaMemcpy(d_mol_idxs_, mol_idxs, num_grouped_atoms_*sizeof(*d_mol_idxs_), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_atom_idxs_, atom_idxs, num_grouped_atoms_*sizeof(*d_atom_idxs_), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_mol_offsets_, mol_offsets, (num_mols+1)*sizeof(*d_mol_offsets_), cudaMemcpyHostToDevice));
};
MonteCarloBarostat::~MonteCarloBarostat() {
gpuErrchk(cudaFree(d_x_after_));
gpuErrchk(cudaFree(d_centroids_));
gpuErrchk(cudaFree(d_atom_idxs_));
gpuErrchk(cudaFree(d_mol_idxs_));
gpuErrchk(cudaFree(d_mol_offsets_));
gpuErrchk(cudaFree(d_box_after_));
gpuErrchk(cudaFree(d_u_before_));
gpuErrchk(cudaFree(d_u_after_));
};
void __global__ rescale_positions(
const int N, // Number of atoms to shift
double * __restrict__ coords, // Coordinates
const double length_scale,
const double * __restrict__ box, // [9]
double * __restrict__ scaled_box, // [9]
const int * __restrict__ atom_idxs, // [N]
const int * __restrict__ mol_idxs, // [N]
const int * __restrict__ mol_offsets, // [num_mols + 1]
const double * __restrict__ centroids // [num_mols * 3]
) {
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
const int atom_idx = atom_idxs[idx];
const int mol_idx = mol_idxs[idx];
double center_x = box[0*3+0] * 0.5;
double center_y = box[1*3+1] * 0.5;
double center_z = box[2*3+2] * 0.5;
const double num_atoms = static_cast<double>(mol_offsets[mol_idx+1] - mol_offsets[mol_idx]);
const double centroid_x = centroids[mol_idx*3+0] / num_atoms;
const double centroid_y = centroids[mol_idx*3+1] / num_atoms;
const double centroid_z = centroids[mol_idx*3+2] / num_atoms;
const double displacement_x = ((centroid_x - center_x) * length_scale) + center_x - centroid_x;
const double displacement_y = ((centroid_y - center_y) * length_scale) + center_y - centroid_y;
const double displacement_z = ((centroid_z - center_z) * length_scale) + center_z - centroid_z;
coords[atom_idx*3+0] += displacement_x;
coords[atom_idx*3+1] += displacement_y;
coords[atom_idx*3+2] += displacement_z;
if (atom_idx == 0) {
scaled_box[0*3+0] *= length_scale;
scaled_box[1*3+1] *= length_scale;
scaled_box[2*3+2] *= length_scale;
}
}
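// Note on the displacement above (illustrative): each molecule is moved
// rigidly so that its centroid scales about the box centre,
// centroid' = center + (centroid - center) * length_scale,
// hence displacement = (centroid - center) * length_scale + center - centroid.
// E.g. with center = 2.0, centroid = 3.0 and length_scale = 1.1:
// displacement = 1.0 * 1.1 + 2.0 - 3.0 = 0.1.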
void __global__ find_group_centroids(
const int N, // Number of atoms to shift
const double * __restrict__ coords, // Coordinates
const int * __restrict__ atom_idxs, // [N]
const int * __restrict__ mol_idxs, // [N]
double * __restrict__ centroids // [num_molecules * 3]
) {
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
const int atom_idx = atom_idxs[idx];
const int mol_idx = mol_idxs[idx];
atomicAdd(centroids + mol_idx*3+0, coords[atom_idx*3+0]);
atomicAdd(centroids + mol_idx*3+1, coords[atom_idx*3+1]);
atomicAdd(centroids + mol_idx*3+2, coords[atom_idx*3+2]);
}
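// Note: the kernel above accumulates per-molecule coordinate *sums*; the
// division by each molecule's atom count happens in rescale_positions. This
// is why d_centroids_ must be zeroed before every call (done in inplace_move).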
void MonteCarloBarostat::reset_counters() {
num_attempted_ = 0;
num_accepted_ = 0;
}
void MonteCarloBarostat::inplace_move(
double *d_x,
double *d_box,
const double lambda
) {
step_++;
if (step_ % interval_ != 0) {
return;
}
std::vector<double> h_box(9);
gpuErrchk(cudaMemcpy(&h_box[0], d_box, 9*sizeof(*d_box), cudaMemcpyDeviceToHost));
double volume = h_box[0*3+0]*h_box[1*3+1]*h_box[2*3+2];
if(volume_scale_ == 0) {
volume_scale_ = 0.01*volume;
}
const int num_molecules = group_idxs_.size();
const int tpb = 32;
const int blocks = (num_grouped_atoms_ + tpb - 1) / tpb;
// Compute the energy of the modified system.
cudaStream_t stream = static_cast<cudaStream_t>(0);
gpuErrchk(cudaMemsetAsync(d_u_before_, 0, N_*sizeof(*d_u_before_), stream));
gpuErrchk(cudaMemsetAsync(d_u_after_, 0, N_*sizeof(*d_u_after_), stream));
gpuErrchk(cudaMemsetAsync(d_centroids_, 0, num_molecules*3*sizeof(*d_centroids_), stream));
for(int i=0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x,
d_box,
lambda,
nullptr,
nullptr,
nullptr,
d_u_before_,
stream // TBD: parallelize me!
);
}
gpuErrchk(cudaMemcpyAsync(d_x_after_, d_x, N_*3*sizeof(*d_x), cudaMemcpyDeviceToDevice, stream));
gpuErrchk(cudaMemcpyAsync(d_box_after_, d_box, 3*3*sizeof(*d_box), cudaMemcpyDeviceToDevice, stream));
find_group_centroids<<<blocks, tpb, 0, stream>>>(
num_grouped_atoms_,
d_x_after_,
d_atom_idxs_,
d_mol_idxs_,
d_centroids_
);
double delta_volume = volume_scale_*2*(dist_(mt_)-0.5);
double new_volume = volume+delta_volume;
double length_scale = std::pow(new_volume/volume, 1.0/3.0);
// Scale centroids
rescale_positions<<<blocks, tpb, 0, stream>>>(
num_grouped_atoms_,
d_x_after_,
length_scale,
d_box,
d_box_after_, // Box will be rescaled by length_scale
d_atom_idxs_,
d_mol_idxs_,
d_mol_offsets_,
d_centroids_
);
gpuErrchk(cudaPeekAtLastError());
for(int i=0; i < bps_.size(); i++) {
bps_[i]->execute_device(
N_,
d_x_after_,
d_box_after_,
lambda,
nullptr,
nullptr,
nullptr,
d_u_after_,
stream // TBD: parallelize me!
);
}
double pressure = pressure_*AVOGADRO*1e-25;
const double kT = BOLTZ*temperature_;
unsigned long long u_init_agg = 0;
unsigned long long u_final_agg = 0;
unsigned long long initial_energy[N_];
unsigned long long final_energy[N_];
gpuErrchk(cudaMemcpyAsync(initial_energy, d_u_before_, N_*sizeof(*d_u_before_), cudaMemcpyDeviceToHost, stream));
gpuErrchk(cudaMemcpyAsync(final_energy, d_u_after_, N_*sizeof(*d_u_after_), cudaMemcpyDeviceToHost, stream));
gpuErrchk(cudaStreamSynchronize(stream));
for (int i = 0; i < N_; i++) {
u_init_agg += initial_energy[i];
u_final_agg += final_energy[i];
}
double u_init = FIXED_TO_FLOAT<double>(u_init_agg);
double u_final = FIXED_TO_FLOAT<double>(u_final_agg);
double w = u_final-u_init + pressure*delta_volume - num_molecules*kT*std::log(new_volume/volume);
if (w > 0 && dist_(mt_) > std::exp(-w/kT)) {
// Reject the step: leave the coords, the box and hence the volume unchanged.
}
else {
num_accepted_++;
// Step accepted: replace the coords and box, and track the new volume so the
// volume_scale_ adjustment below sees the current box size.
gpuErrchk(cudaMemcpyAsync(d_x, d_x_after_, N_*3*sizeof(*d_x), cudaMemcpyDeviceToDevice, stream));
gpuErrchk(cudaMemcpyAsync(d_box, d_box_after_, 3*3*sizeof(*d_box), cudaMemcpyDeviceToDevice, stream));
volume = new_volume;
}
num_attempted_++;
if (num_attempted_ >= 10) {
if (num_accepted_ < 0.25*num_attempted_) {
volume_scale_ /= 1.1;
this->reset_counters();
}
else if (num_accepted_ > 0.75*num_attempted_) {
volume_scale_ = std::min(volume_scale_*1.1, volume*0.3);
this->reset_counters();
}
}
};
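// For reference (not used by the class): the Monte Carlo acceptance weight
// assembled in inplace_move, with its unit conversion spelled out. Energies
// are in kJ/mol and volumes in nm^3, so the pressure in bar must become
// kJ/mol/nm^3: 1 bar = 1e5 J/m^3 = 1e-22 J/nm^3, and multiplying by
// AVOGADRO/1000 gives kJ/mol/nm^3, i.e. the factor AVOGADRO * 1e-25 used
// above. The move is accepted with probability min(1, exp(-w/kT)). This is a
// sketch and assumes <cmath> is available transitively.
static inline double mc_barostat_weight(
double u_init, double u_final, // kJ/mol
double pressure_bar,
double delta_volume, double new_volume, double volume, // nm^3
int num_molecules, double kT) {
const double pressure = pressure_bar * AVOGADRO * 1e-25; // kJ/mol/nm^3
return (u_final - u_init) + pressure * delta_volume
- num_molecules * kT * std::log(new_volume / volume);
}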
void MonteCarloBarostat::set_interval(const int interval){
interval_ = interval;
// Reset the step counter so the user can rely on the barostat triggering within `interval` steps
step_ = 0;
}
int MonteCarloBarostat::get_interval(){
return interval_;
}
void MonteCarloBarostat::set_pressure(const double pressure){
pressure_ = pressure;
// The system may already be equilibrated and sit far from the next
// volume_scale_ adjustment (e.g. num_attempted_ = 300, num_accepted_ = 150),
// so reset the counters whenever the target pressure changes.
this->reset_counters();
}
}
|
a4cc23eafe677ef308f31b56678a34f7f3045661.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Introduction code to CUDA
* first attempt to push this onto the device
*
* Compile: nvcc -g -o vec_add vecAdd1.cu -lm
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// One thread walks the entire array; this first port is deliberately serial
for(int i = 0; i < n; i++)
c[i] = a[i] + b[i];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 1<<20;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c, n);
// Wait for the GPU to finish
hipDeviceSynchronize();
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| a4cc23eafe677ef308f31b56678a34f7f3045661.cu | /* Introduction code to CUDA
* first attempt to push this onto the device
*
* Compile: nvcc -g -o vec_add vecAdd1.cu -lm
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// One thread walks the entire array; this first port is deliberately serial
for(int i = 0; i < n; i++)
c[i] = a[i] + b[i];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 1<<20;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
// Execute the kernel
vecAdd<<<1, 1>>>(d_a, d_b, d_c, n);
// Wait for the GPU to finish
cudaDeviceSynchronize();
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
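/* A conventional next step after this serial first port (sketch only,
* assuming the same data setup as main above): give each thread one element
* and round the grid size up so the whole vector is covered.
*
* __global__ void vecAddParallel(double *a, double *b, double *c, int n)
* {
* int i = blockIdx.x * blockDim.x + threadIdx.x;
* if (i < n) // guard: the last block may run past the end
* c[i] = a[i] + b[i];
* }
*
* int blockSize = 256;
* int gridSize = (n + blockSize - 1) / blockSize; // ceil(n / blockSize)
* vecAddParallel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
*/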
|
7c5999f435fcbcd2de4a42eba0e8820256f8588c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data,
const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T>
__global__ void GPUROIAlignBackward(
const int nthreads, const T* input_rois, const T* out_grad,
const int num_rois, const float spatial_scale, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int sampling_ratio, int* roi_batch_id_data,
T* input_grad, const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto aligned = ctx.Attr<bool>("aligned");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256);
#endif
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and imgs "
"batch_size must be the same. But received rois_batch_size = %d, "
"batch_size = %d",
rois_batch_size, batch_size));
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto lod = rois->lod();
PADDLE_ENFORCE_EQ(
lod.empty(), false,
platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto rois_lod = lod.back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
rois_batch_size, batch_size));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( GPUROIAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()), aligned);
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto aligned = ctx.Attr<bool>("aligned");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
auto roi_ptr =
memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUROIAlignBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()),
aligned);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
| 7c5999f435fcbcd2de4a42eba0e8820256f8588c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_launch_config.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
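// CUDA_KERNEL_LOOP(i, n) (defined in one of the platform headers included
// above) expands to a grid-stride loop, roughly
// for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
// i += blockDim.x * gridDim.x)
// so NumBlocks() can safely cap the grid at kNumMaxinumNumBlocks: work items
// beyond blockDim.x * gridDim.x are picked up on later loop iterations rather
// than being dropped.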
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
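// Worked example for the weights above (illustration only): at a sample point
// (y, x) = (2.25, 3.75) we get y_low = 2, x_low = 3, ly = 0.25, lx = 0.75, so
// w1 = hy*hx = 0.75 * 0.25 = 0.1875 (top-left v1)
// w2 = hy*lx = 0.75 * 0.75 = 0.5625 (top-right v2)
// w3 = ly*hx = 0.25 * 0.25 = 0.0625 (bottom-left v3)
// w4 = ly*lx = 0.25 * 0.75 = 0.1875 (bottom-right v4)
// and w1 + w2 + w3 + w4 = 1, so the result is a convex combination of the
// four neighbouring pixels.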
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data,
const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T>
__global__ void GPUROIAlignBackward(
const int nthreads, const T* input_rois, const T* out_grad,
const int num_rois, const float spatial_scale, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int sampling_ratio, int* roi_batch_id_data,
T* input_grad, const bool continuous_coordinate) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
T roi_width = roi_xmax - roi_xmin;
T roi_height = roi_ymax - roi_ymin;
if (!continuous_coordinate) {
roi_width = max(roi_width, static_cast<T>(1.));
roi_height = max(roi_height, static_cast<T>(1.));
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto aligned = ctx.Attr<bool>("aligned");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256);
#endif
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and imgs "
"batch_size must be the same. But received rois_batch_size = %d, "
"batch_size = %d",
rois_batch_size, batch_size));
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto lod = rois->lod();
PADDLE_ENFORCE_EQ(
lod.empty(), false,
platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto rois_lod = lod.back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
rois_batch_size, batch_size));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
GPUROIAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()), aligned);
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto aligned = ctx.Attr<bool>("aligned");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
auto roi_ptr =
memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUROIAlignBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()),
aligned);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
c66fd2871fdd5d2cd2abaf0185cc5d73539cd0e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//BlockDim is (32, 32)
int y = threadIdx.y + blockIdx.y* blockDim.y;
int x = threadIdx.x + blockIdx.x* blockDim.x;
if (x >= numCols || y >= numRows) // the grid is rounded up, so guard the image edges
return;
int index = numCols*y + x; // row-major: stride between rows is numCols
uchar4 color = rgbaImage[index];
float channelSum = .299f * color.x + .587f * color.y + .114f * color.z;
greyImage[index] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int blockWidth = 32;
// threads per block
const dim3 blockSize(blockWidth, blockWidth, 1);
// blocks per grid, rounded up so every pixel is covered
int blocksX = (numCols + blockWidth - 1)/blockWidth;
int blocksY = (numRows + blockWidth - 1)/blockWidth;
const dim3 gridSize(blocksX, blocksY, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| c66fd2871fdd5d2cd2abaf0185cc5d73539cd0e8.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//BlockDim is (32, 32)
int y = threadIdx.y + blockIdx.y* blockDim.y;
int x = threadIdx.x + blockIdx.x* blockDim.x;
if (x >= numCols || y >= numRows) // the grid is rounded up, so guard the image edges
return;
int index = numCols*y + x; // row-major: stride between rows is numCols
uchar4 color = rgbaImage[index];
float channelSum = .299f * color.x + .587f * color.y + .114f * color.z;
greyImage[index] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const int blockWidth = 32;
// threads per block
const dim3 blockSize(blockWidth, blockWidth, 1);
// blocks per grid, rounded up so every pixel is covered
int blocksX = (numCols + blockWidth - 1)/blockWidth;
int blocksY = (numRows + blockWidth - 1)/blockWidth;
const dim3 gridSize(blocksX, blocksY, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
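// Worked example of the NTSC formula (illustration only): for a pixel with
// (R, G, B) = (200, 100, 50),
// I = .299f*200 + .587f*100 + .114f*50 = 59.8 + 58.7 + 5.7 = 124.2,
// which the float -> unsigned char conversion stores as the grey byte 124.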
|
25d08e9aa10603df05c1be515e4e31b4ea2ee063.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "plusMinus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
double *base = NULL; // non-const so &base converts to void** for hipMalloc
hipMalloc(&base, XSIZE*YSIZE*sizeof(double));
float *deviation = NULL;
hipMalloc(&deviation, XSIZE*YSIZE*sizeof(float));
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(double));
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((plusMinus), dim3(gridBlock), dim3(threadBlock), 0, 0, size, base, deviation, a, b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((plusMinus), dim3(gridBlock), dim3(threadBlock), 0, 0, size, base, deviation, a, b);
}
hipDeviceSynchronize(); // finish the warm-up launches before starting the clock
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((plusMinus), dim3(gridBlock), dim3(threadBlock), 0, 0, size, base, deviation, a, b);
}
hipDeviceSynchronize(); // launches are asynchronous; wait for the GPU before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 25d08e9aa10603df05c1be515e4e31b4ea2ee063.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "plusMinus.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
double *base = NULL; // non-const so &base converts to void** for cudaMalloc
cudaMalloc(&base, XSIZE*YSIZE*sizeof(double));
float *deviation = NULL;
cudaMalloc(&deviation, XSIZE*YSIZE*sizeof(float));
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(double));
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
plusMinus<<<gridBlock,threadBlock>>>(size,base,deviation,a,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
plusMinus<<<gridBlock,threadBlock>>>(size,base,deviation,a,b);
}
cudaDeviceSynchronize(); // finish the warm-up launches before starting the clock
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
plusMinus<<<gridBlock,threadBlock>>>(size,base,deviation,a,b);
}
cudaDeviceSynchronize(); // launches are asynchronous; wait for the GPU before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
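// A sketch of the same measurement with CUDA events, which timestamp on the
// GPU and avoid host-side clock jitter (illustration only; a drop-in for the
// steady_clock block above):
// cudaEvent_t t0, t1;
// cudaEventCreate(&t0); cudaEventCreate(&t1);
// cudaEventRecord(t0);
// for (int i = 0; i < 1000; i++)
// plusMinus<<<gridBlock, threadBlock>>>(size, base, deviation, a, b);
// cudaEventRecord(t1);
// cudaEventSynchronize(t1);
// float ms = 0.f;
// cudaEventElapsedTime(&ms, t0, t1); // milliseconds for all 1000 launches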