hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
b01fb8b2fa5cbd29757108b82cf2ad8f77f2fe7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <type_traits>
#include <wmma_extension/wmma_extension.hpp>
#include "common.hpp"
#ifndef TEST_ARCH
#define TEST_ARCH (-1)
#endif
//#define TEST_TF32
#ifndef TEST_TF32
constexpr int M = 16;
constexpr int N = 16;
constexpr int K = 16;
#else
constexpr int M = 16;
constexpr int N = 16;
constexpr int K = 8;
#endif
template <class T, class S>
__device__ __host__ T convert(const S);
template <> __device__ __host__ float convert<float, float>(const float a) {return a;}
template <> __device__ __host__ float convert<float, half >(const half a) {return __half2float(a);}
template <> __device__ __host__ half convert<half , float>(const float a) {return __float2half(a);}
template <> __device__ __host__ half convert<half , half >(const half a) {return a;}
__global__ void test_store_vector_kernel(
float* const dst,
const float* const src,
const nvcuda::wmma::layout_t layout
) {
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, M, N, K, float> frag_c;
nvcuda::wmma::load_matrix_sync(frag_c, src, M, layout);
mtk::wmma::store_vector(dst, frag_c, layout);
}
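// Note (illustrative, not from the original source): as exercised by the host
// code below, mtk::wmma::store_vector appears to write the leading M-element
// vector of the accumulator fragment to dst, so dst[i] is expected to equal
// src[i] for i = 0..M-1 when src is loaded with leading dimension M.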
void test(const nvcuda::wmma::layout_t layout) {
float* src_mem;
float* dst_mem;
hipHostMalloc(&src_mem, M * N * sizeof(float));
hipHostMalloc(&dst_mem, M * sizeof(float));
for (std::size_t i = 0; i < M * N; i++) {
src_mem[i] = static_cast<float>(i);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_store_vector_kernel), dim3(1), dim3(32), 0, 0, dst_mem, src_mem, layout);
hipDeviceSynchronize();
double error = 0;
for (std::size_t i = 0; i < M; i++) {
const double diff = src_mem[i] - dst_mem[i];
error = ::max(error, std::abs(diff));
}
hipHostFree(src_mem);
hipHostFree(dst_mem);
std::printf("[%s] ARCH=%d, <%2d, %2d, %2d>, error=%e, [%s]\n",
__FILE__,
TEST_ARCH,
M, N, K,
error,
mtk::test_utils::get_test_result_string(error < mtk::test_utils::get_machine_eps<float>())
);
}
int main() {
test(nvcuda::wmma::mem_row_major);
test(nvcuda::wmma::mem_col_major);
test(nvcuda::wmma::mem_row_major);
test(nvcuda::wmma::mem_col_major);
}
| b01fb8b2fa5cbd29757108b82cf2ad8f77f2fe7d.cu | #include <iostream>
#include <type_traits>
#include <wmma_extension/wmma_extension.hpp>
#include "common.hpp"
#ifndef TEST_ARCH
#define TEST_ARCH (-1)
#endif
//#define TEST_TF32
#ifndef TEST_TF32
constexpr int M = 16;
constexpr int N = 16;
constexpr int K = 16;
#else
constexpr int M = 16;
constexpr int N = 16;
constexpr int K = 8;
#endif
template <class T, class S>
__device__ __host__ T convert(const S);
template <> __device__ __host__ float convert<float, float>(const float a) {return a;}
template <> __device__ __host__ float convert<float, half >(const half a) {return __half2float(a);}
template <> __device__ __host__ half convert<half , float>(const float a) {return __float2half(a);}
template <> __device__ __host__ half convert<half , half >(const half a) {return a;}
__global__ void test_store_vector_kernel(
float* const dst,
const float* const src,
const nvcuda::wmma::layout_t layout
) {
nvcuda::wmma::fragment<nvcuda::wmma::accumulator, M, N, K, float> frag_c;
nvcuda::wmma::load_matrix_sync(frag_c, src, M, layout);
mtk::wmma::store_vector(dst, frag_c, layout);
}
void test(const nvcuda::wmma::layout_t layout) {
float* src_mem;
float* dst_mem;
cudaMallocHost(&src_mem, M * N * sizeof(float));
cudaMallocHost(&dst_mem, M * sizeof(float));
for (std::size_t i = 0; i < M * N; i++) {
src_mem[i] = static_cast<float>(i);
}
cudaDeviceSynchronize();
test_store_vector_kernel<<<1, 32>>>(dst_mem, src_mem, layout);
cudaDeviceSynchronize();
double error = 0;
for (std::size_t i = 0; i < M; i++) {
const double diff = src_mem[i] - dst_mem[i];
error = std::max(error, std::abs(diff));
}
cudaFreeHost(src_mem);
cudaFreeHost(dst_mem);
std::printf("[%s] ARCH=%d, <%2d, %2d, %2d>, error=%e, [%s]\n",
__FILE__,
TEST_ARCH,
M, N, K,
error,
mtk::test_utils::get_test_result_string(error < mtk::test_utils::get_machine_eps<float>())
);
}
int main() {
test(nvcuda::wmma::mem_row_major);
test(nvcuda::wmma::mem_col_major);
test(nvcuda::wmma::mem_row_major);
test(nvcuda::wmma::mem_col_major);
}
|
e399eee76fd9d149213a6ddef6c2410ce1aa2f23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlat2c.cu mixed zc -> ds, Tue Feb 9 16:05:32 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
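/*
Worked example (illustration only, not part of the original source):
with n = 100, BLK_X = 64 and BLK_Y = 32 the grid is
ceil(100/64) x ceil(100/32) = 2 x 4 blocks. The thread with
blockIdx = (1,2) and threadIdx.x = 10 owns row ind = 74 and block-column
iby = 64; "full" is false because 74 < 96, so it takes the partial/diagonal
path and converts only A(74, 64..74), the entries on or below the diagonal.
*/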
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
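/*
Illustrative call sketch (assumption, not part of the original source):

    magma_int_t info;
    magmablas_dlat2s_q( MagmaLower, n, dA, ldda, dSA, lddsa, queue, &info );
    if (info != 0) {
        // an entry of A exceeded the single-precision overflow threshold;
        // SA is unspecified, so fall back to a double-precision path.
    }
*/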
extern "C" void
magmablas_dlat2s_q(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( dlat2s_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( dlat2s_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
}
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
| e399eee76fd9d149213a6ddef6c2410ce1aa2f23.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlat2c.cu mixed zc -> ds, Tue Feb 9 16:05:32 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dlat2s_q(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
if (uplo == MagmaLower) {
dlat2s_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
else if (uplo == MagmaUpper) {
dlat2s_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
a327889330356b7df67085896e2a37153d0ec890.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#define BLOCK_SIZE 1024
__global__ void reduction_kernel2(double* sum, const double* a, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
double reduction(double* a, long N) {
// assume a is already loaded to device memory
double *y_d;
long N_work = 1;
for (long i = (N + BLOCK_SIZE - 1) / (BLOCK_SIZE); i > 1; i = (i + BLOCK_SIZE - 1) / (BLOCK_SIZE)) N_work += i;
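// Worked sizing example (illustration, not from the original source):
// for N = 1e6 and BLOCK_SIZE = 1024 the first pass produces
// ceil(1e6/1024) = 977 partial sums and the second pass produces 1,
// so N_work = 1 + 977 = 978 doubles of scratch are allocated below.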
hipMalloc(&y_d, N_work * sizeof(double)); // extra memory buffer for reduction across thread-blocks
double* sum_d = y_d;
long Nb = (N + BLOCK_SIZE - 1) / (BLOCK_SIZE);
reduction_kernel2 << <Nb, BLOCK_SIZE >> >(sum_d, a, N);
while (Nb > 1) {
long N = Nb;
Nb = (Nb + BLOCK_SIZE - 1) / (BLOCK_SIZE);
reduction_kernel2 << <Nb, BLOCK_SIZE >> >(sum_d + N, sum_d, N);
sum_d += N;
}
double sum;
hipMemcpyAsync(&sum, sum_d, 1 * sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
return sum;
}
void Check_CUDA_Error(const char *message) {
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "ERROR: %s: %s\n", message, hipGetErrorString(error));
exit(-1);
}
}
__global__ void update(double* res, double* u, double* u_temp, const double* f, int N) {
double h = 1 / (N + 1.0);
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int j = idx / (N + 2);
int i = idx % N + 2;
if (0 < j && j < N + 1 && 0 < i && i < N + 1) {
double resid = (1 / (h*h))*(4 * u[idx] - u[idx - 1] - u[idx + 1] - u[idx - (N + 2)] - u[idx + (N + 2)]) - f[idx];
res[(j - 1)*N + i] = resid*resid;
u_temp[idx] = h*h*f[idx] + u[idx - 1] + u[idx + 1] + u[idx - (N + 2)] + u[idx + (N + 2)];
u_temp[idx] = 0.25*u_temp[idx];
}
}
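// The kernel above performs one Jacobi sweep of the 5-point Poisson stencil
// (formula note for illustration, not new code): with h = 1/(N+1),
// u_new(i,j) = ( h*h*f(i,j) + u(i-1,j) + u(i+1,j) + u(i,j-1) + u(i,j+1) ) / 4,
// while res accumulates the squared residual of the discrete -Laplace(u) = f
// at interior points.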
int main() {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("\nDevice Name: %s\n\n", prop.name);
long N = 1e3;
int max_itr = 21;
double *u;
hipHostMalloc((void**)&u, (N+2)*(N+2)*sizeof(double));
double* f;
hipHostMalloc((void**)&f, (N+2)*(N+2) * sizeof(double));
for (long j = 0; j < (N+2)*(N+2); j++) {
f[j] = 1.0;
u[j] = 0.0;
}
double *f_d, *resArray_d;
double *u_d, *u_temp_d;
hipMalloc(&f_d, (N + 2)*(N + 2) * sizeof(double));
hipMalloc(&resArray_d, (N + 2)*(N + 2) * sizeof(double));
hipMalloc(&u_d, (N + 2)*(N + 2) * sizeof(double));
hipMalloc(&u_temp_d, (N + 2)*(N + 2) * sizeof(double));
double t = omp_get_wtime();
hipMemcpyAsync(f_d, f, (N + 2)*(N + 2)* sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(u_d, u, (N + 2)*(N + 2) * sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
long Nb = (N + 2)*(N + 2) / BLOCK_SIZE;
for (int i = 0; i < max_itr; i++) {
hipLaunchKernelGGL(( update) , dim3(Nb), dim3(BLOCK_SIZE), 0, 0, resArray_d, u_d, u_temp_d, f, N);
hipDeviceSynchronize();
double *uTemp = u_temp_d;
u_temp_d = u_d;
u_d = uTemp;
double sum = reduction(resArray_d, N*N);
if (i%5 == 0)
printf("res %d = %f\n", i, sqrt(sum));
}
printf("\nelapsed time: %fs\n", omp_get_wtime() - t);
hipFree(u_d); hipFree(u_temp_d); hipFree(resArray_d); hipFree(f_d);
hipHostFree(u); hipHostFree(f);
return 0;
}
| a327889330356b7df67085896e2a37153d0ec890.cu | #include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#define BLOCK_SIZE 1024
__global__ void reduction_kernel2(double* sum, const double* a, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
double reduction(double* a, long N) {
// assume a is already loaded to device memory
double *y_d;
long N_work = 1;
for (long i = (N + BLOCK_SIZE - 1) / (BLOCK_SIZE); i > 1; i = (i + BLOCK_SIZE - 1) / (BLOCK_SIZE)) N_work += i;
cudaMalloc(&y_d, N_work * sizeof(double)); // extra memory buffer for reduction across thread-blocks
double* sum_d = y_d;
long Nb = (N + BLOCK_SIZE - 1) / (BLOCK_SIZE);
reduction_kernel2 << <Nb, BLOCK_SIZE >> >(sum_d, a, N);
while (Nb > 1) {
long N = Nb;
Nb = (Nb + BLOCK_SIZE - 1) / (BLOCK_SIZE);
reduction_kernel2 << <Nb, BLOCK_SIZE >> >(sum_d + N, sum_d, N);
sum_d += N;
}
double sum;
cudaMemcpyAsync(&sum, sum_d, 1 * sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
return sum;
}
void Check_CUDA_Error(const char *message) {
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "ERROR: %s: %s\n", message, cudaGetErrorString(error));
exit(-1);
}
}
__global__ void update(double* res, double* u, double* u_temp, const double* f, int N) {
double h = 1 / (N + 1.0);
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
int j = idx / (N + 2);
int i = idx % N + 2;
if (0 < j && j < N + 1 && 0 < i && i < N + 1) {
double resid = (1 / (h*h))*(4 * u[idx] - u[idx - 1] - u[idx + 1] - u[idx - (N + 2)] - u[idx + (N + 2)]) - f[idx];
res[(j - 1)*N + i] = resid*resid;
u_temp[idx] = h*h*f[idx] + u[idx - 1] + u[idx + 1] + u[idx - (N + 2)] + u[idx + (N + 2)];
u_temp[idx] = 0.25*u_temp[idx];
}
}
int main() {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("\nDevice Name: %s\n\n", prop.name);
long N = 1e3;
int max_itr = 21;
double *u;
cudaMallocHost((void**)&u, (N+2)*(N+2)*sizeof(double));
double* f;
cudaMallocHost((void**)&f, (N+2)*(N+2) * sizeof(double));
for (long j = 0; j < (N+2)*(N+2); j++) {
f[j] = 1.0;
u[j] = 0.0;
}
double *f_d, *resArray_d;
double *u_d, *u_temp_d;
cudaMalloc(&f_d, (N + 2)*(N + 2) * sizeof(double));
cudaMalloc(&resArray_d, (N + 2)*(N + 2) * sizeof(double));
cudaMalloc(&u_d, (N + 2)*(N + 2) * sizeof(double));
cudaMalloc(&u_temp_d, (N + 2)*(N + 2) * sizeof(double));
double t = omp_get_wtime();
cudaMemcpyAsync(f_d, f, (N + 2)*(N + 2)* sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(u_d, u, (N + 2)*(N + 2) * sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
long Nb = (N + 2)*(N + 2) / BLOCK_SIZE;
for (int i = 0; i < max_itr; i++) {
update <<<Nb, BLOCK_SIZE>>>(resArray_d, u_d, u_temp_d, f, N);
cudaDeviceSynchronize();
double *uTemp = u_temp_d;
u_temp_d = u_d;
u_d = uTemp;
double sum = reduction(resArray_d, N*N);
if (i%5 == 0)
printf("res %d = %f\n", i, sqrt(sum));
}
printf("\nelapsed time: %fs\n", omp_get_wtime() - t);
cudaFree(u_d); cudaFree(u_temp_d); cudaFree(resArray_d); cudaFree(f_d);
cudaFreeHost(u); cudaFreeHost(f);
return 0;
}
|
tile_layer.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/tile_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
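// Index note (illustration, not from the original source): d is the offset
// inside a tile_size chunk, b the source position on the tiled axis and n the
// outer index, so every one of the num_tiles copies that share (n, b, d) reads
// the same bottom element (n * bottom_tile_axis + b) * tile_size + d.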
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((Tile<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((TileBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe | tile_layer.cu | #include <vector>
#include "caffe/layers/tile_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Tile(const int nthreads, const Dtype* bottom_data,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size / num_tiles) % bottom_tile_axis;
const int n = index / tile_size / num_tiles / bottom_tile_axis;
const int bottom_index = (n * bottom_tile_axis + b) * tile_size + d;
top_data[index] = bottom_data[bottom_index];
}
}
template <typename Dtype>
void TileLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int nthreads = top[0]->count();
Tile<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, inner_dim_, tiles_, bottom_tile_axis, top_data);
}
template <typename Dtype>
__global__ void TileBackward(const int nthreads, const Dtype* top_diff,
const int tile_size, const int num_tiles, const int bottom_tile_axis,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % tile_size;
const int b = (index / tile_size) % bottom_tile_axis;
const int n = index / tile_size / bottom_tile_axis;
bottom_diff[index] = 0;
int top_index = (n * num_tiles * bottom_tile_axis + b) * tile_size + d;
for (int t = 0; t < num_tiles; ++t) {
bottom_diff[index] += top_diff[top_index];
top_index += bottom_tile_axis * tile_size;
}
}
}
template <typename Dtype>
void TileLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_tile_axis = bottom[0]->shape(axis_);
const int tile_size = inner_dim_ / bottom_tile_axis;
const int nthreads = bottom[0]->count();
TileBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, tile_size, tiles_, bottom_tile_axis, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(TileLayer);
} // namespace caffe |
b3d426b5fe942c851842cc9bf62a2242703ad983.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define BLOCKSIZE 1024
__device__ float sigmoid(float x) {
return 1.0/(1+expf(-x));
}
__global__ void gelu_fwd_cuda(float* input, float* ret,
int64_t size) {
int64_t idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
ret[idx] = input[idx]*sigmoid(1.702*input[idx]);
}
}
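// Formula note (illustration, not from the original source): the forward pass
// uses the sigmoid approximation GELU(x) ~= x * s(1.702*x) with s = sigmoid.
// The backward kernel below applies the product and chain rules:
// d/dx[ x * s(1.702*x) ] = s(1.702*x) + 1.702 * x * s(1.702*x) * (1 - s(1.702*x)),
// which is the factor multiplied by grad_out.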
__global__ void gelu_bwd_cuda(float* grad_out, float* input,
float* ret, int64_t size) {
int64_t idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
float tmp = sigmoid(1.702*input[idx]);
ret[idx] = grad_out[idx]*(tmp + 1.702*input[idx]*tmp*(1-tmp));
}
}
__host__ void gelu_fwd_interface(float* input, float* ret, int64_t size) {
int64_t nblock = (size + BLOCKSIZE - 1)/BLOCKSIZE;
hipLaunchKernelGGL(( gelu_fwd_cuda), dim3(nblock), dim3(BLOCKSIZE), 0, 0, input, ret, size);
}
__host__ void gelu_bwd_interface(float* grad_out, float* input, float* ret,
int64_t size) {
int64_t nblock = (size + BLOCKSIZE - 1)/BLOCKSIZE;
hipLaunchKernelGGL(( gelu_bwd_cuda), dim3(nblock), dim3(BLOCKSIZE), 0, 0, grad_out, input,
ret, size);
}
| b3d426b5fe942c851842cc9bf62a2242703ad983.cu | #include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKSIZE 1024
__device__ float sigmoid(float x) {
return 1.0/(1+expf(-x));
}
__global__ void gelu_fwd_cuda(float* input, float* ret,
int64_t size) {
int64_t idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
ret[idx] = input[idx]*sigmoid(1.702*input[idx]);
}
}
__global__ void gelu_bwd_cuda(float* grad_out, float* input,
float* ret, int64_t size) {
int64_t idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx < size) {
float tmp = sigmoid(1.702*input[idx]);
ret[idx] = grad_out[idx]*(tmp + 1.702*input[idx]*tmp*(1-tmp));
}
}
__host__ void gelu_fwd_interface(float* input, float* ret, int64_t size) {
int64_t nblock = (size + BLOCKSIZE - 1)/BLOCKSIZE;
gelu_fwd_cuda<<<nblock, BLOCKSIZE>>>(input, ret, size);
}
__host__ void gelu_bwd_interface(float* grad_out, float* input, float* ret,
int64_t size) {
int64_t nblock = (size + BLOCKSIZE - 1)/BLOCKSIZE;
gelu_bwd_cuda<<<nblock, BLOCKSIZE>>>(grad_out, input,
ret, size);
}
|
5251dd9d3c1076afee63176c88bad51e4ad0a91e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xsyrk_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xsyrk_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int m, int n,
TYPE alpha, TYPE** A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE beta, TYPE** B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount)
{
(void)strideA;
(void)strideB;
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, A_row_off, A_col_off, lda,
beta, (TYPE**)B, B_row_off, B_col_off, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_syrk_batch(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE** A, int lda,
const TYPE beta, TYPE** B, int ldb,
int batchCount)
{
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, 0, 0, lda,
beta, (TYPE**)B, 0, 0, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
extern "C"
int kblasXsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE** A, int lda,
const TYPE beta, TYPE** B, int ldb,
int batchCount)
{
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, 0, 0, lda,
beta, (TYPE**)B, 0, 0, ldb,
batchCount);
}
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int* m, int* n,
int max_m, int max_n,
TYPE alpha, TYPE** A, int* lda,
TYPE beta, TYPE** B, int* ldb,
int batchCount)
{
return Xsyrk_batch_nonuniform_core<TYPE>(
handle,
uplo, trans,
m, n,
alpha, A, lda,
beta, B, ldb,
max_m, max_n,
batchCount);
}
int kblas_syrk_batch( kblasHandle_t handle,
char uplo, char trans,
int* m, int* n,
int max_m, int max_n,
TYPE alpha, TYPE** A, int* lda,
TYPE beta, TYPE** B, int* ldb,
int batchCount)
{
return Xsyrk_batch( handle,
uplo, trans,
m, n,
max_m, max_n,
alpha, A, lda,
beta, B, ldb,
batchCount);
}
//==============================================================================================
//Strided form
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int m, int n,
TYPE alpha, TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE beta, TYPE* B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)(A) + A_row_off + A_col_off * lda, lda, strideA,
beta, (TYPE*)(B) + B_row_off + B_col_off * ldb, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
int kblas_syrk_batch( kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE* A, int lda, long strideA,
const TYPE beta, TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)A, lda, strideA,
beta, (TYPE*)B, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
extern "C"
int kblasXsyrk_batch_strided(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE* A, int lda, long strideA,
const TYPE beta, TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)A, lda, strideA,
beta, (TYPE*)B, ldb, strideB,
batchCount);
}
| 5251dd9d3c1076afee63176c88bad51e4ad0a91e.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xsyrk_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ali Charara
* @date 2018-11-14
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "workspace_queries.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xsyrk_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int m, int n,
TYPE alpha, TYPE** A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE beta, TYPE** B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount)
{
(void)strideA;
(void)strideB;
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, A_row_off, A_col_off, lda,
beta, (TYPE**)B, B_row_off, B_col_off, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_syrk_batch(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE** A, int lda,
const TYPE beta, TYPE** B, int ldb,
int batchCount)
{
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, 0, 0, lda,
beta, (TYPE**)B, 0, 0, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
extern "C"
int kblasXsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE** A, int lda,
const TYPE beta, TYPE** B, int ldb,
int batchCount)
{
return Xsyrk_batch_core<TYPE, TYPE**>(
handle,
uplo, trans,
m, n,
alpha, (TYPE**)A, 0, 0, lda,
beta, (TYPE**)B, 0, 0, ldb,
batchCount);
}
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int* m, int* n,
int max_m, int max_n,
TYPE alpha, TYPE** A, int* lda,
TYPE beta, TYPE** B, int* ldb,
int batchCount)
{
return Xsyrk_batch_nonuniform_core<TYPE>(
handle,
uplo, trans,
m, n,
alpha, A, lda,
beta, B, ldb,
max_m, max_n,
batchCount);
}
int kblas_syrk_batch( kblasHandle_t handle,
char uplo, char trans,
int* m, int* n,
int max_m, int max_n,
TYPE alpha, TYPE** A, int* lda,
TYPE beta, TYPE** B, int* ldb,
int batchCount)
{
return Xsyrk_batch( handle,
uplo, trans,
m, n,
max_m, max_n,
alpha, A, lda,
beta, B, ldb,
batchCount);
}
//==============================================================================================
//Strided form
int Xsyrk_batch(kblasHandle_t handle,
char uplo, char trans,
int m, int n,
TYPE alpha, TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE beta, TYPE* B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)(A) + A_row_off + A_col_off * lda, lda, strideA,
beta, (TYPE*)(B) + B_row_off + B_col_off * ldb, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
int kblas_syrk_batch( kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE* A, int lda, long strideA,
const TYPE beta, TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)A, lda, strideA,
beta, (TYPE*)B, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
extern "C"
int kblasXsyrk_batch_strided(kblasHandle_t handle,
char uplo, char trans,
const int m, const int n,
const TYPE alpha, const TYPE* A, int lda, long strideA,
const TYPE beta, TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xsyrk_batch_strided_core<TYPE, TYPE*>(
handle,
uplo, trans,
m, n,
alpha, (TYPE*)A, lda, strideA,
beta, (TYPE*)B, ldb, strideB,
batchCount);
}
|
4f88f5ac00114e9e32fc06c6a2840240bf9a0ebf.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=2 --gridDim=2
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2
struct S {
int * p;
};
__global__ void* foo(int * A) {
S myS;
myS.p = A;
int * q;
q = myS.p;
q[threadIdx.x + blockDim.x*blockIdx.x] = threadIdx.x;
}
| 4f88f5ac00114e9e32fc06c6a2840240bf9a0ebf.cu | //pass
//--blockDim=2 --gridDim=2
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2
struct S {
int * p;
};
__global__ void foo(int * A) {
S myS;
myS.p = A;
int * q;
q = myS.p;
q[threadIdx.x + blockDim.x*blockIdx.x] = threadIdx.x;
}
|
a6030e60263111e744ee645a5701eeb2ee2c8a03.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix_hip.cuh"
#include <algorithm>
#include <thrust/inner_product.h>
#include <thrust/extrema.h>
namespace matrix
{
using namespace h2o4gpu;
void max_index_per_column(Matrix<float>& A, std::vector<int>& result_array, device::DeviceContext& context){
int result;
for (int i=0; i<A.columns(); i++) {
safe_cublas(hipblasIsamax(context.cublas_handle, A.rows(), A.data() + i*A.rows(), 1, &result));
result_array[i] = result - 1 + i * A.rows();
}
}
void max_index_per_column(Matrix<double>& A, std::vector<int>& result_array, device::DeviceContext& context){
int result;
for (int i=0; i<A.columns(); i++) {
safe_cublas(hipblasIdamax(context.cublas_handle, A.rows(), A.data() + i*A.rows(), 1, &result));
result_array[i] = result - 1 + i * A.rows();
}
}
template<typename T, typename U>
void multiply(Matrix<T>& A, const U a, device::DeviceContext& context)
{
thrust::transform(A.dptr(), A.dptr() + A.size(), A.dptr(), [=]__device__ (U val)
{
return val * a;
}
);
}
template<typename T>
void subtract(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
auto counting = thrust::make_counting_iterator(0);
const T* d_A = A.data();
const T* d_B = B.data();
T* d_C = C.data();
thrust::for_each(counting, counting + A.rows() * A.columns(), [=]__device__(int idx)
{
d_C[idx] = d_A[idx] - d_B[idx];
});
}
template<typename T>
void add(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
auto counting = thrust::make_counting_iterator(0);
const T* d_A = A.data();
const T* d_B = B.data();
T* d_C = C.data();
thrust::for_each(counting, counting + A.rows() * A.columns(), [=]__device__(int idx)
{
d_C[idx] = d_A[idx] + d_B[idx];
});
}
template<typename T>
void normalize_vector_thrust(Matrix<T>& M, device::DeviceContext& context){
float M_inner = thrust::inner_product(M.dptr(), M.dptr() + M.size(), M.dptr(), 0.0f); //Will allocate memory for every call to fxn.
M.transform([=]__device__ (float val){return val / std::sqrt(M_inner);});
}
void multiply_diag(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool left_diag)
{
hipblasSideMode_t mode = left_diag ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT;
int m = C.rows();
int n = C.columns();
int lda = m;
int incx = 1; //Review what this should be...
int ldc = m;
safe_cublas(hipblasSdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
void multiply_diag(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool left_diag)
{
hipblasSideMode_t mode = left_diag ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT;
int m = C.rows();
int n = C.columns();
int lda = m;
int incx = 1; //Review what this should be...
int ldc = m;
safe_cublas(hipblasDdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
void multiply(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, float alpha)
{
hipblasOperation_t op_a = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t op_b = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const float beta = 0;
int m = C.rows();
int n = C.columns();
int k = transpose_a ? A.rows() : A.columns();
int lda = transpose_a ? k : m;
int ldb = transpose_b ? n : k;
int ldc = m;
safe_cublas(hipblasSgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
void multiply(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, double alpha)
{
hipblasOperation_t op_a = transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t op_b = transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const double beta = 0;
int m = C.rows();
int n = C.columns();
int k = transpose_a ? A.rows() : A.columns();
int lda = transpose_a ? k : m;
int ldb = transpose_b ? n : k;
int ldc = m;
safe_cublas(hipblasDgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
void transpose(const Matrix<float>& A, Matrix<float>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
const float alpha = 1.0f;
const float beta = 0.0f;
safe_cublas(hipblasSgeam(context.cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
void transpose(const Matrix<double>& A, Matrix<double>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
const double alpha = 1.0f;
const double beta = 0.0f;
safe_cublas(hipblasDgeam(context.cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
void normalize_columns(Matrix<float>& M, Matrix<float>& M_temp, Matrix<float>& column_length, const Matrix<float>& ones, device::DeviceContext& context)
{
thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
auto d_column_length = column_length.data();
auto d_ones = ones.data();
const float alpha = 1.0f;
const float beta = 0.0f;
safe_cublas(hipblasSgemv(context.cublas_handle, HIPBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(float val)
{
if (val == 0.0)
{
return 0.0;
}
return 1.0/ sqrt(val);
});
safe_cublas(hipblasSdgmm(context.cublas_handle, HIPBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
void normalize_columns(Matrix<double>& M, Matrix<double>& M_temp, Matrix<double>& column_length, const Matrix<double>& ones, device::DeviceContext& context)
{
thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
auto d_column_length = column_length.data();
auto d_ones = ones.data();
const double alpha = 1.0f;
const double beta = 0.0f;
safe_cublas(hipblasDgemv(context.cublas_handle, HIPBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(double val)
{
if (val == 0.0)
{
return 0.0;
}
return 1.0/ sqrt(val);
});
safe_cublas(hipblasDdgmm(context.cublas_handle, HIPBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
void normalize_columns(Matrix<float>& M, device::DeviceContext& context)
{
Matrix<float> M_temp(M.rows(), M.columns());
Matrix<float> columns_length(1, M.columns());
Matrix<float> ones(1, M.columns());
ones.fill(1.0f);
normalize_columns(M, M_temp, columns_length, ones, context);
}
void normalize_columns(Matrix<double>& M, device::DeviceContext& context)
{
Matrix<double> M_temp(M.rows(), M.columns());
Matrix<double> columns_length(1, M.columns());
Matrix<double> ones(1, M.columns());
ones.fill(1.0f);
normalize_columns(M, M_temp, columns_length, ones, context);
}
void normalize_vector_cublas(Matrix<float>& M, device::DeviceContext& context){
float norm2 = 0.0;
safe_cublas(hipblasSnrm2(context.cublas_handle, M.rows(), M.data(), 1.0, &norm2));
M.transform([=]__device__ (float val){return val * (1/norm2);});
}
void normalize_vector_cublas(Matrix<double>& M, device::DeviceContext& context){
double norm2 = 0.0;
safe_cublas(hipblasDnrm2(context.cublas_handle, M.rows(), M.data(), 1.0, &norm2));
M.transform([=]__device__ (float val){return val * (1/norm2);});
}
void residual(const Matrix<float>& X, const Matrix<float>& D, const Matrix<float>& S, Matrix<float>& R, device::DeviceContext& context)
{
multiply(D, S, R, context);
subtract(X, R, R, context);
}
void residual(const Matrix<double>& X, const Matrix<double>& D, const Matrix<double>& S, Matrix<double>& R, device::DeviceContext& context)
{
multiply(D, S, R, context);
subtract(X, R, R, context);
}
void calculate_eigen_pairs_exact(const Matrix<float>& X, Matrix<float>& Q, Matrix<float>& w, device::DeviceContext& context)
{
h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
int lwork;
safe_cusolver(hipsolverDnSsyevd_bufferSize(context.cusolver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
float *d_work;
safe_cuda(hipMalloc(&d_work, sizeof(float) * lwork));
int *dev_info = NULL;
safe_cuda(hipMalloc ((void**)&dev_info, sizeof(int)));
Q.copy(X);
safe_cusolver(hipsolverDnSsyevd(context.cusolver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
safe_cuda(hipDeviceSynchronize());
safe_cuda(hipFree(d_work));
safe_cuda(hipFree(dev_info));
safe_cuda(hipGetLastError());
}
void calculate_eigen_pairs_exact(const Matrix<double>& X, Matrix<double>& Q, Matrix<double>& w, device::DeviceContext& context)
{
h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
int lwork;
safe_cusolver(hipsolverDnDsyevd_bufferSize(context.cusolver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
double *d_work;
safe_cuda(hipMalloc(&d_work, sizeof(double) * lwork));
int *dev_info = NULL;
safe_cuda(hipMalloc ((void**)&dev_info, sizeof(int)));
Q.copy(X);
safe_cusolver(hipsolverDnDsyevd(context.cusolver_handle, HIPSOLVER_EIG_MODE_VECTOR, HIPBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
safe_cuda(hipDeviceSynchronize());
safe_cuda(hipFree(d_work));
safe_cuda(hipFree(dev_info));
safe_cuda(hipGetLastError());
}
void dot_product(Matrix<float>& b_k1, Matrix<float>& b_k, float* eigen_value_estimate, device::DeviceContext& context)
{
safe_cublas(hipblasSdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, eigen_value_estimate));
}
void dot_product(Matrix<double>& b_k1, Matrix<double>& b_k, double* eigen_value_estimate, device::DeviceContext& context)
{
safe_cublas(hipblasDdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, eigen_value_estimate));
}
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Stricly floating point operations that are not used
void linear_solve(const Matrix<float>& A, Matrix<float>& X, const Matrix<float>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows()>= A.columns(),"Linear solve requires m >= n");
h2o4gpu_check(X.rows()>= X.columns(),"Linear solve requires n >= k"); //TODO: is this restriction necessary?
Matrix<float> A_copy(A);
Matrix<float> B_copy(A.rows(), A.columns());
thrust::copy(B.dptr(), B.dptr() + B.size(), B_copy.dptr());
thrust::fill(B_copy.dptr() + B.size(), B_copy.dptr() + B_copy.size(), 0.0f);
int work_size = 0;
safe_cusolver(hipsolverDnSgeqrf_bufferSize(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), &work_size));
thrust::device_vector<float> work(work_size);
float* d_work = thrust::raw_pointer_cast(work.data());
thrust::device_vector<float> tau((std::min)(A.rows(), A.columns()));
float* d_tau = thrust::raw_pointer_cast(tau.data());
thrust::device_vector<int> dev_info(1);
int* d_dev_info = thrust::raw_pointer_cast(dev_info.data());
safe_cusolver(hipsolverDnSgeqrf(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), d_tau, d_work, work_size, d_dev_info));
h2o4gpu_check(dev_info[0] == 0, "geqrf unsuccessful");
safe_cusolver(hipsolverDnSormqr(context.cusolver_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, A.rows(), A.columns(), (std::min)(A.rows(), A.columns()), A_copy.data(), A.rows(), d_tau, B_copy.data(), A.rows(), d_work, work_size, d_dev_info));
h2o4gpu_check(dev_info[0] == 0, "ormqr unsuccessful");
Matrix<float> R(A.columns(), A.columns());
Matrix<float> QTB(A.columns(), B.columns());
auto counting = thrust::make_counting_iterator(0);
int n = R.columns();
int m = A.rows();
auto d_R = R.data();
auto d_A_copy = A_copy.data();
auto d_QTB = QTB.data();
auto d_B_copy = B_copy.data();
int qtb_columns = QTB.columns();
thrust::for_each(counting, counting + R.size(), [=]__device__ (int idx)
{
int row = idx % n;
int column = idx / n;
d_R[idx] = d_A_copy[column * m + row];
if (column < qtb_columns)
{
d_QTB[idx] = d_B_copy[column * m + row];
}
});
const float alpha = 1.0f;
safe_cublas(hipblasStrsm(context.cublas_handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, QTB.rows(), QTB.columns(), &alpha, R.data(), R.rows(), QTB.data(), QTB.rows()));
thrust::copy(QTB.dptr(), QTB.dptr() + QTB.size(), X.data());
}
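// Formula note (illustration, not from the original source): the routine below
// builds the Moore-Penrose pseudoinverse from the SVD A = U * S * V^T as
// pinv(A) = V * S^+ * U^T, where S^+ inverts singular values above the
// eps = 1e-5 cutoff and zeroes the rest; zero rows are appended first so that
// the gesvd solver (which requires m >= n) can be applied.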
void pseudoinverse(const Matrix<float>& A, Matrix<float>& pinvA, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == pinvA.columns() && A.columns() == pinvA.rows(), "pseudoinverse dimensions incorrect");
//Add zero rows if m < n such that m >= n
Matrix<float> A_extended((std::max)(A.columns(), A.rows()), A.columns());
auto counting = thrust::make_counting_iterator(0);
int A_column_size = A.rows();
int A_extended_column_size = A_extended.rows();
auto d_A = A.data();
auto d_A_extended = A_extended.data();
thrust::for_each(counting, counting + A_extended.size(), [=]__device__(int idx)
{
int row = idx % A_extended_column_size;
if (row < A_column_size)
{
int column = idx / A_extended_column_size;
d_A_extended[idx] = d_A[A_column_size * column + row];
}
else
{
d_A_extended[idx] = 0;
}
});
int work_size = 0;
safe_cusolver(hipsolverDnSgesvd_bufferSize(context.cusolver_handle, A_extended.rows(), A_extended.columns(), &work_size));
Matrix<float> work(work_size, 1);
Matrix<float> S((std::min)(A_extended.rows(), A_extended.columns()), 1);
Matrix<float> U(A_extended.rows(), A_extended.rows());
Matrix<float> VT(A_extended.columns(), A_extended.columns());
Matrix<int> dev_info(1, 1);
safe_cusolver (hipsolverDnSgesvd(context.cusolver_handle, 'A', 'A', A_extended.rows(), A_extended.columns(), d_A_extended, A_extended.rows(), S.data(), U.data(), U.rows(), VT.data(), VT.rows(), work.data(), work_size, NULL, dev_info.data()));
float eps = 1e-5;
thrust::transform(S.dptr(), S.dptr() + S.size(), S.dptr(), [=]__device__(float val)
{
if (abs(val) < eps)
{
return 0.0;
}
else
{
return 1.0 / val;
}
});
Matrix<float> UT(A_extended.rows(), A_extended.rows());
//Calculate transpose of U
const float alpha = 1.0;
const float beta = 0.0;
safe_cublas(hipblasSgeam(context.cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, UT.rows(), UT.columns(), &alpha, U.data(), UT.rows(), &beta,NULL, UT.rows(), UT.data(), UT.rows()));
safe_cublas(hipblasSdgmm(context.cublas_handle, HIPBLAS_SIDE_LEFT, UT.rows(), UT.columns(), UT.data(), UT.rows(), S.data(), 1, U.data(), U.rows()));
Matrix<float> pinvA_extended(A_extended.columns(), A_extended.rows());
multiply(VT, U, pinvA_extended, context, true);
thrust::copy(pinvA_extended.dptr(), pinvA_extended.dptr() + pinvA.size(), pinvA.dptr());
}
void f_normalize(Matrix<float>& M, device::DeviceContext& context)
{
Matrix<float> temp(M.rows(), M.columns());
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
float sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
multiply(M, 1.0 / std::sqrt(sum), context);
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
float final_sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
printf("f norm sum squares: %1.4f\n", final_sum);
}
void normalize_columns_cub(Matrix<float>& M, device::DeviceContext& context)
{
//Create alias so the device lambda does not dereference this pointer
int m = M.rows();
thrust::device_vector<float> temp(M.size());
thrust::device_vector<float> length_squared(M.columns());
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.begin(), [=]__device__(float val)
{
return val * val;
});
thrust::device_vector<int> column_segments(M.columns() + 1);
auto counting = thrust::make_counting_iterator(0);
thrust::transform(counting, counting + column_segments.size(), column_segments.begin(), [=]__device__(int idx)
{
return idx * m;
});
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
auto segments = thrust::raw_pointer_cast(column_segments.data());
auto sum_in = thrust::raw_pointer_cast(temp.data());
auto sum_out = thrust::raw_pointer_cast(length_squared.data());
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
M.columns(), segments, segments + 1);
// Allocate temporary storage
hipMalloc(&d_temp_storage, temp_storage_bytes);
hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
M.columns(), segments, segments + 1);
//Scale
auto d_length_squared = thrust::raw_pointer_cast(length_squared.data());
auto d_data = M.data();
thrust::transform(counting, counting + M.size(), M.dptr(), [=]__device__(int idx)
{
int col = idx / m;
float length_squared = d_length_squared[col];
if (length_squared > 0.0)
{
return d_data[idx] / std::sqrt(d_length_squared[col]);
}
else
{
return 0.0f;
}
});
hipFree(d_temp_storage);
}
}
//Original Impl
template void matrix::multiply<double>(Matrix<double>& A, const float a, device::DeviceContext& context);
//Impl for floats and doubles
template void matrix::multiply<float>(Matrix<float>& A, const float a, device::DeviceContext& context);
template void matrix::multiply<double>(Matrix<double>& A, const double a, device::DeviceContext& context);
template void matrix::subtract<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::subtract<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::add<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::add<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<float>(Matrix<float>& M, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<double>(Matrix<double>& M, device::DeviceContext& context);
| a6030e60263111e744ee645a5701eeb2ee2c8a03.cu | #include "matrix.cuh"
#include <algorithm>
#include <thrust/inner_product.h>
#include <thrust/extrema.h>
namespace matrix
{
using namespace h2o4gpu;
void max_index_per_column(Matrix<float>& A, std::vector<int>& result_array, device::DeviceContext& context){
int result;
for (int i=0; i<A.columns(); i++) {
safe_cublas(cublasIsamax(context.cublas_handle, A.rows(), A.data() + i*A.rows(), 1, &result));
result_array[i] = result - 1 + i * A.rows();
}
}
void max_index_per_column(Matrix<double>& A, std::vector<int>& result_array, device::DeviceContext& context){
int result;
for (int i=0; i<A.columns(); i++) {
safe_cublas(cublasIdamax(context.cublas_handle, A.rows(), A.data() + i*A.rows(), 1, &result));
result_array[i] = result - 1 + i * A.rows();
}
}
template<typename T, typename U>
void multiply(Matrix<T>& A, const U a, device::DeviceContext& context)
{
thrust::transform(A.dptr(), A.dptr() + A.size(), A.dptr(), [=]__device__ (U val)
{
return val * a;
}
);
}
template<typename T>
void subtract(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
auto counting = thrust::make_counting_iterator(0);
const T* d_A = A.data();
const T* d_B = B.data();
T* d_C = C.data();
thrust::for_each(counting, counting + A.rows() * A.columns(), [=]__device__(int idx)
{
d_C[idx] = d_A[idx] - d_B[idx];
});
}
template<typename T>
void add(const Matrix<T>& A, const Matrix<T>& B, Matrix<T>& C, device::DeviceContext& context)
{
auto counting = thrust::make_counting_iterator(0);
const T* d_A = A.data();
const T* d_B = B.data();
T* d_C = C.data();
thrust::for_each(counting, counting + A.rows() * A.columns(), [=]__device__(int idx)
{
d_C[idx] = d_A[idx] + d_B[idx];
});
}
template<typename T>
void normalize_vector_thrust(Matrix<T>& M, device::DeviceContext& context){
float M_inner = thrust::inner_product(M.dptr(), M.dptr() + M.size(), M.dptr(), 0.0f); //Will allocate memory for every call to fxn.
M.transform([=]__device__ (float val){return val / std::sqrt(M_inner);});
}
void multiply_diag(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool left_diag)
{
cublasSideMode_t mode = left_diag ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
int m = C.rows();
int n = C.columns();
int lda = m;
int incx = 1; //Review what this should be...
int ldc = m;
safe_cublas(cublasSdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
void multiply_diag(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool left_diag)
{
cublasSideMode_t mode = left_diag ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT;
int m = C.rows();
int n = C.columns();
int lda = m;
int incx = 1; //Review what this should be...
int ldc = m;
safe_cublas(cublasDdgmm(context.cublas_handle, mode, m, n, A.data(), lda, B.data(), incx, C.data(), ldc));
}
void multiply(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, float alpha)
{
cublasOperation_t op_a = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t op_b = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const float beta = 0;
int m = C.rows();
int n = C.columns();
int k = transpose_a ? A.rows() : A.columns();
int lda = transpose_a ? k : m;
int ldb = transpose_b ? n : k;
int ldc = m;
safe_cublas(cublasSgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
void multiply(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context, bool transpose_a, bool transpose_b, double alpha)
{
cublasOperation_t op_a = transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t op_b = transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N;
const double beta = 0;
int m = C.rows();
int n = C.columns();
int k = transpose_a ? A.rows() : A.columns();
int lda = transpose_a ? k : m;
int ldb = transpose_b ? n : k;
int ldc = m;
safe_cublas(cublasDgemm(context.cublas_handle, op_a, op_b, m, n, k, &alpha, A.data(), lda, B.data(), ldb, &beta, C.data(), ldc));
}
void transpose(const Matrix<float>& A, Matrix<float>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
const float alpha = 1.0f;
const float beta = 0.0f;
safe_cublas(cublasSgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
void transpose(const Matrix<double>& A, Matrix<double>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == B.columns()&&A.columns() == B.rows(), "Transpose dimensions incorrect");
const double alpha = 1.0f;
const double beta = 0.0f;
safe_cublas(cublasDgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, B.rows(), B.columns(), &alpha, A.data(), A.rows(), &beta, NULL, B.rows(), B.data(), B.rows()));
}
void normalize_columns(Matrix<float>& M, Matrix<float>& M_temp, Matrix<float>& column_length, const Matrix<float>& ones, device::DeviceContext& context)
{
thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
auto d_column_length = column_length.data();
auto d_ones = ones.data();
const float alpha = 1.0f;
const float beta = 0.0f;
safe_cublas(cublasSgemv(context.cublas_handle, CUBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(float val)
{
if (val == 0.0)
{
return 0.0;
}
return 1.0/ sqrt(val);
});
safe_cublas(cublasSdgmm(context.cublas_handle, CUBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
void normalize_columns(Matrix<double>& M, Matrix<double>& M_temp, Matrix<double>& column_length, const Matrix<double>& ones, device::DeviceContext& context)
{
thrust::transform(M.dptr(), M.dptr() + M.size(), M_temp.dptr(), sqr_op());
auto d_column_length = column_length.data();
auto d_ones = ones.data();
const double alpha = 1.0f;
const double beta = 0.0f;
safe_cublas(cublasDgemv(context.cublas_handle, CUBLAS_OP_T, M.rows(), M.columns(), &alpha, M_temp.data(), M.rows(), d_ones, 1, &beta, d_column_length, 1));
thrust::transform(column_length.dptr(), column_length.dptr() + column_length.size(), column_length.dptr(), [=]__device__(double val)
{
if (val == 0.0)
{
return 0.0;
}
return 1.0/ sqrt(val);
});
safe_cublas(cublasDdgmm(context.cublas_handle, CUBLAS_SIDE_RIGHT, M.rows(), M.columns(), M.data(), M.rows(), d_column_length, 1, M.data(), M.rows()));
}
void normalize_columns(Matrix<float>& M, device::DeviceContext& context)
{
Matrix<float> M_temp(M.rows(), M.columns());
Matrix<float> columns_length(1, M.columns());
Matrix<float> ones(1, M.columns());
ones.fill(1.0f);
normalize_columns(M, M_temp, columns_length, ones, context);
}
void normalize_columns(Matrix<double>& M, device::DeviceContext& context)
{
Matrix<double> M_temp(M.rows(), M.columns());
Matrix<double> columns_length(1, M.columns());
Matrix<double> ones(1, M.columns());
ones.fill(1.0f);
normalize_columns(M, M_temp, columns_length, ones, context);
}
void normalize_vector_cublas(Matrix<float>& M, device::DeviceContext& context){
float norm2 = 0.0;
safe_cublas(cublasSnrm2(context.cublas_handle, M.rows(), M.data(), 1.0, &norm2));
M.transform([=]__device__ (float val){return val * (1/norm2);});
}
void normalize_vector_cublas(Matrix<double>& M, device::DeviceContext& context){
double norm2 = 0.0;
safe_cublas(cublasDnrm2(context.cublas_handle, M.rows(), M.data(), 1.0, &norm2));
M.transform([=]__device__ (float val){return val * (1/norm2);});
}
void residual(const Matrix<float>& X, const Matrix<float>& D, const Matrix<float>& S, Matrix<float>& R, device::DeviceContext& context)
{
multiply(D, S, R, context);
subtract(X, R, R, context);
}
void residual(const Matrix<double>& X, const Matrix<double>& D, const Matrix<double>& S, Matrix<double>& R, device::DeviceContext& context)
{
multiply(D, S, R, context);
subtract(X, R, R, context);
}
void calculate_eigen_pairs_exact(const Matrix<float>& X, Matrix<float>& Q, Matrix<float>& w, device::DeviceContext& context)
{
h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
int lwork;
safe_cusolver(cusolverDnSsyevd_bufferSize(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
float *d_work;
safe_cuda(cudaMalloc(&d_work, sizeof(float) * lwork));
int *dev_info = NULL;
safe_cuda(cudaMalloc ((void**)&dev_info, sizeof(int)));
Q.copy(X);
safe_cusolver(cusolverDnSsyevd(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
safe_cuda(cudaDeviceSynchronize());
safe_cuda(cudaFree(d_work));
safe_cuda(cudaFree(dev_info));
safe_cuda(cudaGetLastError());
}
void calculate_eigen_pairs_exact(const Matrix<double>& X, Matrix<double>& Q, Matrix<double>& w, device::DeviceContext& context)
{
h2o4gpu_check(X.rows() == X.columns(), "X must be a symmetric matrix");
h2o4gpu_check(X.rows() == Q.rows() && X.columns() == Q.columns(), "X and Q must have the same dimension");
h2o4gpu_check(w.rows() == Q.columns(), "Q and w should have the same number of columns");
int lwork;
safe_cusolver(cusolverDnDsyevd_bufferSize(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, X.rows(), X.data(), X.columns(), w.data(), &lwork));
double *d_work;
safe_cuda(cudaMalloc(&d_work, sizeof(double) * lwork));
int *dev_info = NULL;
safe_cuda(cudaMalloc ((void**)&dev_info, sizeof(int)));
Q.copy(X);
safe_cusolver(cusolverDnDsyevd(context.cusolver_handle, CUSOLVER_EIG_MODE_VECTOR, CUBLAS_FILL_MODE_UPPER, Q.rows(), Q.data(), Q.columns(), w.data(), d_work, lwork, dev_info));
safe_cuda(cudaDeviceSynchronize());
safe_cuda(cudaFree(d_work));
safe_cuda(cudaFree(dev_info));
safe_cuda(cudaGetLastError());
}
void dot_product(Matrix<float>& b_k1, Matrix<float>& b_k, float* eigen_value_estimate, device::DeviceContext& context)
{
safe_cublas(cublasSdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, eigen_value_estimate));
}
void dot_product(Matrix<double>& b_k1, Matrix<double>& b_k, double* eigen_value_estimate, device::DeviceContext& context)
{
safe_cublas(cublasDdot(context.cublas_handle, b_k1.rows(), b_k1.data(), 1.0, b_k.data(), 1.0, eigen_value_estimate));
}
//----------------------------------------------------------------------------------------------------------------------------------------------------------------------
//Strictly floating point operations that are not used
void linear_solve(const Matrix<float>& A, Matrix<float>& X, const Matrix<float>& B, device::DeviceContext& context)
{
h2o4gpu_check(A.rows()>= A.columns(),"Linear solve requires m >= n");
h2o4gpu_check(X.rows()>= X.columns(),"Linear solve requires n >= k"); //TODO: is this restriction necessary?
Matrix<float> A_copy(A);
Matrix<float> B_copy(A.rows(), A.columns());
thrust::copy(B.dptr(), B.dptr() + B.size(), B_copy.dptr());
thrust::fill(B_copy.dptr() + B.size(), B_copy.dptr() + B_copy.size(), 0.0f);
int work_size = 0;
safe_cusolver(cusolverDnSgeqrf_bufferSize(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), &work_size));
thrust::device_vector<float> work(work_size);
float* d_work = thrust::raw_pointer_cast(work.data());
thrust::device_vector<float> tau((std::min)(A.rows(), A.columns()));
float* d_tau = thrust::raw_pointer_cast(tau.data());
thrust::device_vector<int> dev_info(1);
int* d_dev_info = thrust::raw_pointer_cast(dev_info.data());
safe_cusolver(cusolverDnSgeqrf(context.cusolver_handle, A_copy.rows(), A_copy.columns(), A_copy.data(), A_copy.rows(), d_tau, d_work, work_size, d_dev_info));
h2o4gpu_check(dev_info[0] == 0, "geqrf unsuccessful");
safe_cusolver(cusolverDnSormqr(context.cusolver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, A.rows(), A.columns(), (std::min)(A.rows(), A.columns()), A_copy.data(), A.rows(), d_tau, B_copy.data(), A.rows(), d_work, work_size, d_dev_info));
h2o4gpu_check(dev_info[0] == 0, "ormqr unsuccessful");
Matrix<float> R(A.columns(), A.columns());
Matrix<float> QTB(A.columns(), B.columns());
auto counting = thrust::make_counting_iterator(0);
int n = R.columns();
int m = A.rows();
auto d_R = R.data();
auto d_A_copy = A_copy.data();
auto d_QTB = QTB.data();
auto d_B_copy = B_copy.data();
int qtb_columns = QTB.columns();
thrust::for_each(counting, counting + R.size(), [=]__device__ (int idx)
{
int row = idx % n;
int column = idx / n;
d_R[idx] = d_A_copy[column * m + row];
if (column < qtb_columns)
{
d_QTB[idx] = d_B_copy[column * m + row];
}
});
const float alpha = 1.0f;
safe_cublas(cublasStrsm(context.cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, QTB.rows(), QTB.columns(), &alpha, R.data(), R.rows(), QTB.data(), QTB.rows()));
thrust::copy(QTB.dptr(), QTB.dptr() + QTB.size(), X.data());
}
void pseudoinverse(const Matrix<float>& A, Matrix<float>& pinvA, device::DeviceContext& context)
{
h2o4gpu_check(A.rows() == pinvA.columns() && A.columns() == pinvA.rows(), "pseudoinverse dimensions incorrect");
//Add zero rows if m < n such that m >= n
Matrix<float> A_extended((std::max)(A.columns(), A.rows()), A.columns());
auto counting = thrust::make_counting_iterator(0);
int A_column_size = A.rows();
int A_extended_column_size = A_extended.rows();
auto d_A = A.data();
auto d_A_extended = A_extended.data();
thrust::for_each(counting, counting + A_extended.size(), [=]__device__(int idx)
{
int row = idx % A_extended_column_size;
if (row < A_column_size)
{
int column = idx / A_extended_column_size;
d_A_extended[idx] = d_A[A_column_size * column + row];
}
else
{
d_A_extended[idx] = 0;
}
});
int work_size = 0;
safe_cusolver(cusolverDnSgesvd_bufferSize(context.cusolver_handle, A_extended.rows(), A_extended.columns(), &work_size));
Matrix<float> work(work_size, 1);
Matrix<float> S((std::min)(A_extended.rows(), A_extended.columns()), 1);
Matrix<float> U(A_extended.rows(), A_extended.rows());
Matrix<float> VT(A_extended.columns(), A_extended.columns());
Matrix<int> dev_info(1, 1);
safe_cusolver (cusolverDnSgesvd(context.cusolver_handle, 'A', 'A', A_extended.rows(), A_extended.columns(), d_A_extended, A_extended.rows(), S.data(), U.data(), U.rows(), VT.data(), VT.rows(), work.data(), work_size, NULL, dev_info.data()));
float eps = 1e-5;
thrust::transform(S.dptr(), S.dptr() + S.size(), S.dptr(), [=]__device__(float val)
{
if (abs(val) < eps)
{
return 0.0;
}
else
{
return 1.0 / val;
}
});
Matrix<float> UT(A_extended.rows(), A_extended.rows());
//Calculate transpose of U
const float alpha = 1.0;
const float beta = 0.0;
safe_cublas(cublasSgeam(context.cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, UT.rows(), UT.columns(), &alpha, U.data(), UT.rows(), &beta,NULL, UT.rows(), UT.data(), UT.rows()));
safe_cublas(cublasSdgmm(context.cublas_handle, CUBLAS_SIDE_LEFT, UT.rows(), UT.columns(), UT.data(), UT.rows(), S.data(), 1, U.data(), U.rows()));
Matrix<float> pinvA_extended(A_extended.columns(), A_extended.rows());
multiply(VT, U, pinvA_extended, context, true);
thrust::copy(pinvA_extended.dptr(), pinvA_extended.dptr() + pinvA.size(), pinvA.dptr());
}
void f_normalize(Matrix<float>& M, device::DeviceContext& context)
{
Matrix<float> temp(M.rows(), M.columns());
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
float sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
multiply(M, 1.0 / std::sqrt(sum), context);
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.dptr(), sqr_op());
float final_sum = thrust::reduce(temp.dptr(), temp.dptr() + temp.size());
printf("f norm sum squares: %1.4f\n", final_sum);
}
void normalize_columns_cub(Matrix<float>& M, device::DeviceContext& context)
{
//Create alias so the device lambda does not dereference this pointer
int m = M.rows();
thrust::device_vector<float> temp(M.size());
thrust::device_vector<float> length_squared(M.columns());
thrust::transform(M.dptr(), M.dptr() + M.size(), temp.begin(), [=]__device__(float val)
{
return val * val;
});
thrust::device_vector<int> column_segments(M.columns() + 1);
auto counting = thrust::make_counting_iterator(0);
thrust::transform(counting, counting + column_segments.size(), column_segments.begin(), [=]__device__(int idx)
{
return idx * m;
});
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
auto segments = thrust::raw_pointer_cast(column_segments.data());
auto sum_in = thrust::raw_pointer_cast(temp.data());
auto sum_out = thrust::raw_pointer_cast(length_squared.data());
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
M.columns(), segments, segments + 1);
// Allocate temporary storage
cudaMalloc(&d_temp_storage, temp_storage_bytes);
cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, sum_in, sum_out,
M.columns(), segments, segments + 1);
//Scale
auto d_length_squared = thrust::raw_pointer_cast(length_squared.data());
auto d_data = M.data();
thrust::transform(counting, counting + M.size(), M.dptr(), [=]__device__(int idx)
{
int col = idx / m;
float length_squared = d_length_squared[col];
if (length_squared > 0.0)
{
return d_data[idx] / std::sqrt(d_length_squared[col]);
}
else
{
return 0.0f;
}
});
cudaFree(d_temp_storage);
}
}
//Original Impl
template void matrix::multiply<double>(Matrix<double>& A, const float a, device::DeviceContext& context);
//Impl for floats and doubles
template void matrix::multiply<float>(Matrix<float>& A, const float a, device::DeviceContext& context);
template void matrix::multiply<double>(Matrix<double>& A, const double a, device::DeviceContext& context);
template void matrix::subtract<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::subtract<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::add<float>(const Matrix<float>& A, const Matrix<float>& B, Matrix<float>& C, device::DeviceContext& context);
template void matrix::add<double>(const Matrix<double>& A, const Matrix<double>& B, Matrix<double>& C, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<float>(Matrix<float>& M, device::DeviceContext& context);
template void matrix::normalize_vector_thrust<double>(Matrix<double>& M, device::DeviceContext& context);
|
645beee519e554bb2cd0ee5a566ad24e151a1c6b.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/LogSigmoid.cu"
#else
#include <THHUNN/common.h>
void THNN_(LogSigmoid_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *buffer)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, logSigmoid_updateOutput_functor<scalar_t>());
}
void THNN_(LogSigmoid_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *buffer)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, logSigmoid_updateGradInput_functor<scalar_t>());
}
#endif
| 645beee519e554bb2cd0ee5a566ad24e151a1c6b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/LogSigmoid.cu"
#else
#include <THCUNN/common.h>
void THNN_(LogSigmoid_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *buffer)
{
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, logSigmoid_updateOutput_functor<scalar_t>());
}
void THNN_(LogSigmoid_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *buffer)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, logSigmoid_updateGradInput_functor<scalar_t>());
}
#endif
|
af18c69e93973850dbe0523c5cd61507a37de45a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <benchmark/benchmark.h>
#include <matazure/tensor>
using namespace matazure;
template <typename _ValueType>
__global__ void each_copy_gold_kenel(_ValueType *p_dst, _ValueType *p_src, int_t count){
for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
p_dst[i] = p_src[i];
}
}
template <typename _Tensor>
void BM_each_copy_gold(benchmark::State& state) {
_Tensor ts_src(state.range(0));
_Tensor ts_dst(ts_src.shape());
while (state.KeepRunning()) {
cuda::execution_policy policy;
cuda::configure_grid(policy, each_copy_gold_kenel<typename _Tensor::value_type>);
hipLaunchKernelGGL(( each_copy_gold_kenel), dim3(policy.grid_size()),
dim3( policy.block_size()),
policy.shared_mem_bytes(),
policy.stream() , ts_dst.data(), ts_src.data(), ts_src.size());
cuda::device_synchronize();
}
auto bytes_size = static_cast<size_t>(ts_src.size()) * sizeof(decltype(ts_src[0]));
state.SetBytesProcessed(state.iterations() * bytes_size);
}
template <typename _TensorSrc, typename _TensorDst>
static void BM_mem_copy(benchmark::State& state) {
_TensorSrc ts_src(state.range(0));
_TensorDst ts_dst(ts_src.shape());
while (state.KeepRunning()) {
mem_copy(ts_src, ts_dst);
#ifdef MATAZURE_CUDA
cuda::device_synchronize();
#endif
}
auto bytes_size = static_cast<size_t>(ts_src.size()) * sizeof(typename _TensorSrc::value_type);
state.SetBytesProcessed(state.iterations() * bytes_size);
}
auto BM_host2host_mem_copy = BM_mem_copy<tensor<byte, 1>, tensor<byte, 1>>;
BENCHMARK(BM_host2host_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
#ifdef MATAZURE_CUDA
auto BM_device2host_mem_copy = BM_mem_copy<cuda::tensor<byte, 1>, tensor<byte, 1>>;
BENCHMARK(BM_device2host_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_host2device_mem_copy = BM_mem_copy<tensor<byte, 1>, cuda::tensor<byte, 1>>;
BENCHMARK(BM_host2device_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_device2device_mem_copy = BM_mem_copy<cuda::tensor<byte, 1>, cuda::tensor<byte, 1>>;
BENCHMARK(BM_device2device_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
#endif
auto BM_each_copy_gold_byte = BM_each_copy_gold<cuda::tensor<byte, 1>>;
BENCHMARK(BM_each_copy_gold_byte)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_each_copy_gold_float = BM_each_copy_gold<cuda::tensor<float, 1>>;
BENCHMARK(BM_each_copy_gold_float)->Range(1 << 10, 1 << 28)->UseRealTime();
| af18c69e93973850dbe0523c5cd61507a37de45a.cu | #include <benchmark/benchmark.h>
#include <matazure/tensor>
using namespace matazure;
template <typename _ValueType>
__global__ void each_copy_gold_kenel(_ValueType *p_dst, _ValueType *p_src, int_t count){
for (int_t i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += blockDim.x * gridDim.x) {
p_dst[i] = p_src[i];
}
}
template <typename _Tensor>
void BM_each_copy_gold(benchmark::State& state) {
_Tensor ts_src(state.range(0));
_Tensor ts_dst(ts_src.shape());
while (state.KeepRunning()) {
cuda::execution_policy policy;
cuda::configure_grid(policy, each_copy_gold_kenel<typename _Tensor::value_type>);
each_copy_gold_kenel<<< policy.grid_size(),
policy.block_size(),
policy.shared_mem_bytes(),
policy.stream() >>>(ts_dst.data(), ts_src.data(), ts_src.size());
cuda::device_synchronize();
}
auto bytes_size = static_cast<size_t>(ts_src.size()) * sizeof(decltype(ts_src[0]));
state.SetBytesProcessed(state.iterations() * bytes_size);
}
template <typename _TensorSrc, typename _TensorDst>
static void BM_mem_copy(benchmark::State& state) {
_TensorSrc ts_src(state.range(0));
_TensorDst ts_dst(ts_src.shape());
while (state.KeepRunning()) {
mem_copy(ts_src, ts_dst);
#ifdef MATAZURE_CUDA
cuda::device_synchronize();
#endif
}
auto bytes_size = static_cast<size_t>(ts_src.size()) * sizeof(typename _TensorSrc::value_type);
state.SetBytesProcessed(state.iterations() * bytes_size);
}
auto BM_host2host_mem_copy = BM_mem_copy<tensor<byte, 1>, tensor<byte, 1>>;
BENCHMARK(BM_host2host_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
#ifdef MATAZURE_CUDA
auto BM_device2host_mem_copy = BM_mem_copy<cuda::tensor<byte, 1>, tensor<byte, 1>>;
BENCHMARK(BM_device2host_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_host2device_mem_copy = BM_mem_copy<tensor<byte, 1>, cuda::tensor<byte, 1>>;
BENCHMARK(BM_host2device_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_device2device_mem_copy = BM_mem_copy<cuda::tensor<byte, 1>, cuda::tensor<byte, 1>>;
BENCHMARK(BM_device2device_mem_copy)->Range(1 << 10, 1 << 28)->UseRealTime();
#endif
auto BM_each_copy_gold_byte = BM_each_copy_gold<cuda::tensor<byte, 1>>;
BENCHMARK(BM_each_copy_gold_byte)->Range(1 << 10, 1 << 28)->UseRealTime();
auto BM_each_copy_gold_float = BM_each_copy_gold<cuda::tensor<float, 1>>;
BENCHMARK(BM_each_copy_gold_float)->Range(1 << 10, 1 << 28)->UseRealTime();
|
6850ff08cfc28a049021e10a00a5313bcb7284f0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <hip/hip_runtime.h>
#ifndef nIsPow2
#define nIsPow2 false
#endif
#ifndef blockSize
#define blockSize 256
#endif
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
__device__ inline Real_t norm2(Real_t x, Real_t y) {
return x*x + y*y;
}
__global__ void countInCircle(Real_t *g_idata, int *g_odata, unsigned int n)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
__shared__ volatile int sdata[2 * 256];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += norm2(g_idata[2*i], g_idata[2*i + 1]) > 1 ? 0 : 1;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += norm2(g_idata[2*(i+blockSize)], g_idata[2*(i+blockSize) + 1]) > 1 ? 0 : 1;
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] = mySum = mySum + sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] = mySum = mySum + sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] = mySum = mySum + sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] = mySum = mySum + sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] = mySum = mySum + sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] = mySum = mySum + sdata[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
} | 6850ff08cfc28a049021e10a00a5313bcb7284f0.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <cuda.h>
#ifndef nIsPow2
#define nIsPow2 false
#endif
#ifndef blockSize
#define blockSize 256
#endif
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC
#endif
__device__ inline Real_t norm2(Real_t x, Real_t y) {
return x*x + y*y;
}
__global__ void countInCircle(Real_t *g_idata, int *g_odata, unsigned int n)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
__shared__ volatile int sdata[2 * 256];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += norm2(g_idata[2*i], g_idata[2*i + 1]) > 1 ? 0 : 1;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += norm2(g_idata[2*(i+blockSize)], g_idata[2*(i+blockSize) + 1]) > 1 ? 0 : 1;
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
#ifndef __DEVICE_EMULATION__
if (tid < 32)
#endif
{
if (blockSize >= 64) { sdata[tid] = mySum = mySum + sdata[tid + 32]; EMUSYNC; }
if (blockSize >= 32) { sdata[tid] = mySum = mySum + sdata[tid + 16]; EMUSYNC; }
if (blockSize >= 16) { sdata[tid] = mySum = mySum + sdata[tid + 8]; EMUSYNC; }
if (blockSize >= 8) { sdata[tid] = mySum = mySum + sdata[tid + 4]; EMUSYNC; }
if (blockSize >= 4) { sdata[tid] = mySum = mySum + sdata[tid + 2]; EMUSYNC; }
if (blockSize >= 2) { sdata[tid] = mySum = mySum + sdata[tid + 1]; EMUSYNC; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
} |
b19f5003bb81579cd785a4f58c6a15c49f54e033.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// FPS with local density condition
__global__ void farthestpointsamplingKernel (int b,int n,int m, float r, int minnum,const float * dataset,float * temp,int * idxs, float * cores){
if (m<=0)
return;
const int BlockSize=1024;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
__shared__ int num_neighbor[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
int old=0; // The last sampled point id
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[j]=1e6;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[j];
}
__syncthreads();
int j=0;
while (j<m){
num_neighbor[threadIdx.x]=0;
int besti=0;
float best=-1;
float x1=dataset[old*3+0];
float y1=dataset[old*3+1];
float z1=dataset[old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[k*3+0];
y2=dataset[k*3+1];
z2=dataset[k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=r*r){
num_neighbor[threadIdx.x]++;
}
float d2=min(d,td);
if (d2!=td)
temp[k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
num_neighbor[i1] = num_neighbor[i1] + num_neighbor[i2];
}
}
__syncthreads();
if (num_neighbor[0]>=minnum){
if (threadIdx.x==0){
idxs[j]=old;
cores[j*3+0]=dataset[old*3+0];
cores[j*3+1]=dataset[old*3+1];
cores[j*3+2]=dataset[old*3+2];
}
j++;
}
old=dists_i[0];
__syncthreads();
}
}
// Original code of FPS in PointNet++
__global__ void farthestpointsamplingallKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
// input: dataset (b,n,3), cores (b,m,3), dist (b,n), flag (b,n)
__global__ void knearkernel (int b,int n,int m,const float * dataset,float * cores,float * dist,int * flag){
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
dist[k] = 1e3;
for (int i=0; i<m; i++){
float x2 = cores[i*3+0];
float y2 = cores[i*3+1];
float z2 = cores[i*3+2];
float d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<dist[k]){
dist[k] = d;
flag[k] = i;
}
}
}
__syncthreads();
}
// input: dataset (b,n,3), cores (b,m,3), flag (b,n)
// temp_cluster: (m,1024,3), dist_temp: (m, 1024), dist_temp_id: (m, 1024), temp_x: (m, 1024), output_r: (b,m)
__global__ void rbskernel (int b,int n,int m,const float * dataset,float * cores,int * flag,float * output_r){
__shared__ float temp_x[1024];
__shared__ float temp_y[1024];
__shared__ float temp_z[1024];
__shared__ int temp_x_id[1024];
__shared__ int temp_y_id[1024];
__shared__ int temp_z_id[1024];
__shared__ float dist_temp[1024];
__shared__ int dist_temp_id[1024];
__shared__ float temp_cluster[1024*3];
//assign points to block
__shared__ int cnt;
// ** On CUDA 11.1 and TensorFlow 2.4.1: when blockIdx.x=0, the block cannot update the shared variable **
if (blockIdx.x>0){
if (threadIdx.x==0){
for (int k=0;k<n;k++){
if (blockIdx.x-1==flag[k]){
temp_cluster[cnt*3+0] = dataset[k*3+0];
temp_cluster[cnt*3+1] = dataset[k*3+1];
temp_cluster[cnt*3+2] = dataset[k*3+2];
cnt+=1;
}
}
}
__syncthreads();
// compute min/max xyz
if (threadIdx.x<cnt){
temp_x[threadIdx.x] = temp_cluster[threadIdx.x*3+0];
temp_y[threadIdx.x] = temp_cluster[threadIdx.x*3+1];
temp_z[threadIdx.x] = temp_cluster[threadIdx.x*3+2];
temp_x_id[threadIdx.x] = threadIdx.x;
temp_y_id[threadIdx.x] = threadIdx.x;
temp_z_id[threadIdx.x] = threadIdx.x;
}
else{
temp_x[threadIdx.x] = temp_cluster[0];
temp_y[threadIdx.x] = temp_cluster[1];
temp_z[threadIdx.x] = temp_cluster[2];
}
__syncthreads();
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
int i3=((threadIdx.x*2+1)<<u)-1;
int i4=((threadIdx.x*2+2)<<u)-1;
float min_x = min(temp_x[i1], temp_x[i2]);
float max_x = max(temp_x[i4], temp_x[i3]);
int x_i3_id = temp_x_id[i3];
if (min_x == temp_x[i2]){
temp_x_id[i1] = temp_x_id[i2];
}
if (max_x == temp_x[i3]){
temp_x_id[i4] = x_i3_id;
}
temp_x[i1] = min_x;
temp_x[i4] = max_x;
float min_y = min(temp_y[i1], temp_y[i2]);
float max_y = max(temp_y[i4], temp_y[i3]);
int y_i3_id = temp_y_id[i3];
if (min_y == temp_y[i2]){
temp_y_id[i1] = temp_y_id[i2];
}
if (max_y == temp_y[i3]){
temp_y_id[i4] = y_i3_id;
}
temp_y[i1] = min_y;
temp_y[i4] = max_y;
float min_z = min(temp_z[i1], temp_z[i2]);
float max_z = max(temp_z[i4], temp_z[i3]);
int z_i3_id = temp_z_id[i3];
if (min_z == temp_z[i2]){
temp_z_id[i1] = temp_z_id[i2];
}
if (max_z == temp_z[i3]){
temp_z_id[i4] = z_i3_id;
}
temp_z[i1] = min_z;
temp_z[i4] = max_z;
}
}
__syncthreads();
if (threadIdx.x==0){
float min_x_x = temp_cluster[temp_x_id[0]*3+0];
float min_x_y = temp_cluster[temp_x_id[0]*3+1];
float min_x_z = temp_cluster[temp_x_id[0]*3+2];
float max_x_x = temp_cluster[temp_x_id[1023]*3+0];
float max_x_y = temp_cluster[temp_x_id[1023]*3+1];
float max_x_z = temp_cluster[temp_x_id[1023]*3+2];
float min_y_x = temp_cluster[temp_y_id[0]*3+0];
float min_y_y = temp_cluster[temp_y_id[0]*3+1];
float min_y_z = temp_cluster[temp_y_id[0]*3+2];
float max_y_x = temp_cluster[temp_y_id[1023]*3+0];
float max_y_y = temp_cluster[temp_y_id[1023]*3+1];
float max_y_z = temp_cluster[temp_y_id[1023]*3+2];
float min_z_x = temp_cluster[temp_z_id[0]*3+0];
float min_z_y = temp_cluster[temp_z_id[0]*3+1];
float min_z_z = temp_cluster[temp_z_id[0]*3+2];
float max_z_x = temp_cluster[temp_z_id[1023]*3+0];
float max_z_y = temp_cluster[temp_z_id[1023]*3+1];
float max_z_z = temp_cluster[temp_z_id[1023]*3+2];
float d_x = (min_x_x-max_x_x)*(min_x_x-max_x_x)+(min_x_y-max_x_y)*(min_x_y-max_x_y)+(min_x_z-max_x_z)*(min_x_z-max_x_z);
float d_y = (min_y_x-max_y_x)*(min_y_x-max_y_x)+(min_y_y-max_y_y)*(min_y_y-max_y_y)+(min_y_z-max_y_z)*(min_y_z-max_y_z);
float d_z = (min_z_x-max_z_x)*(min_z_x-max_z_x)+(min_z_y-max_z_y)*(min_z_y-max_z_y)+(min_z_z-max_z_z)*(min_z_z-max_z_z);
float max_d = max(max(d_x,d_y),d_z);
output_r[(blockIdx.x-1)] = sqrt(max_d)/2.0;
if (max_d==d_x){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_x_x+max_x_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_x_y+max_x_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_x_z+max_x_z);
}
if (max_d==d_y){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_y_x+max_y_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_y_y+max_y_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_y_z+max_y_z);
}
if (max_d==d_z){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_z_x+max_z_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_z_y+max_z_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_z_z+max_z_z);
}
}
__syncthreads();
// compute rbs
__shared__ int break_flag;
while (break_flag==0) {
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
if (threadIdx.x<cnt){
float x1 = temp_cluster[threadIdx.x*3+0];
float y1 = temp_cluster[threadIdx.x*3+1];
float z1 = temp_cluster[threadIdx.x*3+2];
dist_temp[threadIdx.x] = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist_temp_id[threadIdx.x] = threadIdx.x;
}
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist_temp[i1]<dist_temp[i2]){
dist_temp[i1]=dist_temp[i2];
dist_temp_id[i1]=dist_temp_id[i2];
}
}
}
__syncthreads();
if (threadIdx.x==0){
float outlier_dist = sqrt(dist_temp[0]);
if (outlier_dist>output_r[blockIdx.x-1]){
int outlier_id = dist_temp_id[0];
float outlier_x = temp_cluster[outlier_id*3+0];
float outlier_y = temp_cluster[outlier_id*3+1];
float outlier_z = temp_cluster[outlier_id*3+2];
float coef = 0.5/outlier_dist*(outlier_dist-output_r[blockIdx.x-1]);
cores[(blockIdx.x-1)*3+0] = cores[(blockIdx.x-1)*3+0] + (outlier_x-cores[(blockIdx.x-1)*3+0])*coef;
cores[(blockIdx.x-1)*3+1] = cores[(blockIdx.x-1)*3+1] + (outlier_y-cores[(blockIdx.x-1)*3+1])*coef;
cores[(blockIdx.x-1)*3+2] = cores[(blockIdx.x-1)*3+2] + (outlier_z-cores[(blockIdx.x-1)*3+2])*coef;
output_r[blockIdx.x-1] = 1.05*0.5*(outlier_dist+output_r[blockIdx.x-1]);
}
else{
break_flag=1;
}
}
__syncthreads();
}
}
}
// input: dataset (b,n,3), cores (b,m,3), output_r: (b,m), dist2cores: (b,m,10240), max_temp: (b,m,10)
__global__ void updateradius(int b,int n,int m,const float * dataset,float * cores,float * output_r){
if (blockIdx.x>0){
__shared__ float dist2core[1024];
int cluster_id = 1e2;
float max_dist = 0.0;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float dist_old = 1e3;
for (int i=0; i<m; i++){
float x0 = cores[i*3+0];
float y0 = cores[i*3+1];
float z0 = cores[i*3+2];
float dist = sqrt((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1));
if (dist<dist_old){
cluster_id = i;
dist_old = dist;
}
}
if ( (cluster_id==(blockIdx.x-1)) && (dist_old>max_dist) ){
max_dist = dist_old;
}
}
dist2core[threadIdx.x] = max_dist;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist2core[i1]<dist2core[i2]){
dist2core[i1]=dist2core[i2];
}
}
}
__syncthreads();
if (threadIdx.x==0) {
output_r[blockIdx.x-1] = max(0.15,dist2core[0]);
}
}
}
// input: dataset (b,n,3), cores (b,m,3), output_r: (b,m), count: (b,m), local_region(b,m,1024,3)
__global__ void ballquery (int b,int n,int m,const float * dataset,float * cores,float * output_r,float * local_region,int * count){
__shared__ float dist2cores[10240];
if (blockIdx.x>0){
count[blockIdx.x-1] = 0;
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float d = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist2cores[k] = sqrt(d);
}
__syncthreads();
if (threadIdx.x==0){
for (int i=0;i<n;i++){
if (dist2cores[i]<=output_r[blockIdx.x-1]){
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+0]=dataset[i*3+0];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+1]=dataset[i*3+1];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+2]=dataset[i*3+2];
count[blockIdx.x-1] += 1;
}
}
}
__syncthreads();
}
}
void farthestpointsamplingallLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
hipLaunchKernelGGL(( farthestpointsamplingallKernel), dim3(32),dim3(512), 0, 0, b,n,m,inp,temp,out);
}
void farthestpointsamplingLauncher(int b,int n,int m,float r,int minnum,const float * dataset,float * temp,int * idxs,float * cores){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(1),dim3(1024), 0, 0, b,n,m,r,minnum,dataset,temp,idxs,cores);
}
void samplegroupLauncher (int b,int n,int m,float r,int minnum,const float * dataset, float * temp, int * idxs,float * cores, float * dist, int * flag,
float * output_r, float * local_region, int * cnt){
hipLaunchKernelGGL(( farthestpointsamplingKernel), dim3(1),dim3(1024), 0, 0, b,n,m,r,minnum,dataset,temp,idxs,cores);
hipLaunchKernelGGL(( knearkernel), dim3(1),dim3(1024), 0, 0, b,n,m,dataset,cores,dist,flag);
hipLaunchKernelGGL(( rbskernel), dim3(m+1), dim3(1024), 0, 0, b,n,m,dataset,cores,flag,output_r);
hipLaunchKernelGGL(( updateradius), dim3(m+1), dim3(1024), 0, 0, b,n,m,dataset,cores,output_r);
hipLaunchKernelGGL(( ballquery), dim3(m+1), dim3(1024), 0, 0, b,n,m,dataset,cores,output_r,local_region,cnt);
}
| b19f5003bb81579cd785a4f58c6a15c49f54e033.cu | // FPS with local density condition
__global__ void farthestpointsamplingKernel (int b,int n,int m, float r, int minnum,const float * dataset,float * temp,int * idxs, float * cores){
if (m<=0)
return;
const int BlockSize=1024;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
__shared__ int num_neighbor[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
int old=0; // The last sampled point id
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[j]=1e6;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[j];
}
__syncthreads();
int j=0;
while (j<m){
num_neighbor[threadIdx.x]=0;
int besti=0;
float best=-1;
float x1=dataset[old*3+0];
float y1=dataset[old*3+1];
float z1=dataset[old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[k*3+0];
y2=dataset[k*3+1];
z2=dataset[k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<=r*r){
num_neighbor[threadIdx.x]++;
}
float d2=min(d,td);
if (d2!=td)
temp[k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
num_neighbor[i1] = num_neighbor[i1] + num_neighbor[i2];
}
}
__syncthreads();
if (num_neighbor[0]>=minnum){
if (threadIdx.x==0){
idxs[j]=old;
cores[j*3+0]=dataset[old*3+0];
cores[j*3+1]=dataset[old*3+1];
cores[j*3+2]=dataset[old*3+2];
}
j++;
}
old=dists_i[0];
__syncthreads();
}
}
// Original code of FPS in PointNet++
__global__ void farthestpointsamplingallKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
if (m<=0)
return;
const int BlockSize=512;
__shared__ float dists[BlockSize];
__shared__ int dists_i[BlockSize];
const int BufferSize=3072;
__shared__ float buf[BufferSize*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
int old=0;
if (threadIdx.x==0)
idxs[i*m+0]=old;
for (int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
buf[j]=dataset[i*n*3+j];
}
__syncthreads();
for (int j=1;j<m;j++){
int besti=0;
float best=-1;
float x1=dataset[i*n*3+old*3+0];
float y1=dataset[i*n*3+old*3+1];
float z1=dataset[i*n*3+old*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float td=temp[blockIdx.x*n+k];
float x2,y2,z2;
if (k<BufferSize){
x2=buf[k*3+0];
y2=buf[k*3+1];
z2=buf[k*3+2];
}else{
x2=dataset[i*n*3+k*3+0];
y2=dataset[i*n*3+k*3+1];
z2=dataset[i*n*3+k*3+2];
}
float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
float d2=min(d,td);
if (d2!=td)
temp[blockIdx.x*n+k]=d2;
if (d2>best){
best=d2;
besti=k;
}
}
dists[threadIdx.x]=best;
dists_i[threadIdx.x]=besti;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dists[i1]<dists[i2]){
dists[i1]=dists[i2];
dists_i[i1]=dists_i[i2];
}
}
}
__syncthreads();
old=dists_i[0];
if (threadIdx.x==0)
idxs[i*m+j]=old;
}
}
}
// input: dataset (b,n,3), cores (b,m,3), dist (b,n), flag (b,n)
__global__ void knearkernel (int b,int n,int m,const float * dataset,float * cores,float * dist,int * flag){
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
dist[k] = 1e3;
for (int i=0; i<m; i++){
float x2 = cores[i*3+0];
float y2 = cores[i*3+1];
float z2 = cores[i*3+2];
float d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<dist[k]){
dist[k] = d;
flag[k] = i;
}
}
}
__syncthreads();
}
// input: dataset (b,n,3), cores (b,m,3), flag (b,n)
// temp_cluster: (m,1024,3), dist_temp: (m, 1024), dist_temp_id: (m, 1024), temp_x: (m, 1024), output_r: (b,m)
__global__ void rbskernel (int b,int n,int m,const float * dataset,float * cores,int * flag,float * output_r){
__shared__ float temp_x[1024];
__shared__ float temp_y[1024];
__shared__ float temp_z[1024];
__shared__ int temp_x_id[1024];
__shared__ int temp_y_id[1024];
__shared__ int temp_z_id[1024];
__shared__ float dist_temp[1024];
__shared__ int dist_temp_id[1024];
__shared__ float temp_cluster[1024*3];
//assign points to block
__shared__ int cnt;
// ** On CUDA 11.1 and TensorFlow 2.4.1: when blockIdx.x=0, the block cannot update the shared variable **
if (blockIdx.x>0){
if (threadIdx.x==0){
for (int k=0;k<n;k++){
if (blockIdx.x-1==flag[k]){
temp_cluster[cnt*3+0] = dataset[k*3+0];
temp_cluster[cnt*3+1] = dataset[k*3+1];
temp_cluster[cnt*3+2] = dataset[k*3+2];
cnt+=1;
}
}
}
__syncthreads();
// compute min/max xyz
if (threadIdx.x<cnt){
temp_x[threadIdx.x] = temp_cluster[threadIdx.x*3+0];
temp_y[threadIdx.x] = temp_cluster[threadIdx.x*3+1];
temp_z[threadIdx.x] = temp_cluster[threadIdx.x*3+2];
temp_x_id[threadIdx.x] = threadIdx.x;
temp_y_id[threadIdx.x] = threadIdx.x;
temp_z_id[threadIdx.x] = threadIdx.x;
}
else{
temp_x[threadIdx.x] = temp_cluster[0];
temp_y[threadIdx.x] = temp_cluster[1];
temp_z[threadIdx.x] = temp_cluster[2];
}
__syncthreads();
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
int i3=((threadIdx.x*2+1)<<u)-1;
int i4=((threadIdx.x*2+2)<<u)-1;
float min_x = min(temp_x[i1], temp_x[i2]);
float max_x = max(temp_x[i4], temp_x[i3]);
int x_i3_id = temp_x_id[i3];
if (min_x == temp_x[i2]){
temp_x_id[i1] = temp_x_id[i2];
}
if (max_x == temp_x[i3]){
temp_x_id[i4] = x_i3_id;
}
temp_x[i1] = min_x;
temp_x[i4] = max_x;
float min_y = min(temp_y[i1], temp_y[i2]);
float max_y = max(temp_y[i4], temp_y[i3]);
int y_i3_id = temp_y_id[i3];
if (min_y == temp_y[i2]){
temp_y_id[i1] = temp_y_id[i2];
}
if (max_y == temp_y[i3]){
temp_y_id[i4] = y_i3_id;
}
temp_y[i1] = min_y;
temp_y[i4] = max_y;
float min_z = min(temp_z[i1], temp_z[i2]);
float max_z = max(temp_z[i4], temp_z[i3]);
int z_i3_id = temp_z_id[i3];
if (min_z == temp_z[i2]){
temp_z_id[i1] = temp_z_id[i2];
}
if (max_z == temp_z[i3]){
temp_z_id[i4] = z_i3_id;
}
temp_z[i1] = min_z;
temp_z[i4] = max_z;
}
}
__syncthreads();
if (threadIdx.x==0){
float min_x_x = temp_cluster[temp_x_id[0]*3+0];
float min_x_y = temp_cluster[temp_x_id[0]*3+1];
float min_x_z = temp_cluster[temp_x_id[0]*3+2];
float max_x_x = temp_cluster[temp_x_id[1023]*3+0];
float max_x_y = temp_cluster[temp_x_id[1023]*3+1];
float max_x_z = temp_cluster[temp_x_id[1023]*3+2];
float min_y_x = temp_cluster[temp_y_id[0]*3+0];
float min_y_y = temp_cluster[temp_y_id[0]*3+1];
float min_y_z = temp_cluster[temp_y_id[0]*3+2];
float max_y_x = temp_cluster[temp_y_id[1023]*3+0];
float max_y_y = temp_cluster[temp_y_id[1023]*3+1];
float max_y_z = temp_cluster[temp_y_id[1023]*3+2];
float min_z_x = temp_cluster[temp_z_id[0]*3+0];
float min_z_y = temp_cluster[temp_z_id[0]*3+1];
float min_z_z = temp_cluster[temp_z_id[0]*3+2];
float max_z_x = temp_cluster[temp_z_id[1023]*3+0];
float max_z_y = temp_cluster[temp_z_id[1023]*3+1];
float max_z_z = temp_cluster[temp_z_id[1023]*3+2];
float d_x = (min_x_x-max_x_x)*(min_x_x-max_x_x)+(min_x_y-max_x_y)*(min_x_y-max_x_y)+(min_x_z-max_x_z)*(min_x_z-max_x_z);
float d_y = (min_y_x-max_y_x)*(min_y_x-max_y_x)+(min_y_y-max_y_y)*(min_y_y-max_y_y)+(min_y_z-max_y_z)*(min_y_z-max_y_z);
float d_z = (min_z_x-max_z_x)*(min_z_x-max_z_x)+(min_z_y-max_z_y)*(min_z_y-max_z_y)+(min_z_z-max_z_z)*(min_z_z-max_z_z);
float max_d = max(max(d_x,d_y),d_z);
output_r[(blockIdx.x-1)] = sqrt(max_d)/2.0;
if (max_d==d_x){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_x_x+max_x_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_x_y+max_x_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_x_z+max_x_z);
}
if (max_d==d_y){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_y_x+max_y_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_y_y+max_y_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_y_z+max_y_z);
}
if (max_d==d_z){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_z_x+max_z_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_z_y+max_z_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_z_z+max_z_z);
}
}
__syncthreads();
// compute rbs
__shared__ int break_flag;
while (break_flag==0) {
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
if (threadIdx.x<cnt){
float x1 = temp_cluster[threadIdx.x*3+0];
float y1 = temp_cluster[threadIdx.x*3+1];
float z1 = temp_cluster[threadIdx.x*3+2];
dist_temp[threadIdx.x] = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist_temp_id[threadIdx.x] = threadIdx.x;
}
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist_temp[i1]<dist_temp[i2]){
dist_temp[i1]=dist_temp[i2];
dist_temp_id[i1]=dist_temp_id[i2];
}
}
}
__syncthreads();
if (threadIdx.x==0){
float outlier_dist = sqrt(dist_temp[0]);
if (outlier_dist>output_r[blockIdx.x-1]){
int outlier_id = dist_temp_id[0];
float outlier_x = temp_cluster[outlier_id*3+0];
float outlier_y = temp_cluster[outlier_id*3+1];
float outlier_z = temp_cluster[outlier_id*3+2];
float coef = 0.5/outlier_dist*(outlier_dist-output_r[blockIdx.x-1]);
cores[(blockIdx.x-1)*3+0] = cores[(blockIdx.x-1)*3+0] + (outlier_x-cores[(blockIdx.x-1)*3+0])*coef;
cores[(blockIdx.x-1)*3+1] = cores[(blockIdx.x-1)*3+1] + (outlier_y-cores[(blockIdx.x-1)*3+1])*coef;
cores[(blockIdx.x-1)*3+2] = cores[(blockIdx.x-1)*3+2] + (outlier_z-cores[(blockIdx.x-1)*3+2])*coef;
output_r[blockIdx.x-1] = 1.05*0.5*(outlier_dist+output_r[blockIdx.x-1]);
}
else{
break_flag=1;
}
}
__syncthreads();
}
}
}
// input: dataset (b,n,3), cores (b,m,3); output: output_r (b,m) - for each core, the largest distance to a point assigned to it
__global__ void updateradius(int b,int n,int m,const float * dataset,float * cores,float * output_r){
if (blockIdx.x>0){
__shared__ float dist2core[1024];
int cluster_id = 1e2;
float max_dist = 0.0;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float dist_old = 1e3;
for (int i=0; i<m; i++){
float x0 = cores[i*3+0];
float y0 = cores[i*3+1];
float z0 = cores[i*3+2];
float dist = sqrt((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1));
if (dist<dist_old){
cluster_id = i;
dist_old = dist;
}
}
if ( (cluster_id==(blockIdx.x-1)) && (dist_old>max_dist) ){
max_dist = dist_old;
}
}
dist2core[threadIdx.x] = max_dist;
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist2core[i1]<dist2core[i2]){
dist2core[i1]=dist2core[i2];
}
}
}
__syncthreads();
if (threadIdx.x==0) {
output_r[blockIdx.x-1] = max(0.15,dist2core[0]);
}
}
}
// input: dataset (b,n,3), cores (b,m,3), output_r: (b,m), count: (b,m), local_region(b,m,1024,3)
__global__ void ballquery (int b,int n,int m,const float * dataset,float * cores,float * output_r,float * local_region,int * count){
__shared__ float dist2cores[10240];
if (blockIdx.x>0){
count[blockIdx.x-1] = 0;
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float d = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist2cores[k] = sqrt(d);
}
__syncthreads();
if (threadIdx.x==0){
for (int i=0;i<n;i++){
if (dist2cores[i]<=output_r[blockIdx.x-1]){
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+0]=dataset[i*3+0];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+1]=dataset[i*3+1];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+2]=dataset[i*3+2];
count[blockIdx.x-1] += 1;
}
}
}
__syncthreads();
}
}
void farthestpointsamplingallLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingallKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
void farthestpointsamplingLauncher(int b,int n,int m,float r,int minnum,const float * dataset,float * temp,int * idxs,float * cores){
farthestpointsamplingKernel<<<1,1024>>>(b,n,m,r,minnum,dataset,temp,idxs,cores);
}
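// samplegroupLauncher chains the kernels above: farthest point sampling picks the cores,
// knearkernel assigns every input point to its nearest core, rbskernel and updateradius
// estimate a per-core radius, and ballquery finally gathers up to 1024 points per core into
// local_region (this summary is inferred from the kernel names and launch order).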
void samplegroupLauncher (int b,int n,int m,float r,int minnum,const float * dataset, float * temp, int * idxs,float * cores, float * dist, int * flag,
float * output_r, float * local_region, int * cnt){
farthestpointsamplingKernel<<<1,1024>>>(b,n,m,r,minnum,dataset,temp,idxs,cores);
knearkernel<<<1,1024>>>(b,n,m,dataset,cores,dist,flag);
rbskernel<<<m+1, 1024>>>(b,n,m,dataset,cores,flag,output_r);
updateradius<<<m+1, 1024>>>(b,n,m,dataset,cores,output_r);
ballquery<<<m+1, 1024>>>(b,n,m,dataset,cores,output_r,local_region,cnt);
}
|
877358b879046e6a92d96a19d88560a4afe94508.hip | // !!! This is a file automatically generated by hipify!!!
#include <malloc.h>
#include <string.h>
#include <stdio.h>
#include <vector>
#include <fstream>
#include <iostream>
using namespace std;
#include <hipfft.h>
#define BATCH 1 // size of the data batch to process
#define CUDA_CHECK_RETURN(value) {\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
} // error-handling macro
#define CUFFT_CHECK_RETURN(value) {\
hipfftResult stat = value;\
if (stat != HIPFFT_SUCCESS) {\
fprintf(stderr, "Error at line %d in file %s\n",\
__LINE__, __FILE__);\
exit(1);\
}\
} // error-handling macro
int main(void) {
string line, buffer;
hipfftHandle plan; // FFT plan handle (needed so the library can optimize for the selected hardware)
hipfftComplex *hos_data, *dev_data; // host and device arrays of complex numbers
vector<string> file_string; // one line of the file = an array of 4 numbers kept as strings
vector<float> Wolf_nums; // Wolf numbers (whole disk)
vector<float> freq; // frequencies after the Fourier transform (one value per day)
vector<float> power; // spectrum magnitudes obtained from the Fourier transform
ifstream in;
for (int i = 1938; i <= 1991; i++) { // loop over the years
// open the data file for year i
string path = string("data/w") + to_string(i) + string(".dat");
in.open(path);
if (!in.is_open()) {
fprintf(stderr, "can't open a file data/w%d.dat\n", i);
return -1;
}
while(getline(in, line)) { // read one line of the file
buffer = "";
line += " "; //
for (int k = 0; k < line.size(); k++) {
if (line[k] != ' ') {
buffer += line[k]; // accumulate the number character by character in buffer
}
else { // a space means a complete number (as a string) has been read
if (buffer != "")
file_string.push_back(buffer);
buffer = "";
}
}
if (file_string.size() != 0) {
if (file_string[2] == "999") { // ,
file_string[2] = to_string(Wolf_nums.back()); //
}
Wolf_nums.push_back(stoi(file_string[2])); // convert the string to a number and append it to the array
file_string.clear(); // clear the parsed fields of this line
}
} //end of while(getline(in, line))
in.close();
} //end of for(int i = 1938; i <= 1991; i++)
int N = Wolf_nums.size();
hipMalloc((void**)&dev_data, sizeof(hipfftComplex) * N * BATCH);
hos_data = new hipfftComplex[N * BATCH];
for (int i = 0; i < N * BATCH; i++) {
hos_data[i].x = Wolf_nums[i]; // real part
hos_data[i].y = 0.0f; // imaginary part
}
hipMemcpy(dev_data, hos_data, sizeof(hipfftComplex) * N * BATCH, hipMemcpyHostToDevice);
// create the plan; the transform maps complex input to complex output:
CUFFT_CHECK_RETURN(hipfftPlan1d(&plan, N * BATCH, HIPFFT_C2C, BATCH));
// run the fast Fourier transform (FFT):
CUFFT_CHECK_RETURN(hipfftExecC2C(plan, dev_data, dev_data, HIPFFT_FORWARD));
// synchronize:
CUDA_CHECK_RETURN(hipDeviceSynchronize());
// copy the FFT result back from the device to the host:
CUDA_CHECK_RETURN(hipMemcpy(hos_data, dev_data, N * sizeof(hipfftComplex), hipMemcpyDeviceToHost));
power.resize(N / 2 + 1);
for (int i = 1; i <= N / 2; i++) {
// convert the values, since raw complex numbers are hard to interpret directly:
power[i] = sqrt(hos_data[i].x * hos_data[i].x + hos_data[i].y * hos_data[i].y);
}
float max_freq = 0.5; // maximum frequency
freq.resize(N / 2 + 1);
for (int i = 1; i <= N / 2; i++) {
// build a uniformly spaced grid of frequencies:
freq[i] = 1 / (float(i) / float(N/2) * max_freq);
}
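// Note: with max_freq = 0.5 this simplifies to freq[i] = N / i, i.e. the period (in days)
// associated with spectral bin i, which is why the dominant bin is later divided by 365.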
int maxind = 1; // find the maximum value
for (int i = 1 ; i <= N / 2; i++) {
if (power[i] > power[maxind])
maxind = i;
}
//freq[maxind] - is this the number of days at the peak frequency?
printf("calculated periodicity = %f years\n", freq[maxind] / 365);
hipfftDestroy(plan);
hipFree(dev_data);
delete[] hos_data; // hos_data was allocated with new[], so free() would be undefined behaviour
return 0;
}
| 877358b879046e6a92d96a19d88560a4afe94508.cu | #include <malloc.h>
#include <string.h>
#include <stdio.h>
#include <vector>
#include <fstream>
#include <iostream>
using namespace std;
#include <cufft.h>
#define BATCH 1 // size of the data batch to process
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error \"%s\" at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}\
} // error-handling macro
#define CUFFT_CHECK_RETURN(value) {\
cufftResult stat = value;\
if (stat != CUFFT_SUCCESS) {\
fprintf(stderr, "Error at line %d in file %s\n",\
__LINE__, __FILE__);\
exit(1);\
}\
} // error-handling macro
int main(void) {
string line, buffer;
cufftHandle plan; // FFT plan handle (needed so the library can optimize for the selected hardware)
cufftComplex *hos_data, *dev_data; // arrays of complex numbers
vector<string> file_string; // one line of the file = an array of 4 numbers kept as strings
vector<float> Wolf_nums; // Wolf numbers (whole disk)
vector<float> freq; // frequencies after the Fourier transform (one value per day)
vector<float> power; // spectrum magnitudes obtained from the Fourier transform
ifstream in;
for (int i = 1938; i <= 1991; i++) { // loop over the years
// open the data file for year i
string path = string("data/w") + to_string(i) + string(".dat");
in.open(path);
if (!in.is_open()) {
fprintf(stderr, "can't open a file data/w%d.dat\n", i);
return -1;
}
while(getline(in, line)) { // read one line of the file
buffer = "";
line += " "; //добавить пробел для обработки последнего числа
for (int k = 0; k < line.size(); k++) {
if (line[k] != ' ') {
buffer += line[k]; // accumulate the number character by character in buffer
}
else { // a space means a complete number (as a string) has been read
if (buffer != "")
file_string.push_back(buffer);
buffer = "";
}
}
if (file_string.size() != 0) {
if (file_string[2] == "999") { //если число Вульфа неизвестно,
file_string[2] = to_string(Wolf_nums.back()); //то взять предыдущее значение
}
Wolf_nums.push_back(stoi(file_string[2])); // convert the string to a number and append it to the array
file_string.clear(); // clear the parsed fields of this line
}
} //end of while(getline(in, line))
in.close();
} //end of for(int i = 1938; i <= 1991; i++)
int N = Wolf_nums.size();
cudaMalloc((void**)&dev_data, sizeof(cufftComplex) * N * BATCH);
hos_data = new cufftComplex[N * BATCH];
for (int i = 0; i < N * BATCH; i++) {
hos_data[i].x = Wolf_nums[i]; // real part
hos_data[i].y = 0.0f; // imaginary part
}
cudaMemcpy(dev_data, hos_data, sizeof(cufftComplex) * N * BATCH, cudaMemcpyHostToDevice);
// create the plan; the Fourier transform maps complex input to complex output:
CUFFT_CHECK_RETURN(cufftPlan1d(&plan, N * BATCH, CUFFT_C2C, BATCH));
// run the fast Fourier transform (FFT):
CUFFT_CHECK_RETURN(cufftExecC2C(plan, dev_data, dev_data, CUFFT_FORWARD));
// synchronize:
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
// copy the FFT result back from the device to the host:
CUDA_CHECK_RETURN(cudaMemcpy(hos_data, dev_data, N * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
power.resize(N / 2 + 1);
for (int i = 1; i <= N / 2; i++) {
// convert the values, since raw complex numbers are hard to interpret directly:
power[i] = sqrt(hos_data[i].x * hos_data[i].x + hos_data[i].y * hos_data[i].y);
}
float max_freq = 0.5; // maximum frequency
freq.resize(N / 2 + 1);
for (int i = 1; i <= N / 2; i++) {
// build a uniformly spaced grid of frequencies:
freq[i] = 1 / (float(i) / float(N/2) * max_freq);
}
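// Note: with max_freq = 0.5 this simplifies to freq[i] = N / i, i.e. the period (in days)
// associated with spectral bin i, which is why the dominant bin is later divided by 365.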
int maxind = 1; // find the maximum value
for (int i = 1 ; i <= N / 2; i++) {
if (power[i] > power[maxind])
maxind = i;
}
//freq[maxind] - is this the number of days at the peak frequency?
printf("calculated periodicity = %f years\n", freq[maxind] / 365);
cufftDestroy(plan);
cudaFree(dev_data);
delete[] hos_data; // hos_data was allocated with new[], so free() would be undefined behaviour
return 0;
}
|
3621e44e76d258409ee4f38b51cf4e6fab54bb56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstring>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define size 21
using namespace std;
__global__ void jogo(bool* env) {
int x = threadIdx.x;
int y = threadIdx.y;
// map the wrapped (toroidal) borders around the cell being analyzed
int wrapNorth = ((size + y - 1) % size) * size;
int wrapSouth = ((size + y + 1) % size) * size;
int wrapEast = (size + x + 1) % size;
int wrapWest = (size + x - 1) % size;
// count how many live neighbors there are
int count = 0;
if (env[y * size + wrapEast]) count++;
if (env[y * size + wrapWest]) count++;
if (env[wrapNorth + wrapEast]) count ++;
if (env[wrapNorth + wrapWest]) count++;
if (env[wrapSouth + wrapEast]) count++;
if (env[wrapSouth + wrapWest]) count++;
if (env[wrapNorth + x]) count++;
if (env[wrapSouth + x]) count++;
__syncthreads(); // make sure every thread has finished counting live neighbors before any cell is updated
if(count < 2 || count > 3)
env[y * size + x] = false;
if(count == 3)
env[y * size + x] = true;
}
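// Note: the entire board is handled by a single thread block (one thread per cell), so this
// kernel only works while size*size stays within the 1024-threads-per-block limit.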
void print(bool* env) {
for(int i = 0; i < size * size; i++) {
cout << (env[i] ? '#' : ' ');
if (i % size == size - 1) cout << endl; // newline after the last column of each row
}
}
int main(){
int parada = 0;
bool env[size * size] = {false}; // the board is flattened into a 1D array (explicitly zero-initialized)
env[ 5*size + 7] = true;
env[ 6*size + 8] = true;
env[ 8*size +8] = true;
env[ 6*size +6] = true;
env[ 8*size +10] = true;
env[ 9*size +10] = true;
env[ 8*size +11] = true;
env[10*size +11] = true;
env[10*size +12] = true;
bool* dEnv;
hipMalloc((void**) &dEnv, size * size * sizeof(bool)); // allocate the board on the GPU
hipMemcpy(dEnv, env, size * size * sizeof(bool), hipMemcpyHostToDevice); // copy the board to the GPU
dim3 golThreads(size, size); // thread block dimensions (one thread per cell)
while (parada < 100) { // stop after 100 iterations
system("clear");
hipLaunchKernelGGL(( jogo), dim3(1), dim3(golThreads), 0, 0, dEnv); // kernel launch
hipMemcpy(env, dEnv, size * size * sizeof(bool), hipMemcpyDeviceToHost); // copy the board back to the CPU
print(env);
usleep(100000);
parada++;
}
} | 3621e44e76d258409ee4f38b51cf4e6fab54bb56.cu | #include <iostream>
#include <cstring>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define size 21
using namespace std;
__global__ void jogo(bool* env) {
int x = threadIdx.x;
int y = threadIdx.y;
// map the wrapped (toroidal) borders around the cell being analyzed
int wrapNorth = ((size + y - 1) % size) * size;
int wrapSouth = ((size + y + 1) % size) * size;
int wrapEast = (size + x + 1) % size;
int wrapWest = (size + x - 1) % size;
// count how many live neighbors there are
int count = 0;
if (env[y * size + wrapEast]) count++;
if (env[y * size + wrapWest]) count++;
if (env[wrapNorth + wrapEast]) count ++;
if (env[wrapNorth + wrapWest]) count++;
if (env[wrapSouth + wrapEast]) count++;
if (env[wrapSouth + wrapWest]) count++;
if (env[wrapNorth + x]) count++;
if (env[wrapSouth + x]) count++;
__syncthreads(); // make sure every thread has finished counting live neighbors before any cell is updated
if(count < 2 || count > 3)
env[y * size + x] = false;
if(count == 3)
env[y * size + x] = true;
}
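// Note: the entire board is handled by a single thread block (one thread per cell), so this
// kernel only works while size*size stays within the 1024-threads-per-block limit.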
void print(bool* env) {
for(int i = 0; i < size * size; i++) {
cout << (env[i] ? '#' : ' ');
if (i % size == size - 1) cout << endl; // newline after the last column of each row
}
}
int main(){
int parada = 0;
bool env[size * size] = {false}; // the board is flattened into a 1D array (explicitly zero-initialized)
env[ 5*size + 7] = true;
env[ 6*size + 8] = true;
env[ 8*size +8] = true;
env[ 6*size +6] = true;
env[ 8*size +10] = true;
env[ 9*size +10] = true;
env[ 8*size +11] = true;
env[10*size +11] = true;
env[10*size +12] = true;
bool* dEnv;
cudaMalloc((void**) &dEnv, size * size * sizeof(bool)); // allocate the board on the GPU
cudaMemcpy(dEnv, env, size * size * sizeof(bool), cudaMemcpyHostToDevice); // copy the board to the GPU
dim3 golThreads(size, size); // thread block dimensions (one thread per cell)
while (parada < 100) { // stop after 100 iterations
system("clear");
jogo<<<1, golThreads>>>(dEnv); // kernel launch
cudaMemcpy(env, dEnv, size * size * sizeof(bool), cudaMemcpyDeviceToHost); // copy the board back to the CPU
print(env);
usleep(100000);
parada++;
}
} |
61abed1b45910d84f8743fe01c908fb18168fb33.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
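// (Why two velocity buffers: every boid reads its neighbours' velocities for the current step from
// vel1 while its freshly computed velocity is written to vel2; updating vel1 in place would let some
// threads read half-updated values. The two buffers are swapped after each step.)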
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_sorted;
glm::vec3 *dev_vel1_sorted;
glm::vec3 *dev_vel2_sorted;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_pos_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos_sorted failed!");
hipMalloc((void**)&dev_vel1_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1_sorted failed!");
hipMalloc((void**)&dev_vel2_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2_sorted failed!");
dev_thrust_particleArrayIndices=thrust::device_pointer_cast(dev_particleArrayIndices);
dev_thrust_particleGridIndices=thrust::device_pointer_cast(dev_particleGridIndices);
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// TODO-1.2
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
glm::vec3 currBoidPos = pos[iSelf];
glm::vec3 currBoidVel = vel[iSelf];
float rule1N = 0.0f;
float rule3N = 0.0f;
for (int i = 0; i < N; i++) {
if (i != iSelf) {
glm::vec3 tempBoidPos = pos[i];
glm::vec3 tempBoidVel = vel[i];
float dist = glm::distance(tempBoidPos, currBoidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += tempBoidPos;
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (tempBoidPos - currBoidPos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += tempBoidVel;
rule3N++;
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - currBoidPos) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 finalVec = currBoidVel + perceived_center + c + perceived_velocity;
return finalVec;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 newVel = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
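// (Writing into vel1 here would race with the other threads that are still reading vel1 for
// their own neighbour sums; vel2 keeps this step's reads and writes separate.)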
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
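// (Since x is the fastest-varying index in the layout below, iterating z outermost, then y,
// then x innermost walks consecutive cell indices and is the most memory-friendly order.)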
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Label each boid with the index of its grid cell.
glm::ivec3 currBoidPos = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(currBoidPos.x, currBoidPos.y, currBoidPos.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
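// (Resetting every start/end entry to -1 before kernIdentifyCellStartEnd runs marks cells that
// contain no boids, so the neighbour-search kernels can recognise and skip them.)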
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int curr = particleGridIndices[index];
if (index >= 1) {
int prev = particleGridIndices[index - 1]; // only read the previous entry once we know it exists
if (prev != curr) {
gridCellEndIndices[prev] = index - 1;
gridCellStartIndices[curr] = index;
}
}
else {
gridCellStartIndices[curr] = index;
}
if (index == N - 1) { // the last occupied cell would otherwise never receive an end index
gridCellEndIndices[curr] = index;
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify which cells may contain neighbors. This isn't always 8.
glm::ivec3 cellIndex = (pos[index] - gridMin) * inverseCellWidth;
cellIndex = cellIndex - 1;
cellIndex.x = imax(0, cellIndex.x);
cellIndex.y = imax(0, cellIndex.y);
cellIndex.z = imax(0, cellIndex.z);
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
float rule1N = 0;
float rule3N = 0;
// - For each cell, read the start/end indices in the boid pointer array.
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
if (cellIndex.x + i < gridResolution && cellIndex.y + j < gridResolution && cellIndex.z + k < gridResolution) {
int gridIndex = gridIndex3Dto1D(cellIndex.x + i, cellIndex.y + j, cellIndex.z + k, gridResolution);
int gridStartIndex = gridCellStartIndices[gridIndex];
int gridEndIndex = gridCellEndIndices[gridIndex];
if (gridStartIndex < 0) { continue; } // empty cell: start/end are still -1 from kernResetIntBuffer
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int x = gridStartIndex; x <= gridEndIndex; x++) {
int currIndex = particleArrayIndices[x];
if (currIndex != index) { // compare boid indices, not positions in the sorted index array
float dist = glm::distance(pos[currIndex], pos[index]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += pos[currIndex];
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (pos[currIndex] - pos[index]);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += vel1[currIndex];
rule3N++;
}
}
}
}
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - pos[index]) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 newVel = vel1[index] + perceived_center + c + perceived_velocity;
// - Clamp the speed change before putting the new speed in vel2
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
vel2[index] = newVel;
}
__global__ void kernRearrangeDataPointers(int N, glm::vec3 *prev_buffer, glm::vec3 *new_buffer, int *particleArrayIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
new_buffer[index] = prev_buffer[particleArrayIndices[index]];
}
__global__ void kernSwapRearrangedDataPointers(int N, glm::vec3 *prev_buffer, glm::vec3 *new_buffer, int *particleArrayIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
prev_buffer[particleArrayIndices[index]] = new_buffer[index];
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::ivec3 cellIndex = (pos[index] - gridMin) * inverseCellWidth;
cellIndex = cellIndex - 1;
cellIndex.x = imax(0, cellIndex.x);
cellIndex.y = imax(0, cellIndex.y);
cellIndex.z = imax(0, cellIndex.z);
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
float rule1N = 0;
float rule3N = 0;
// - Identify which cells may contain neighbors. This isn't always 8.
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
if (cellIndex.x + i < gridResolution && cellIndex.y + j < gridResolution && cellIndex.z + k < gridResolution) {
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
int gridIndex = gridIndex3Dto1D(cellIndex.x + i, cellIndex.y + j, cellIndex.z + k, gridResolution);
int gridStartIndex = gridCellStartIndices[gridIndex];
int gridEndIndex = gridCellEndIndices[gridIndex];
if (gridStartIndex < 0) { continue; } // empty cell: start/end are still -1 from kernResetIntBuffer
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int currIndex = gridStartIndex; currIndex <= gridEndIndex; currIndex++) {
if (currIndex != index) {
float dist = glm::distance(pos[currIndex], pos[index]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += pos[currIndex];
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (pos[currIndex] - pos[index]);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += vel1[currIndex];
rule3N++;
}
}
}
}
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - pos[index]) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 newVel = vel1[index] + perceived_center + c + perceived_velocity;
// - Clamp the speed change before putting the new speed in vel2
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(N, dev_pos,
dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
dim3 NBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(N, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices);
kernResetIntBuffer << <NBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
kernResetIntBuffer << <NBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(N, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> >(N, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
dim3 NBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, threadsPerBlock >> >(N, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernResetIntBuffer << <NBlocksPerGrid, threadsPerBlock >> >(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
kernResetIntBuffer << <NBlocksPerGrid, threadsPerBlock >> >(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_pos, dev_pos_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_pos failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel1, dev_vel1_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_vel1 failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel2, dev_vel2_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_vel2 failed!");
//- Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, threadsPerBlock >> >(N, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos_sorted, dev_vel1_sorted, dev_vel2_sorted);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, threadsPerBlock >> >(N, dt, dev_pos_sorted, dev_vel2_sorted);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_pos, dev_pos_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_pos failed!");
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel1, dev_vel2_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_vel1 failed!");
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel2, dev_vel1_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_vel2 failed!");
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
hipFree(dev_pos_sorted);
hipFree(dev_vel1_sorted);
hipFree(dev_vel2_sorted);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 61abed1b45910d84f8743fe01c908fb18168fb33.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
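// (Why two velocity buffers: every boid reads its neighbours' velocities for the current step from
// vel1 while its freshly computed velocity is written to vel2; updating vel1 in place would let some
// threads read half-updated values. The two buffers are swapped after each step.)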
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_pos_sorted;
glm::vec3 *dev_vel1_sorted;
glm::vec3 *dev_vel2_sorted;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
cudaMalloc((void**)&dev_pos_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos_sorted failed!");
cudaMalloc((void**)&dev_vel1_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1_sorted failed!");
cudaMalloc((void**)&dev_vel2_sorted, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2_sorted failed!");
dev_thrust_particleArrayIndices=thrust::device_pointer_cast(dev_particleArrayIndices);
dev_thrust_particleGridIndices=thrust::device_pointer_cast(dev_particleGridIndices);
cudaThreadSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaThreadSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// TODO-1.2
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
glm::vec3 currBoidPos = pos[iSelf];
glm::vec3 currBoidVel = vel[iSelf];
float rule1N = 0.0f;
float rule3N = 0.0f;
for (int i = 0; i < N; i++) {
if (i != iSelf) {
glm::vec3 tempBoidPos = pos[i];
glm::vec3 tempBoidVel = vel[i];
float dist = glm::distance(tempBoidPos, currBoidPos);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += tempBoidPos;
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (tempBoidPos - currBoidPos);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += tempBoidVel;
rule3N++;
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - currBoidPos) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 finalVec = currBoidVel + perceived_center + c + perceived_velocity;
return finalVec;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// Compute a new velocity based on pos and vel1
glm::vec3 newVel = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
// Record the new velocity into vel2. Question: why NOT vel1?
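// (Writing into vel1 here would race with the other threads that are still reading vel1 for
// their own neighbour sums; vel2 keeps this step's reads and writes separate.)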
vel2[index] = newVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
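// (Since x is the fastest-varying index in the layout below, iterating z outermost, then y,
// then x innermost walks consecutive cell indices and is the most memory-friendly order.)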
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Label each boid with the index of its grid cell.
glm::ivec3 currBoidPos = (pos[index] - gridMin) * inverseCellWidth;
gridIndices[index] = gridIndex3Dto1D(currBoidPos.x, currBoidPos.y, currBoidPos.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
int curr = particleGridIndices[index];
if (index >= 1) {
int prev = particleGridIndices[index - 1]; // only read the previous entry once we know it exists
if (prev != curr) {
gridCellEndIndices[prev] = index - 1;
gridCellStartIndices[curr] = index;
}
}
else {
gridCellStartIndices[curr] = index;
}
if (index == N - 1) { // the last occupied cell would otherwise never receive an end index
gridCellEndIndices[curr] = index;
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// - Identify which cells may contain neighbors. This isn't always 8.
glm::ivec3 cellIndex = (pos[index] - gridMin) * inverseCellWidth;
cellIndex = cellIndex - 1;
cellIndex.x = imax(0, cellIndex.x);
cellIndex.y = imax(0, cellIndex.y);
cellIndex.z = imax(0, cellIndex.z);
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
float rule1N = 0;
float rule3N = 0;
// - For each cell, read the start/end indices in the boid pointer array.
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
if (cellIndex.x + i < gridResolution && cellIndex.y + j < gridResolution && cellIndex.z + k < gridResolution) {
int gridIndex = gridIndex3Dto1D(cellIndex.x + i, cellIndex.y + j, cellIndex.z + k, gridResolution);
int gridStartIndex = gridCellStartIndices[gridIndex];
int gridEndIndex = gridCellEndIndices[gridIndex];
if (gridStartIndex < 0) { continue; } // empty cell: start/end are still -1 from kernResetIntBuffer
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int x = gridStartIndex; x <= gridEndIndex; x++) {
if (x != index) {
int currIndex = particleArrayIndices[x];
float dist = glm::distance(pos[currIndex], pos[index]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += pos[currIndex];
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (pos[currIndex] - pos[index]);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += vel1[currIndex];
rule3N++;
}
}
}
}
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - pos[index]) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 newVel = vel1[index] + perceived_center + c + perceived_velocity;
// - Clamp the speed change before putting the new speed in vel2
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
vel2[index] = newVel;
}
__global__ void kernRearrangeDataPointers(int N, glm::vec3 *prev_buffer, glm::vec3 *new_buffer, int *particleArrayIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
new_buffer[index] = prev_buffer[particleArrayIndices[index]];
}
__global__ void kernSwapRearrangedDataPointers(int N, glm::vec3 *prev_buffer, glm::vec3 *new_buffer, int *particleArrayIndices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
prev_buffer[particleArrayIndices[index]] = new_buffer[index];
}
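// kernRearrangeDataPointers gathers (new_buffer[i] = prev_buffer[indices[i]]) so the
// coherent search can read pos/vel contiguously per cell; kernSwapRearrangedDataPointers
// scatters the results back into the original ordering
// (prev_buffer[indices[i]] = new_buffer[i]) once the update kernels have run.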
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::ivec3 cellIndex = (pos[index] - gridMin) * inverseCellWidth;
cellIndex = cellIndex - 1;
cellIndex.x = imax(0, cellIndex.x);
cellIndex.y = imax(0, cellIndex.y);
cellIndex.z = imax(0, cellIndex.z);
glm::vec3 perceived_center = glm::vec3(0.0f);
glm::vec3 c = glm::vec3(0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f);
float rule1N = 0;
float rule3N = 0;
// - Identify which cells may contain neighbors. This isn't always 8.
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
for (int k = 0; k < 2; k++) {
if (cellIndex.x + i < gridResolution && cellIndex.y + j < gridResolution && cellIndex.z + k < gridResolution) {
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
int gridIndex = gridIndex3Dto1D(cellIndex.x + i, cellIndex.y + j, cellIndex.z + k, gridResolution);
int gridStartIndex = gridCellStartIndices[gridIndex];
int gridEndIndex = gridCellEndIndices[gridIndex];
// Skip cells that enclose no boids (their start index is still the -1 sentinel).
if (gridStartIndex < 0) {
continue;
}
// - Access each boid in the cell and compute velocity change from
// the boids' rules, if this boid is within the neighborhood distance.
for (int currIndex = gridStartIndex; currIndex <= gridEndIndex; currIndex++) {
if (currIndex != index) {
float dist = glm::distance(pos[currIndex], pos[index]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (dist < rule1Distance) {
perceived_center += pos[currIndex];
rule1N++;
}
// Rule 2: boids try to stay a distance d away from each other
if (dist < rule2Distance) {
c -= (pos[currIndex] - pos[index]);
}
// Rule 3: boids try to match the speed of surrounding boids
if (dist < rule3Distance) {
perceived_velocity += vel1[currIndex];
rule3N++;
}
}
}
}
}
}
}
if (rule1N > 0) {
perceived_center /= rule1N;
perceived_center = (perceived_center - pos[index]) * rule1Scale;
}
c *= rule2Scale;
if (rule3N > 0) {
perceived_velocity /= rule3N;
perceived_velocity *= rule3Scale;
}
glm::vec3 newVel = vel1[index] + perceived_center + c + perceived_velocity;
// - Clamp the speed change before putting the new speed in vel2
if (glm::length(newVel) > maxSpeed) {
newVel = glm::normalize(newVel) * maxSpeed;
}
vel2[index] = newVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(N, dev_pos,
dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// TODO-1.2 ping-pong the velocity buffers
std::swap(dev_vel1, dev_vel2);
}
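// The swap above is the ping-pong step: the brute-force kernel is handed dev_vel1 and
// dev_vel2 as separate read/write buffers, so exchanging the pointers lets the next
// frame consume the freshly written velocities without an extra copy.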
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
dim3 NBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(N, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices);
kernResetIntBuffer << <NBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
kernResetIntBuffer << <NBlocksPerGrid, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(N, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// - Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid, blockSize >> >(N, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(N, dt, dev_pos, dev_vel2);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
int N = numObjects;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
dim3 NBlocksPerGrid((gridCellCount + blockSize - 1) / blockSize);
kernComputeIndices << <fullBlocksPerGrid, threadsPerBlock >> >(N, gridSideCount,
gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices,
dev_particleGridIndices);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + N, dev_thrust_particleArrayIndices);
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
kernResetIntBuffer << <NBlocksPerGrid, threadsPerBlock >> >(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for start indices failed!");
kernResetIntBuffer << <NBlocksPerGrid, threadsPerBlock >> >(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer for end indices failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_particleGridIndices,
dev_gridCellStartIndices, dev_gridCellEndIndices);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_pos, dev_pos_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_pos failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel1, dev_vel1_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_vel1 failed!");
kernRearrangeDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel2, dev_vel2_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernRearrangeDataPointers for dev_vel2 failed!");
//- Perform velocity updates using neighbor search
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, threadsPerBlock >> >(N, gridSideCount, gridMinimum,
gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices,
dev_pos_sorted, dev_vel1_sorted, dev_vel2_sorted);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
// - Update positions
kernUpdatePos << <fullBlocksPerGrid, threadsPerBlock >> >(N, dt, dev_pos_sorted, dev_vel2_sorted);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_pos, dev_pos_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_pos failed!");
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel1, dev_vel2_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_vel1 failed!");
kernSwapRearrangedDataPointers << <fullBlocksPerGrid, threadsPerBlock >> >(N, dev_vel2, dev_vel1_sorted,
dev_particleArrayIndices);
checkCUDAErrorWithLine("kernSwapRearrangedDataPointers for dev_vel2 failed!");
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
cudaFree(dev_pos_sorted);
cudaFree(dev_vel1_sorted);
cudaFree(dev_vel2_sorted);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
383677f322846f3a4f837e667e02e044e9375d30.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/assert.h"
#include <gtest/gtest.h>
__global__ void test(size_t* a, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
a[i] *= 2;
}
}
TEST(LoDTensor, LoDInGPU) {
paddle::framework::LoDTensor lod_tensor;
paddle::platform::GPUPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
lod_tensor.Resize({14, 16});
lod_tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
auto lod = lod_tensor.lod();
hipLaunchKernelGGL(( test), dim3(1), dim3(8), 0, 0, lod[0].data(), lod[0].size());
hipDeviceSynchronize();
for (size_t i = 0; i < src_lod[0].size(); ++i) {
CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
}
}
| 383677f322846f3a4f837e667e02e044e9375d30.cu | /*
Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/assert.h"
#include <gtest/gtest.h>
__global__ void test(size_t* a, int size) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
i += blockDim.x * gridDim.x) {
a[i] *= 2;
}
}
TEST(LoDTensor, LoDInGPU) {
paddle::framework::LoDTensor lod_tensor;
paddle::platform::GPUPlace place(0);
paddle::framework::LoD src_lod;
src_lod.push_back(std::vector<size_t>{0, 2, 4, 6, 8, 10, 12, 14});
lod_tensor.Resize({14, 16});
lod_tensor.mutable_data<float>(place);
lod_tensor.set_lod(src_lod);
CHECK_EQ(lod_tensor.lod_element(0, 2).first, 4UL);
CHECK_EQ(lod_tensor.lod_element(0, 4).first, 8UL);
auto lod = lod_tensor.lod();
test<<<1, 8>>>(lod[0].data(), lod[0].size());
cudaDeviceSynchronize();
for (size_t i = 0; i < src_lod[0].size(); ++i) {
CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2);
}
}
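// The test doubles the level-0 LoD offsets directly inside a kernel and then checks
// them against the untouched host copy, verifying that the LoD storage held by
// LoDTensor is usable from device code without an explicit copy.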
|
6b4d81b8e6e456579a416c9ca06b70d1a9026b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// graph structure
#include "../matrix.h"
//Generic tools handling masks and fill
#include "bfs_tools.cu"
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int *col_mask, int *parents)
{
int *q = (int*)malloc(num_nodes * sizeof(int));
q[0] = src_node;
parents[src_node] = src_node;
int idx = -1;
int size = 1;
int found = 0;
while (idx+1 < size && !found) {
idx++;
int u = q[idx];
for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
int v = col_indices[i];
if (parents[v] == -1 && col_mask[i]) {
parents[v] = u;
if (v == dst_node) {
found = 1;
break;
}
else {
q[size] = v;
size++;
}
}
}
}
free(q);
}
hipError_t bfs_mask(int src_node, int dst_node, int num_nodes, int num_edges, int *row_offsets, int *col_indices, int *col_mask, int *parents)
{
#if 0
// TODO: use Gunrock's customized BFS here
ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets, col_indices, col_mask, parents);
return hipSuccess;
#else
typedef int VertexId;
typedef int SizeT;
typedef int Value;
typedef BFSProblem <VertexId,SizeT,Value,
true, // MARK_PREDECESSORS
true> // IDEMPOTENCE
Problem;
typedef BFSEnactor <Problem> Enactor;
hipError_t retval = hipSuccess;
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->InitBase2("BFS");
ContextPtr *context = (ContextPtr*)info->context;
hipStream_t *streams = (hipStream_t*)info->streams;
int *gpu_idx = new int[1];
gpu_idx[0] = 0;
Problem *problem = new Problem(false, false); //no direction optimized, no undirected
if (retval = util::GRError(problem->Init(
false, //stream_from_host (deprecated)
row_offsets,
col_indices,
col_mask,
parents,
num_nodes,
num_edges,
1,
NULL,
"random",
streams),
"BFS Problem Init failed", __FILE__, __LINE__)) return retval;
Enactor *enactor = new Enactor(1, gpu_idx);
if (retval = util::GRError(enactor->Init(context, problem),
"BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Reset(
src_node, enactor->GetFrontierType()),
"BFS Problem Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Reset(),
"BFS Enactor Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Enact(src_node),
"BFS Enact failed", __FILE__, __LINE__)) return retval;
// free memory
delete info;
delete problem;
delete enactor;
return retval;
#endif
}
//BFS gunrock implementation
int bfs(csr_graph *g, int s, int t, int *q, int *p, int *mask)
{
// set all vertices as undiscovered (-1)
hipLaunchKernelGGL(( fill<-1>), dim3((g->n + 255)/256), dim3(256), 0, 0, g->n, p);
hipDeviceSynchronize();
// setup mask, TODO: move this step inside Gunrock to reduce BW
hipLaunchKernelGGL(( setup_mask), dim3((g->nnz + 255)/256), dim3(256), 0, 0, g->nnz, mask, g->vals_cap, g->vals_flow);
// run bfs (with mask)
bfs_mask(s, t, g->n, g->nnz, g->row_offsets, g->col_indices, mask, p);
// check if path exists
return (p[t] != -1);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 6b4d81b8e6e456579a416c9ca06b70d1a9026b0d.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
/**
* @file
* test_bfs.cu
*
* @brief Simple test driver program for breadth-first search.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <algorithm>
#include <iostream>
#include <fstream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/track_utils.cuh>
// BFS includes
#include <gunrock/app/bfs/bfs_enactor.cuh>
#include <gunrock/app/bfs/bfs_problem.cuh>
#include <gunrock/app/bfs/bfs_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// graph structure
#include "../matrix.h"
//Generic tools handling masks and fill
#include "bfs_tools.cu"
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::bfs;
void ref_bfs_mask(const int src_node, const int dst_node, const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, const int *col_mask, int *parents)
{
int *q = (int*)malloc(num_nodes * sizeof(int));
q[0] = src_node;
parents[src_node] = src_node;
int idx = -1;
int size = 1;
int found = 0;
while (idx+1 < size && !found) {
idx++;
int u = q[idx];
for (int i = row_offsets[u]; i < row_offsets[u+1]; i++) {
int v = col_indices[i];
if (parents[v] == -1 && col_mask[i]) {
parents[v] = u;
if (v == dst_node) {
found = 1;
break;
}
else {
q[size] = v;
size++;
}
}
}
}
free(q);
}
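// Note: ref_bfs_mask expects parents[] to be pre-initialized to -1 by the caller,
// treats an edge as traversable only when its col_mask entry is non-zero, and stops
// expanding as soon as dst_node is reached.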
cudaError_t bfs_mask(int src_node, int dst_node, int num_nodes, int num_edges, int *row_offsets, int *col_indices, int *col_mask, int *parents)
{
#if 0
// TODO: use Gunrock's customized BFS here
ref_bfs_mask(src_node, dst_node, num_nodes, num_edges, row_offsets, col_indices, col_mask, parents);
return cudaSuccess;
#else
typedef int VertexId;
typedef int SizeT;
typedef int Value;
typedef BFSProblem <VertexId,SizeT,Value,
true, // MARK_PREDECESSORS
true> // IDEMPOTENCE
Problem;
typedef BFSEnactor <Problem> Enactor;
cudaError_t retval = cudaSuccess;
Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;
info->InitBase2("BFS");
ContextPtr *context = (ContextPtr*)info->context;
cudaStream_t *streams = (cudaStream_t*)info->streams;
int *gpu_idx = new int[1];
gpu_idx[0] = 0;
Problem *problem = new Problem(false, false); //no direction optimized, no undirected
if (retval = util::GRError(problem->Init(
false, //stream_from_host (deprecated)
row_offsets,
col_indices,
col_mask,
parents,
num_nodes,
num_edges,
1,
NULL,
"random",
streams),
"BFS Problem Init failed", __FILE__, __LINE__)) return retval;
Enactor *enactor = new Enactor(1, gpu_idx);
if (retval = util::GRError(enactor->Init(context, problem),
"BFS Enactor Init failed.", __FILE__, __LINE__)) return retval;
if (retval = util::GRError(problem->Reset(
src_node, enactor->GetFrontierType()),
"BFS Problem Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Reset(),
"BFS Enactor Reset failed", __FILE__, __LINE__))
return retval;
if (retval = util::GRError(enactor->Enact(src_node),
"BFS Enact failed", __FILE__, __LINE__)) return retval;
// free memory
delete info;
delete problem;
delete enactor;
return retval;
#endif
}
//BFS gunrock implementation
int bfs(csr_graph *g, int s, int t, int *q, int *p, int *mask)
{
// set all vertices as undiscovered (-1)
fill<-1><<<(g->n + 255)/256, 256>>>(g->n, p);
cudaDeviceSynchronize();
// setup mask, TODO: move this step inside Gunrock to reduce BW
setup_mask<<<(g->nnz + 255)/256, 256>>>(g->nnz, mask, g->vals_cap, g->vals_flow);
// run bfs (with mask)
bfs_mask(s, t, g->n, g->nnz, g->row_offsets, g->col_indices, mask, p);
// check if path exists
return (p[t] != -1);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
c0f2fb16015c518ac3d76f56a014b1d88ed94776.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
#define _MAX_CUDA_THREADS 1024
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), (2*pad_size) * sizeof(double), 0, dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01)hipLaunchKernelGGL(( tasgpu_m11_to_01<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims * num_x, gpu_x_canonical);
}
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_eval_sharedpoints<double, 32>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, true>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<double, 32, false>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
hipLaunchKernelGGL(( tasgpu_cudaTgemm<double, 32, 96>), dim3(blocks), dim3(1024), 0, 0, M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_sparse_matmul<double, 64>), dim3(blocks), dim3(64), 0, 0, M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_fill<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
hipLaunchKernelGGL(( tascuda_sparse_to_dense<double, 64>), dim3(num_blocks), dim3(64), 0, 0, num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
| c0f2fb16015c518ac3d76f56a014b1d88ed94776.cu | /*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
#define _MAX_CUDA_THREADS 1024
namespace TasGrid{
void TasCUDA::dtrans2can(bool use01, int dims, int num_x, int pad_size, const double *gpu_trans_a, const double *gpu_trans_b, const double *gpu_x_transformed, double *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tasgpu_transformed_to_canonical<double, double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS, (2*pad_size) * sizeof(double)>>>(dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01) tasgpu_m11_to_01<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(dims * num_x, gpu_x_canonical);
}
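// dtrans2can converts user-domain points to the canonical domain used by the basis
// evaluations: the first kernel applies the per-dimension transform stored in
// gpu_trans_a / gpu_trans_b, and (presumably for rules defined on [0, 1]) the second
// kernel rescales from [-1, 1] to [0, 1] when use01 is set.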
// local polynomial basis functions, DENSE algorithm
void TasCUDA::devalpwpoly(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x, const double *gpu_nodes, const double *gpu_support, double *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly<double, 0, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2: tasgpu_devalpwpoly<double, 2, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2: tasgpu_devalpwpoly<double, 2, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<double, 1, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly<double, 2, rule_semilocalp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(int order, TypeOneDRule rule,
int dims, int num_x, int num_points,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill><<<num_blocks, THREADS>>>
(dims, num_x, num_points, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
void TasCUDA::devalpwpoly_sparse(int order, TypeOneDRule rule, int dims, int num_x, int num_points, const double *gpu_x,
const CudaVector<double> &gpu_nodes, const CudaVector<double> &gpu_support,
const CudaVector<int> &gpu_hpntr, const CudaVector<int> &gpu_hindx, const CudaVector<int> &gpu_hroots,
CudaVector<int> &gpu_spntr, CudaVector<int> &gpu_sindx, CudaVector<double> &gpu_svals){
gpu_spntr.resize(num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<double, 64, 46, false>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(cpu_spntr);
gpu_sindx.resize(nz);
gpu_svals.resize(nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<double, 64, 46, true>
(order, rule, dims, num_x, num_points, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
// Sequence Grid basis evaluations
void TasCUDA::devalseq(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes,
const CudaVector<int> &points, const CudaVector<double> &nodes, const CudaVector<double> &coeffs, double *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dseq_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
tasgpu_dseq_eval_sharedpoints<double, 32><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
// Fourier Grid basis evaluations
void TasCUDA::devalfor(int dims, int num_x, const std::vector<int> &max_levels, const double *gpu_x, const CudaVector<int> &num_nodes, const CudaVector<int> &points, double *gpu_wreal, double *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
CudaVector<int> gpu_offsets(offsets);
CudaVector<double> cache1D(num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dfor_build_cache<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
tasgpu_dfor_eval_sharedpoints<double, 32, true><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
tasgpu_dfor_eval_sharedpoints<double, 32, false><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > 65536) blocks = 65536;
tasgpu_cudaTgemm<double, 32, 96><<<blocks, 1024>>>(M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
tasgpu_sparse_matmul<double, 64><<<blocks, 64>>>(M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< 65536){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_fill<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= 65536) num_blocks = 65536;
tascuda_sparse_to_dense<double, 64><<<num_blocks, 64>>>(num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
|
b6c6a346bfcd6037bb829ed82d8dfabaf03bdda7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
// **===--------------------- Modify this function -----------------------===**
//! @param g_data input data in global memory
// result is expected in index 0 of g_data
//! @param n input number of elements to reduce from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, int n)
{
__shared__ float partialSum[2*16];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
partialSum[t] = g_data[start + t];
partialSum[blockDim.x + t] =
g_data[start + blockDim.x + t];
for (unsigned int stride = blockDim.x;
stride >= 1; stride >>= 1)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
if(t==0)
{
g_data[blockIdx.x]=partialSum[t];
//printf("hello");
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
| b6c6a346bfcd6037bb829ed82d8dfabaf03bdda7.cu | #ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
// **===--------------------- Modify this function -----------------------===**
//! @param g_data input data in global memory
// result is expected in index 0 of g_data
//! @param n input number of elements to reduce from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, int n)
{
__shared__ float partialSum[2*16];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
partialSum[t] = g_data[start + t];
partialSum[blockDim.x + t] =
g_data[start + blockDim.x + t];
for (unsigned int stride = blockDim.x;
stride >= 1; stride >>= 1)
{
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t+stride];
}
if(t==0)
{
g_data[blockIdx.x]=partialSum[t];
//printf("hello");
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
|
10dcadcdf15090d0b746384484bc3ce8ae3f3b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <wb.h>
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns) {
// TODO: Insert code to implement matrix multiplication here
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numARows) && (Col < numBColumns)){
float Pvalue = 0;
for (int i = 0; i < numBRows; ++i){
Pvalue += A[Row*numAColumns + i] * B[i*numBColumns + Col];
//printf("===========%f", Pvalue);
}
C[Row* numBColumns + Col] = Pvalue;
//printf("===========%f\n", Pvalue);
//printf("===========%f\n", C[Row* numBColumns + Col]);
}
}
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows;
int numCColumns;
args = wbArg_read(argc, argv);
#if LAB_DEBUG
std::cout << "Running GPU Matrix Multiplicaion ..." << std::endl;
#endif
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
// TODO: Allocate the hostC matrix
hostC = (float *)malloc((numBColumns*numARows)*sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
// TODO: Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
hipMalloc((void**)&deviceA, (numAColumns*numARows)*sizeof(float));
hipMalloc((void**)&deviceB, (numBColumns*numBRows)*sizeof(float));
hipMalloc((void**)&deviceC, (numCColumns*numCRows)*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
hipMemcpy(deviceA, hostA, (numAColumns*numARows)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, (numBColumns*numBRows)*sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
// Here you will have to use dim3
// dim3 blockDim( ... )
// dim3 gridDim( ... )
dim3 block_size(16, 16, 1);
dim3 grid_size((numCColumns - 1) / 16 + 1, (numCRows - 1) /16 + 1, 1);
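// (n - 1) / 16 + 1 is a ceiling division: a partial 16x16 tile at the right or bottom
// edge of C still gets its own block (the kernel's bounds check skips the excess threads).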
// wbLog(TRACE, "The block dimensions are ", blockDim.x, " x ", blockDim.y);
// wbLog(TRACE, "The grid dimensions are ", gridDim.x, " x ", gridDim.y);
wbTime_start(Compute, "Performing CUDA computation");
// TODO:: Launch the GPU Kernel here
matrixMultiply << <grid_size, block_size >> > (deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO:: Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, (numCColumns*numCRows)*sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO:: Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| 10dcadcdf15090d0b746384484bc3ce8ae3f3b0b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <wb.h>
// Compute C = A * B
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns) {
// TODO: Insert code to implement matrix multiplication here
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < numARows) && (Col < numBColumns)){
float Pvalue = 0;
for (int i = 0; i < numBRows; ++i){
Pvalue += A[Row*numAColumns + i] * B[i*numBColumns + Col];
//printf("===========%f", Pvalue);
}
C[Row* numBColumns + Col] = Pvalue;
//printf("===========%f\n", Pvalue);
//printf("===========%f\n", C[Row* numBColumns + Col]);
}
}
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows;
int numCColumns;
args = wbArg_read(argc, argv);
#if LAB_DEBUG
std::cout << "Running GPU Matrix Multiplicaion ..." << std::endl;
#endif
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows,
&numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows,
&numBColumns);
// TODO: Allocate the hostC matrix
hostC = (float *)malloc((numBColumns*numARows)*sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
// TODO: Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
cudaMalloc((void**)&deviceA, (numAColumns*numARows)*sizeof(float));
cudaMalloc((void**)&deviceB, (numBColumns*numBRows)*sizeof(float));
cudaMalloc((void**)&deviceC, (numCColumns*numCRows)*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
cudaMemcpy(deviceA, hostA, (numAColumns*numARows)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, (numBColumns*numBRows)*sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// TODO: Initialize the grid and block dimensions here
// Here you will have to use dim3
// dim3 blockDim( ... )
// dim3 gridDim( ... )
dim3 block_size(16, 16, 1);
dim3 grid_size((numCColumns - 1) / 16 + 1, (numCRows - 1) /16 + 1, 1);
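// (n - 1) / 16 + 1 is a ceiling division: a partial 16x16 tile at the right or bottom
// edge of C still gets its own block (the kernel's bounds check skips the excess threads).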
// wbLog(TRACE, "The block dimensions are ", blockDim.x, " x ", blockDim.y);
// wbLog(TRACE, "The grid dimensions are ", gridDim.x, " x ", gridDim.y);
wbTime_start(Compute, "Performing CUDA computation");
// TODO:: Launch the GPU Kernel here
matrixMultiply << <grid_size, block_size >> > (deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO:: Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, (numCColumns*numCRows)*sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO:: Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
7faa07d0ee1c3d469c56a912b7f8af0bd3c83fd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "ethash_cuda_miner_kernel.h"
#include "ethash_cuda_miner_kernel_globals.h"
#include "cuda_helper.h"
#include "fnv.cuh"
#define copy(dst, src, count) for (int i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
#include "keccak_u64.cuh"
#include "dagger_shared.cuh"
#define TPB 128
#define BPSM 4
#else
#include "keccak.cuh"
#include "dagger_shuffled.cuh"
#define TPB 896
#define BPSM 1
#endif
__global__ void
__launch_bounds__(TPB, BPSM)
ethash_search(
volatile uint32_t* g_output,
uint64_t start_nonce
)
{
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t hash = compute_hash(start_nonce + gid);
if (cuda_swab64(hash) > d_target) return;
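// g_output[0] is used as an atomic result counter; the gid of each nonce that meets the
// target is appended at g_output[1..], wrapping after SEARCH_RESULT_BUFFER_SIZE - 1 slots.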
uint32_t index = atomicInc(const_cast<uint32_t*>(g_output), SEARCH_RESULT_BUFFER_SIZE - 1) + 1;
g_output[index] = gid;
}
void run_ethash_search(
uint32_t blocks,
uint32_t threads,
uint32_t sharedbytes,
hipStream_t stream,
volatile uint32_t* g_output,
uint64_t start_nonce
)
{
ethash_search << <blocks, threads, sharedbytes, stream >> >(g_output, start_nonce);
CUDA_SAFE_CALL(hipGetLastError());
}
#define ETHASH_DATASET_PARENTS 256
#define NODE_WORDS (64/4)
__global__ void
__launch_bounds__(128, 7)
ethash_calculate_dag_item(uint32_t start)
{
uint32_t const node_index = start + blockIdx.x * blockDim.x + threadIdx.x;
if (node_index > d_dag_size * 2) return;
hash200_t dag_node;
copy(dag_node.uint4s, d_light[node_index % d_light_size].uint4s, 4);
dag_node.words[0] ^= node_index;
SHA3_512(dag_node.uint2s);
const int thread_id = threadIdx.x & 3;
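// thread_id is the lane position within a group of four consecutive threads; on the
// shuffle path below the four lanes share each parent node via __shfl, every lane
// fetching one uint4 quarter of it (presumably to keep the 64-byte loads coalesced).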
for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % d_light_size;
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
for (unsigned w = 0; w != 4; ++w) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], d_light[parent_index].uint4s[w]);
}
#else
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl(parent_index, t, 4);
uint4 p4 = d_light[shuffle_index].uint4s[thread_id];
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl(p4.x, w, 4), __shfl(p4.y, w, 4), __shfl(p4.z, w, 4), __shfl(p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
#endif
}
SHA3_512(dag_node.uint2s);
hash64_t * dag_nodes = (hash64_t *)d_dag;
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
for (uint32_t i = 0; i < 4; i++) {
dag_nodes[node_index].uint4s[i] = dag_node.uint4s[i];
}
#else
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl(node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl(dag_node.uint4s[w].x, t, 4), __shfl(dag_node.uint4s[w].y, t, 4), __shfl(dag_node.uint4s[w].z, t, 4), __shfl(dag_node.uint4s[w].w, t, 4));
}
dag_nodes[shuffle_index].uint4s[thread_id] = s[thread_id];
}
#endif
}
void ethash_generate_dag(
uint64_t dag_size,
uint32_t blocks,
uint32_t threads,
hipStream_t stream,
int device
)
{
uint32_t const work = (uint32_t)(dag_size / sizeof(hash64_t));
uint32_t fullRuns = work / (blocks * threads);
uint32_t const restWork = work % (blocks * threads);
if (restWork > 0) fullRuns++;
for (uint32_t i = 0; i < fullRuns; i++)
{
hipLaunchKernelGGL(( ethash_calculate_dag_item) , dim3(blocks), dim3(threads), 0, stream , i * blocks * threads);
CUDA_SAFE_CALL(hipDeviceSynchronize());
printf("CUDA#%d: %.0f%%\n",device, 100.0f * (float)i / (float)fullRuns);
}
//printf("GPU#%d 100%%\n");
CUDA_SAFE_CALL(hipGetLastError());
}
void set_constants(
hash128_t* _dag,
uint32_t _dag_size,
hash64_t * _light,
uint32_t _light_size
)
{
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_dag, &_dag, sizeof(hash128_t *)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_dag_size, &_dag_size, sizeof(uint32_t)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_light, &_light, sizeof(hash64_t *)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_light_size, &_light_size, sizeof(uint32_t)));
}
void set_header(
hash32_t _header
)
{
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_header, &_header, sizeof(hash32_t)));
}
void set_target(
uint64_t _target
)
{
CUDA_SAFE_CALL(hipMemcpyToSymbol(d_target, &_target, sizeof(uint64_t)));
}
| 7faa07d0ee1c3d469c56a912b7f8af0bd3c83fd4.cu | /*
* Genoil's CUDA mining kernel for Ethereum
* based on Tim Hughes' opencl kernel.
* thanks to sp_, trpuvot, djm34, cbuchner for things i took from ccminer.
*/
#include "ethash_cuda_miner_kernel.h"
#include "ethash_cuda_miner_kernel_globals.h"
#include "cuda_helper.h"
#include "fnv.cuh"
#define copy(dst, src, count) for (int i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
#include "keccak_u64.cuh"
#include "dagger_shared.cuh"
#define TPB 128
#define BPSM 4
#else
#include "keccak.cuh"
#include "dagger_shuffled.cuh"
#define TPB 896
#define BPSM 1
#endif
__global__ void
__launch_bounds__(TPB, BPSM)
ethash_search(
volatile uint32_t* g_output,
uint64_t start_nonce
)
{
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t hash = compute_hash(start_nonce + gid);
if (cuda_swab64(hash) > d_target) return;
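// g_output[0] is used as an atomic result counter; the gid of each nonce that meets the
// target is appended at g_output[1..], wrapping after SEARCH_RESULT_BUFFER_SIZE - 1 slots.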
uint32_t index = atomicInc(const_cast<uint32_t*>(g_output), SEARCH_RESULT_BUFFER_SIZE - 1) + 1;
g_output[index] = gid;
}
void run_ethash_search(
uint32_t blocks,
uint32_t threads,
uint32_t sharedbytes,
cudaStream_t stream,
volatile uint32_t* g_output,
uint64_t start_nonce
)
{
ethash_search << <blocks, threads, sharedbytes, stream >> >(g_output, start_nonce);
CUDA_SAFE_CALL(cudaGetLastError());
}
#define ETHASH_DATASET_PARENTS 256
#define NODE_WORDS (64/4)
__global__ void
__launch_bounds__(128, 7)
ethash_calculate_dag_item(uint32_t start)
{
uint32_t const node_index = start + blockIdx.x * blockDim.x + threadIdx.x;
if (node_index > d_dag_size * 2) return;
hash200_t dag_node;
copy(dag_node.uint4s, d_light[node_index % d_light_size].uint4s, 4);
dag_node.words[0] ^= node_index;
SHA3_512(dag_node.uint2s);
const int thread_id = threadIdx.x & 3;
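// thread_id is the lane position within a group of four consecutive threads; on the
// shuffle path below the four lanes share each parent node via __shfl, every lane
// fetching one uint4 quarter of it (presumably to keep the 64-byte loads coalesced).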
for (uint32_t i = 0; i != ETHASH_DATASET_PARENTS; ++i) {
uint32_t parent_index = fnv(node_index ^ i, dag_node.words[i % NODE_WORDS]) % d_light_size;
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
for (unsigned w = 0; w != 4; ++w) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], d_light[parent_index].uint4s[w]);
}
#else
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl(parent_index, t, 4);
uint4 p4 = d_light[shuffle_index].uint4s[thread_id];
for (int w = 0; w < 4; w++) {
uint4 s4 = make_uint4(__shfl(p4.x, w, 4), __shfl(p4.y, w, 4), __shfl(p4.z, w, 4), __shfl(p4.w, w, 4));
if (t == thread_id) {
dag_node.uint4s[w] = fnv4(dag_node.uint4s[w], s4);
}
}
}
#endif
}
SHA3_512(dag_node.uint2s);
hash64_t * dag_nodes = (hash64_t *)d_dag;
#if __CUDA_ARCH__ < SHUFFLE_MIN_VER
for (uint32_t i = 0; i < 4; i++) {
dag_nodes[node_index].uint4s[i] = dag_node.uint4s[i];
}
#else
for (uint32_t t = 0; t < 4; t++) {
uint32_t shuffle_index = __shfl(node_index, t, 4);
uint4 s[4];
for (uint32_t w = 0; w < 4; w++) {
s[w] = make_uint4(__shfl(dag_node.uint4s[w].x, t, 4), __shfl(dag_node.uint4s[w].y, t, 4), __shfl(dag_node.uint4s[w].z, t, 4), __shfl(dag_node.uint4s[w].w, t, 4));
}
dag_nodes[shuffle_index].uint4s[thread_id] = s[thread_id];
}
#endif
}
void ethash_generate_dag(
uint64_t dag_size,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream,
int device
)
{
uint32_t const work = (uint32_t)(dag_size / sizeof(hash64_t));
uint32_t fullRuns = work / (blocks * threads);
uint32_t const restWork = work % (blocks * threads);
if (restWork > 0) fullRuns++;
for (uint32_t i = 0; i < fullRuns; i++)
{
ethash_calculate_dag_item <<<blocks, threads, 0, stream >>>(i * blocks * threads);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
printf("CUDA#%d: %.0f%%\n",device, 100.0f * (float)i / (float)fullRuns);
}
//printf("GPU#%d 100%%\n");
CUDA_SAFE_CALL(cudaGetLastError());
}
void set_constants(
hash128_t* _dag,
uint32_t _dag_size,
hash64_t * _light,
uint32_t _light_size
)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_dag, &_dag, sizeof(hash128_t *)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_dag_size, &_dag_size, sizeof(uint32_t)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_light, &_light, sizeof(hash64_t *)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_light_size, &_light_size, sizeof(uint32_t)));
}
void set_header(
hash32_t _header
)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_header, &_header, sizeof(hash32_t)));
}
void set_target(
uint64_t _target
)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_target, &_target, sizeof(uint64_t)));
}
|
857c53aee8bdf762294f51feb58fadf65f97576d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include "containers.h"
// device code for rollout
__global__ void single_rollout(float *trajset_arr,
float *x_init,
uint Npath,
float *s_path,
float *vxref_path,
float *kappac_path,
float *mu_path,
float *dub_path,
float *dlb_path,
float mu_nominal,
float m,
float Iz,
float lf,
float lr,
float h_cg,
float g,
uint Nt,
uint Ni,
uint Nx,
uint Nu,
uint Nd,
uint Nvx,
uint Npp,
uint Npb,
float dt,
int traction_adaptive,
float *d_ref_arr,
float *vx_ref_arr)
{
// notes on indexing:
// trajset_arr:
// row (i) <- state (Nx+Nu+Nmisc)
// column (j) <- traj (Nd)
// page (k) <- time (Nt)
// iii - index of the 1d array that represents the 3d array (increase order: rows - columns - pages)
// threadIdx.x = index of the current thread within its block (replaces j)
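// concretely: element (state/input row i, trajectory j = threadIdx.x, time step k,
// vx sample blockIdx.x) lives at trajset_arr[j + i*Nd + k*Npp + blockIdx.x*Npb]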
// get init state
float s = x_init[0];
float d = x_init[1];
float deltapsi = x_init[2];
float psidot = x_init[3];
float vx = x_init[4];
float vy = x_init[5];
// set init state in trajset_arr
trajset_arr[threadIdx.x + 0*Nd + 0*Npp + blockIdx.x*Npb] = s;
trajset_arr[threadIdx.x + 1*Nd + 0*Npp + blockIdx.x*Npb] = d;
trajset_arr[threadIdx.x + 2*Nd + 0*Npp + blockIdx.x*Npb] = deltapsi;
trajset_arr[threadIdx.x + 3*Nd + 0*Npp + blockIdx.x*Npb] = psidot;
trajset_arr[threadIdx.x + 4*Nd + 0*Npp + blockIdx.x*Npb] = vx;
trajset_arr[threadIdx.x + 5*Nd + 0*Npp + blockIdx.x*Npb] = vy;
// rollout loop
float Fyf = 0.0f;
float Fxf = 0.0f;
float Fxr = 0.0f;
float Fyr = 0.0f;
float Fzf = 0.0f;
float Fzr = 0.0f;
float kappac = 0.0f;
float mu = 0.0f;
float Cr = 0.0f;
uint k = 0; // k is the regular iterator, ki is upsampled
for (int ki=0; ki<((Nt+1)*Ni); ki++){
if(ki % Ni == 0){ // only the euler step is performed every ki
// init vars for cost eval
bool colliding = false;
bool exitroad = false;
float cost = 0;
// check if exitroad
for (uint id = 0; id<Npath; id++) {
if (d < dlb_path[id] || d > dub_path[id]){
exitroad = true;
}
}
// check collision with obstacle
// get path index
uint path_idx;
for (path_idx = 0; path_idx<Npath; path_idx++) {
// break if s - spath is negative, save index
if(s-s_path[path_idx] <= 0){
break;
}
}
// get kappac
kappac = kappac_path[path_idx];
// set ax for Fz computation from previous cmd
float ax = (Fxf+Fxr)/m;
// get normal forces front and back and mu
if(traction_adaptive == 1){
float theta = 0; // grade angle todo get from pathlocal via traj
Fzf = (1.0f/(lf+lr))*(m*ax*h_cg - m*g*h_cg*sin(theta) + m*g*lr*cos(theta));
Fzr = (1.0f/(lf+lr))*(-m*ax*h_cg + m*g*h_cg*sin(theta) + m*g*lf*cos(theta));
mu = mu_path[path_idx]; // get mu from pathlocal at s
} else { // (traction_adaptive == 0)
Fzf = (1.0f/(lf+lr))*(m*g*lr);
Fzr = (1.0f/(lf+lr))*(m*g*lf);
mu = mu_nominal;
}
// get rear cornering stiffness
float B, C, D; //, E;
if(0.0f <= mu && mu <0.3f){ // ice
B = 4.0f;
C = 2.0f;
D = mu;
//E = 1.0f;
} else if (0.3f <= mu && mu <0.5f) { // snow
B = 5.0f;
C = 2.0f;
D = mu;
//E = 1.0f;
} else if (0.5f <= mu && mu <0.9f) { // wet
B = 12.0f;
C = 2.3f;
D = mu;
//E = 1.0f;
} else if (0.9f <= mu && mu <1.5f) { // dry
B = 10.0f;
C = 1.9f;
D = mu;
//E = 0.97f;
} else if (1.5f <= mu && mu <2.5f) { // dry + racing tires (gotthard default)
B = 12.56f;
C = 1.38f;
D = mu;
//E = 1.00f;
} else {
// todo Error = nonzero nr - check at host and throw error
}
Cr = B*C*D*Fzr; // Rajamani
// compute maximum tire forces
float Ffmax = mu*Fzf;
float Frmax = mu*Fzr;
// get vxerror (one per block)
float vxerror = vx_ref_arr[blockIdx.x] - vx;
// get derror (one per thread)
float derror = d_ref_arr[threadIdx.x] - d;
/*
* ROLLOUT CONTROLLER
*/
// TODO GET PARAMS FOR STANDARD STATE FEEDBACK
// select Fyf
float feedfwd = 0.5f*m*vx*vx*kappac*cos(deltapsi);
float feedback = 3000*derror - 500*deltapsi;
Fyf = feedfwd + feedback;
// saturate Fyf at Ffmax
if(Fyf >= Ffmax){
Fyf = Ffmax;
}
if(Fyf<=-Ffmax){
Fyf = -Ffmax;
}
// select Fxf
float Fxfmax = sqrt(Ffmax*Ffmax-Fyf*Fyf);
if(vxerror > 0){ // accelerating
Fxf = 0; // rear wheel drive - no drive on front wheel
} else { // braking
Fxf = 1000*vxerror;
// saturate
if(Fxf<=-Fxfmax){
Fxf = -Fxfmax;
}
}
// select Fxr
float Fxrmax = sqrt(Frmax*Frmax-Fyr*Fyr);
Fxr = 1000*vxerror;
// saturate
if(Fxr >= Fxrmax){
Fxr = Fxrmax;
}
if(Fxr<=-Fxrmax){
Fxr = -Fxrmax;
}
}
// set Fyr
Fyr = 2*Cr*atan(lr*psidot-vy)/vx; // rear lateral force from the linear tire model (updates the Fyr used in the friction-circle bound on Fxr above)
// euler fwd step
s = s + (dt/Ni)*((vx*cos(deltapsi)-vy*sin(deltapsi))/(1-d*kappac));
d = d + (dt/Ni)*(vx*sin(deltapsi)+vy*cos(deltapsi));
deltapsi = deltapsi + (dt/Ni)*(psidot-kappac*(vx*cos(deltapsi)-vy*sin(deltapsi))/(1-d*kappac));
psidot = psidot + (dt/Ni)*((1/Iz)*(lf*Fyf - lr*Fyr));
vx = vx + (dt/Ni)*((1/m)*(Fxf+Fxr));
vy = vy + (dt/Ni)*((1/m)*(Fyf+Fyr)-vx*psidot);
// store data at Nt regular intervals
if(ki % Ni == 0){
k = ki/Ni;
// set x at k+1
trajset_arr[threadIdx.x + 0*Nd + (k+1)*Npp + blockIdx.x*Npb] = s;
trajset_arr[threadIdx.x + 1*Nd + (k+1)*Npp + blockIdx.x*Npb] = d;
trajset_arr[threadIdx.x + 2*Nd + (k+1)*Npp + blockIdx.x*Npb] = deltapsi;
trajset_arr[threadIdx.x + 3*Nd + (k+1)*Npp + blockIdx.x*Npb] = psidot;
trajset_arr[threadIdx.x + 4*Nd + (k+1)*Npp + blockIdx.x*Npb] = vx;
trajset_arr[threadIdx.x + 5*Nd + (k+1)*Npp + blockIdx.x*Npb] = vy;
// set u at k
trajset_arr[threadIdx.x + 6*Nd + (k)*Npp + blockIdx.x*Npb] = Fyf;
trajset_arr[threadIdx.x + 7*Nd + (k)*Npp + blockIdx.x*Npb] = Fxf;
trajset_arr[threadIdx.x + 8*Nd + (k)*Npp + blockIdx.x*Npb] = Fxr;
// set miscvars
trajset_arr[threadIdx.x + 9*Nd + (k)*Npp + blockIdx.x*Npb] = Fzf;
trajset_arr[threadIdx.x + 10*Nd + (k)*Npp + blockIdx.x*Npb] = Fzr;
trajset_arr[threadIdx.x + 11*Nd + (k)*Npp + blockIdx.x*Npb] = kappac;
trajset_arr[threadIdx.x + 12*Nd + (k)*Npp + blockIdx.x*Npb] = mu;
trajset_arr[threadIdx.x + 13*Nd + (k)*Npp + blockIdx.x*Npb] = Cr;
}
}
}
// main rollout fcn, called by saarti_node
void cuda_rollout(std::vector<containers::trajstruct> &trajset_struct,
containers::statestruct initstate,
containers::pathstruct pathlocal,
containers::staticparamstruct sp,
int traction_adaptive,
float mu_nominal,
uint Nt, // N in planning horizon
uint Nd, // Nr of goal pts in d (multiples of 32 to maximize gpu utilization)
uint Nvx, // Nr of goal pts in vx
float vxub, // upper bound on sampled vx
float dt) // dt of planning horizon
{
// init variables
uint Ni = 10; // scaling factor in integration
uint Nx = 6;
uint Nu = 3;
uint Nmisc = 5; // nr of additional traj vars
uint Npp = (Nx+Nu+Nmisc)*Nd; // elements_per_page
uint Npb = Npp*(Nt+2); // elements per block (one block per vx sample): the kernel writes pages 0..Nt+1, since the state at step k is stored on page k+1
float *trajset_arr;
float *x_init;
float *d_ref_arr;
float *vx_ref_arr;
uint Npath = pathlocal.s.size();
float *s_path;
float *vxref_path;
float *kappac_path;
float *mu_path;
float *dub_path;
float *dlb_path;
// allocate shared memory
hipMallocManaged(&trajset_arr, Npb*Nvx*sizeof(float)); // Npb floats for each of the Nvx blocks launched below
hipMallocManaged(&x_init, Nx*sizeof(float));
hipMallocManaged(&d_ref_arr, Nd*sizeof(float));
hipMallocManaged(&vx_ref_arr, Nvx*sizeof(float));
hipMallocManaged(&s_path, Npath*sizeof(float));
hipMallocManaged(&vxref_path, Npath*sizeof(float));
hipMallocManaged(&kappac_path, Npath*sizeof(float));
hipMallocManaged(&mu_path, Npath*sizeof(float));
hipMallocManaged(&dub_path, Npath*sizeof(float));
hipMallocManaged(&dlb_path, Npath*sizeof(float));
// set init state
x_init[0] = initstate.s;
x_init[1] = initstate.d;
x_init[2] = initstate.deltapsi;
x_init[3] = initstate.psidot;
x_init[4] = initstate.vx;
x_init[5] = initstate.vy;
// set dref_arr
float dub = pathlocal.dub.at(0);
float dlb = pathlocal.dlb.at(0);
float dstep = (dub-dlb)/(float(Nd)-1);
for(uint id=0; id<Nd; ++id) {
d_ref_arr[id] = dlb+float(id)*dstep;
//std::cout << "d_ref_arr[id] = " << d_ref_arr[id] << std::endl;
}
// set vxref_arr
float vxlb = 1.0f; // avoid singularity at vx = 0
float vxstep = (vxub-vxlb)/(float(Nvx)-1);
//std::cout << "Nvx = " << Nvx << std::endl;
for(uint id=0; id<Nvx; ++id) {
vx_ref_arr[id] = vxlb+float(id)*vxstep;
//std::cout << "vx_ref_arr[id] = " << vx_ref_arr[id] << std::endl;
}
// set path variables
for(uint id=0; id<Npath; ++id) {
s_path[id] = pathlocal.s.at(id);
kappac_path[id] = pathlocal.kappa_c.at(id);
mu_path[id] = pathlocal.mu.at(id);
dub_path[id] = pathlocal.dub.at(id);
dlb_path[id] = pathlocal.dlb.at(id);
}
// run Nd*Nvx rollouts on Ntraj threads on gpu
hipLaunchKernelGGL(( single_rollout), dim3(Nvx), dim3(Nd), 0, 0, trajset_arr,
x_init,
Npath,
s_path,
vxref_path,
kappac_path,
mu_path,
dub_path,
dlb_path,
mu_nominal,
sp.m,
sp.Iz,
sp.lf,
sp.lr,
sp.h_cg,
sp.g,
Nt,
Ni,
Nx,
Nu,
Nd,
Nvx,
Npp,
Npb,
dt,
traction_adaptive,
d_ref_arr,
vx_ref_arr);
// wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// put result on struct format
for (uint l=0;l<Nvx;l++){
for (uint j=0;j<Nd;j++) {
containers::trajstruct traj;
for (size_t k=0;k<Nt+1;k++) {
// std::cout << "s = " << trajset_arr[j + 0*Nd + k*Npp] << std::endl;
traj.s.push_back(trajset_arr[j + 0*Nd + k*Npp + l*Npb]);
traj.d.push_back(trajset_arr[j + 1*Nd + k*Npp + l*Npb]);
traj.deltapsi.push_back(trajset_arr[j + 2*Nd + k*Npp + l*Npb]);
traj.psidot.push_back(trajset_arr[j + 3*Nd + k*Npp + l*Npb]);
traj.vx.push_back(trajset_arr[j + 4*Nd + k*Npp + l*Npb]);
traj.vy.push_back(trajset_arr[j + 5*Nd + k*Npp + l*Npb]);
if(k<Nt){ // N+1 states and N ctrls
traj.Fyf.push_back(trajset_arr[j + 6*Nd + k*Npp + l*Npb]);
traj.Fxf.push_back(trajset_arr[j + 7*Nd + k*Npp + l*Npb]);
traj.Fxr.push_back(trajset_arr[j + 8*Nd + k*Npp + l*Npb]);
// miscvars
traj.Fzf.push_back(trajset_arr[j + 9*Nd + k*Npp + l*Npb]);
traj.Fzr.push_back(trajset_arr[j + 10*Nd + k*Npp + l*Npb]);
traj.kappac.push_back(trajset_arr[j + 11*Nd + k*Npp + l*Npb]);
traj.mu.push_back(trajset_arr[j + 12*Nd + k*Npp + l*Npb]);
traj.Cr.push_back(trajset_arr[j + 13*Nd + k*Npp + l*Npb]);
}
}
// add last element of misc vars
traj.Fzf.push_back(traj.Fzf.back());
traj.Fzr.push_back(traj.Fzr.back());
traj.kappac.push_back(traj.kappac.back());
traj.mu.push_back(traj.mu.back());
traj.Cr.push_back(traj.Cr.back());
trajset_struct.push_back(traj);
}
}
// Free memory
hipFree(trajset_arr);
}
| 857c53aee8bdf762294f51feb58fadf65f97576d.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include "containers.h"
// device code for rollout
__global__ void single_rollout(float *trajset_arr,
float *x_init,
uint Npath,
float *s_path,
float *vxref_path,
float *kappac_path,
float *mu_path,
float *dub_path,
float *dlb_path,
float mu_nominal,
float m,
float Iz,
float lf,
float lr,
float h_cg,
float g,
uint Nt,
uint Ni,
uint Nx,
uint Nu,
uint Nd,
uint Nvx,
uint Npp,
uint Npb,
float dt,
int traction_adaptive,
float *d_ref_arr,
float *vx_ref_arr)
{
// notes on indexing:
// trajset_arr:
// row (i) <- state (Nx+Nu+Nmisc)
// column (j) <- traj (Nd)
// page (k) <- time (Nt)
// iii - index of the 1d array that represents the 3d array (increase order: rows - columns - pages)
// threadIdx.x = index of the current thread within its block (replaces j)
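// concretely: element (state/input row i, trajectory j = threadIdx.x, time step k,
// vx sample blockIdx.x) lives at trajset_arr[j + i*Nd + k*Npp + blockIdx.x*Npb]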
// get init state
float s = x_init[0];
float d = x_init[1];
float deltapsi = x_init[2];
float psidot = x_init[3];
float vx = x_init[4];
float vy = x_init[5];
// set init state in trajset_arr
trajset_arr[threadIdx.x + 0*Nd + 0*Npp + blockIdx.x*Npb] = s;
trajset_arr[threadIdx.x + 1*Nd + 0*Npp + blockIdx.x*Npb] = d;
trajset_arr[threadIdx.x + 2*Nd + 0*Npp + blockIdx.x*Npb] = deltapsi;
trajset_arr[threadIdx.x + 3*Nd + 0*Npp + blockIdx.x*Npb] = psidot;
trajset_arr[threadIdx.x + 4*Nd + 0*Npp + blockIdx.x*Npb] = vx;
trajset_arr[threadIdx.x + 5*Nd + 0*Npp + blockIdx.x*Npb] = vy;
// rollout loop
float Fyf = 0.0f;
float Fxf = 0.0f;
float Fxr = 0.0f;
float Fyr = 0.0f;
float Fzf = 0.0f;
float Fzr = 0.0f;
float kappac = 0.0f;
float mu = 0.0f;
float Cr = 0.0f;
uint k = 0; // k is the regular iterator, ki is upsampled
for (int ki=0; ki<((Nt+1)*Ni); ki++){
if(ki % Ni == 0){ // only the euler step is performed every ki
// init vars for cost eval
bool colliding = false;
bool exitroad = false;
float cost = 0;
// check if exitroad
for (uint id = 0; id<Npath; id++) {
if (d < dlb_path[id] || d > dub_path[id]){
exitroad = true;
}
}
// check collision with obstacle
// get path index
uint path_idx;
for (path_idx = 0; path_idx<Npath; path_idx++) {
// break if s - spath is negative, save index
if(s-s_path[path_idx] <= 0){
break;
}
}
// get kappac
kappac = kappac_path[path_idx];
// set ax for Fz computation from previous cmd
float ax = (Fxf+Fxr)/m;
// get normal forces front and back and mu
if(traction_adaptive == 1){
float theta = 0; // grade angle todo get from pathlocal via traj
Fzf = (1.0f/(lf+lr))*(m*ax*h_cg - m*g*h_cg*sin(theta) + m*g*lr*cos(theta));
Fzr = (1.0f/(lf+lr))*(-m*ax*h_cg + m*g*h_cg*sin(theta) + m*g*lf*cos(theta));
mu = mu_path[path_idx]; // get mu from pathlocal at s
} else { // (traction_adaptive == 0)
Fzf = (1.0f/(lf+lr))*(m*g*lr);
Fzr = (1.0f/(lf+lr))*(m*g*lf);
mu = mu_nominal;
}
// get rear cornering stiffness
float B, C, D; //, E;
if(0.0f <= mu && mu <0.3f){ // ice
B = 4.0f;
C = 2.0f;
D = mu;
//E = 1.0f;
} else if (0.3f <= mu && mu <0.5f) { // snow
B = 5.0f;
C = 2.0f;
D = mu;
//E = 1.0f;
} else if (0.5f <= mu && mu <0.9f) { // wet
B = 12.0f;
C = 2.3f;
D = mu;
//E = 1.0f;
} else if (0.9f <= mu && mu <1.5f) { // dry
B = 10.0f;
C = 1.9f;
D = mu;
//E = 0.97f;
} else if (1.5f <= mu && mu <2.5f) { // dry + racing tires (gotthard default)
B = 12.56f;
C = 1.38f;
D = mu;
//E = 1.00f;
} else {
// todo Error = nonzero nr - check at host and throw error
}
Cr = B*C*D*Fzr; // Rajamani
// compute maximum tire forces
float Ffmax = mu*Fzf;
float Frmax = mu*Fzr;
// get vxerror (one per block)
float vxerror = vx_ref_arr[blockIdx.x] - vx;
// get derror (one per thread)
float derror = d_ref_arr[threadIdx.x] - d;
/*
* ROLLOUT CONTROLLER
*/
// TODO GET PARAMS FOR STANDARD STATE FEEDBACK
// select Fyf
float feedfwd = 0.5f*m*vx*vx*kappac*cos(deltapsi);
float feedback = 3000*derror - 500*deltapsi;
Fyf = feedfwd + feedback;
// saturate Fyf at Ffmax
if(Fyf >= Ffmax){
Fyf = Ffmax;
}
if(Fyf<=-Ffmax){
Fyf = -Ffmax;
}
// select Fxf
float Fxfmax = sqrt(Ffmax*Ffmax-Fyf*Fyf);
if(vxerror > 0){ // accelerating
Fxf = 0; // rear wheel drive - no drive on front wheel
} else { // braking
Fxf = 1000*vxerror;
// saturate
if(Fxf<=-Fxfmax){
Fxf = -Fxfmax;
}
}
// select Fxr
float Fxrmax = sqrt(Frmax*Frmax-Fyr*Fyr);
Fxr = 1000*vxerror;
// saturate
if(Fxr >= Fxrmax){
Fxr = Fxrmax;
}
if(Fxr<=-Fxrmax){
Fxr = -Fxrmax;
}
}
// set Fyr
Fyr = 2*Cr*atan(lr*psidot-vy)/vx; // rear lateral force from the linear tire model (updates the Fyr used in the friction-circle bound on Fxr above)
// euler fwd step
s = s + (dt/Ni)*((vx*cos(deltapsi)-vy*sin(deltapsi))/(1-d*kappac));
d = d + (dt/Ni)*(vx*sin(deltapsi)+vy*cos(deltapsi));
deltapsi = deltapsi + (dt/Ni)*(psidot-kappac*(vx*cos(deltapsi)-vy*sin(deltapsi))/(1-d*kappac));
psidot = psidot + (dt/Ni)*((1/Iz)*(lf*Fyf - lr*Fyr));
vx = vx + (dt/Ni)*((1/m)*(Fxf+Fxr));
vy = vy + (dt/Ni)*((1/m)*(Fyf+Fyr)-vx*psidot);
// store data at Nt regular intervals
if(ki % Ni == 0){
k = ki/Ni;
// set x at k+1
trajset_arr[threadIdx.x + 0*Nd + (k+1)*Npp + blockIdx.x*Npb] = s;
trajset_arr[threadIdx.x + 1*Nd + (k+1)*Npp + blockIdx.x*Npb] = d;
trajset_arr[threadIdx.x + 2*Nd + (k+1)*Npp + blockIdx.x*Npb] = deltapsi;
trajset_arr[threadIdx.x + 3*Nd + (k+1)*Npp + blockIdx.x*Npb] = psidot;
trajset_arr[threadIdx.x + 4*Nd + (k+1)*Npp + blockIdx.x*Npb] = vx;
trajset_arr[threadIdx.x + 5*Nd + (k+1)*Npp + blockIdx.x*Npb] = vy;
// set u at k
trajset_arr[threadIdx.x + 6*Nd + (k)*Npp + blockIdx.x*Npb] = Fyf;
trajset_arr[threadIdx.x + 7*Nd + (k)*Npp + blockIdx.x*Npb] = Fxf;
trajset_arr[threadIdx.x + 8*Nd + (k)*Npp + blockIdx.x*Npb] = Fxr;
// set miscvars
trajset_arr[threadIdx.x + 9*Nd + (k)*Npp + blockIdx.x*Npb] = Fzf;
trajset_arr[threadIdx.x + 10*Nd + (k)*Npp + blockIdx.x*Npb] = Fzr;
trajset_arr[threadIdx.x + 11*Nd + (k)*Npp + blockIdx.x*Npb] = kappac;
trajset_arr[threadIdx.x + 12*Nd + (k)*Npp + blockIdx.x*Npb] = mu;
trajset_arr[threadIdx.x + 13*Nd + (k)*Npp + blockIdx.x*Npb] = Cr;
}
}
}
// main rollout fcn, called by saarti_node
void cuda_rollout(std::vector<containers::trajstruct> &trajset_struct,
containers::statestruct initstate,
containers::pathstruct pathlocal,
containers::staticparamstruct sp,
int traction_adaptive,
float mu_nominal,
uint Nt, // N in planning horizon
uint Nd, // Nr of goal pts in d (multiples of 32 to maximize gpu utilization)
uint Nvx, // Nr of goal pts in vx
float vxub, // upper bound on sampled vx
float dt) // dt of planning horizon
{
// init variables
uint Ni = 10; // scaling factor in integration
uint Nx = 6;
uint Nu = 3;
uint Nmisc = 5; // nr of additional traj vars
uint Npp = (Nx+Nu+Nmisc)*Nd; // elements_per_page
uint Npb = Npp*(Nt+2); // elements per block (one block per vx sample): the kernel writes pages 0..Nt+1, since the state at step k is stored on page k+1
float *trajset_arr;
float *x_init;
float *d_ref_arr;
float *vx_ref_arr;
uint Npath = pathlocal.s.size();
float *s_path;
float *vxref_path;
float *kappac_path;
float *mu_path;
float *dub_path;
float *dlb_path;
// allocate shared memory
cudaMallocManaged(&trajset_arr, Npb*Nvx*sizeof(float)); // Npb floats for each of the Nvx blocks launched below
cudaMallocManaged(&x_init, Nx*sizeof(float));
cudaMallocManaged(&d_ref_arr, Nd*sizeof(float));
cudaMallocManaged(&vx_ref_arr, Nvx*sizeof(float));
cudaMallocManaged(&s_path, Npath*sizeof(float));
cudaMallocManaged(&vxref_path, Npath*sizeof(float));
cudaMallocManaged(&kappac_path, Npath*sizeof(float));
cudaMallocManaged(&mu_path, Npath*sizeof(float));
cudaMallocManaged(&dub_path, Npath*sizeof(float));
cudaMallocManaged(&dlb_path, Npath*sizeof(float));
// set init state
x_init[0] = initstate.s;
x_init[1] = initstate.d;
x_init[2] = initstate.deltapsi;
x_init[3] = initstate.psidot;
x_init[4] = initstate.vx;
x_init[5] = initstate.vy;
// set dref_arr
float dub = pathlocal.dub.at(0);
float dlb = pathlocal.dlb.at(0);
float dstep = (dub-dlb)/(float(Nd)-1);
for(uint id=0; id<Nd; ++id) {
d_ref_arr[id] = dlb+float(id)*dstep;
//std::cout << "d_ref_arr[id] = " << d_ref_arr[id] << std::endl;
}
// set vxref_arr
float vxlb = 1.0f; // avoid singularity at vx = 0
float vxstep = (vxub-vxlb)/(float(Nvx)-1);
//std::cout << "Nvx = " << Nvx << std::endl;
for(uint id=0; id<Nvx; ++id) {
vx_ref_arr[id] = vxlb+float(id)*vxstep;
//std::cout << "vx_ref_arr[id] = " << vx_ref_arr[id] << std::endl;
}
// set path variables
for(uint id=0; id<Npath; ++id) {
s_path[id] = pathlocal.s.at(id);
kappac_path[id] = pathlocal.kappa_c.at(id);
mu_path[id] = pathlocal.mu.at(id);
dub_path[id] = pathlocal.dub.at(id);
dlb_path[id] = pathlocal.dlb.at(id);
}
// run Nd*Nvx rollouts on Ntraj threads on gpu
single_rollout<<<Nvx, Nd>>>(trajset_arr,
x_init,
Npath,
s_path,
vxref_path,
kappac_path,
mu_path,
dub_path,
dlb_path,
mu_nominal,
sp.m,
sp.Iz,
sp.lf,
sp.lr,
sp.h_cg,
sp.g,
Nt,
Ni,
Nx,
Nu,
Nd,
Nvx,
Npp,
Npb,
dt,
traction_adaptive,
d_ref_arr,
vx_ref_arr);
// wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// put result on struct format
for (uint l=0;l<Nvx;l++){
for (uint j=0;j<Nd;j++) {
containers::trajstruct traj;
for (size_t k=0;k<Nt+1;k++) {
// std::cout << "s = " << trajset_arr[j + 0*Nd + k*Npp] << std::endl;
traj.s.push_back(trajset_arr[j + 0*Nd + k*Npp + l*Npb]);
traj.d.push_back(trajset_arr[j + 1*Nd + k*Npp + l*Npb]);
traj.deltapsi.push_back(trajset_arr[j + 2*Nd + k*Npp + l*Npb]);
traj.psidot.push_back(trajset_arr[j + 3*Nd + k*Npp + l*Npb]);
traj.vx.push_back(trajset_arr[j + 4*Nd + k*Npp + l*Npb]);
traj.vy.push_back(trajset_arr[j + 5*Nd + k*Npp + l*Npb]);
if(k<Nt){ // N+1 states and N ctrls
traj.Fyf.push_back(trajset_arr[j + 6*Nd + k*Npp + l*Npb]);
traj.Fxf.push_back(trajset_arr[j + 7*Nd + k*Npp + l*Npb]);
traj.Fxr.push_back(trajset_arr[j + 8*Nd + k*Npp + l*Npb]);
// miscvars
traj.Fzf.push_back(trajset_arr[j + 9*Nd + k*Npp + l*Npb]);
traj.Fzr.push_back(trajset_arr[j + 10*Nd + k*Npp + l*Npb]);
traj.kappac.push_back(trajset_arr[j + 11*Nd + k*Npp + l*Npb]);
traj.mu.push_back(trajset_arr[j + 12*Nd + k*Npp + l*Npb]);
traj.Cr.push_back(trajset_arr[j + 13*Nd + k*Npp + l*Npb]);
}
}
// add last element of misc vars
traj.Fzf.push_back(traj.Fzf.back());
traj.Fzr.push_back(traj.Fzr.back());
traj.kappac.push_back(traj.kappac.back());
traj.mu.push_back(traj.mu.back());
traj.Cr.push_back(traj.Cr.back());
trajset_struct.push_back(traj);
}
}
// Free memory
cudaFree(trajset_arr);
}
|
fd1c475323eb54f4c9bbbe54b0075e2c457b1b41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/********************************************************
CUDA Kernel
********************************************************/
__global__ void matrixMul (float* C, float* A, float* B, int TA)
{
const int TILE_SIZE = 16;
__shared__ float As[TILE_SIZE][TILE_SIZE];
__shared__ float Bs[TILE_SIZE][TILE_SIZE];
/* thread coordinates inside the tile and in the output matrix */
/* (assumes blockDim == (TILE_SIZE, TILE_SIZE) and TA is a multiple of TILE_SIZE, both enforced in main) */
int i = threadIdx.y;
int j = threadIdx.x;
int row = blockIdx.y * TILE_SIZE + i;
int col = blockIdx.x * TILE_SIZE + j;
/* accumulator for C[row][col], kept across all the tiles */
float cc = 0;
for (int tileId = 0; tileId < TA; tileId += TILE_SIZE) {
/* copy one element of A and one element of B into shared memory */
As[i][j] = A[row * TA + tileId + j];
Bs[i][j] = B[(tileId + i) * TA + col];
__syncthreads();
/* partial dot product for this tile */
for (int k = 0; k < TILE_SIZE; ++k) {
cc += As[i][k] * Bs[k][j];
}
__syncthreads();
}
/* write the result once every tile has been processed */
C[row * TA + col] = cc;
}
/********************************************************
Main program
********************************************************/
/////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int i, j, GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z, BLOCK_SIZE_X, BLOCK_SIZE_Y, TILE_SIZE;
int TM;
hipError_t cerror; /* GPU return value */
/* for measuring processing times */
float tc;
hipEvent_t depart, arret;
hipEventCreate(&depart);
hipEventCreate(&arret);
/* default values */
TM = 2048;
BLOCK_SIZE_X = 16;
BLOCK_SIZE_Y = BLOCK_SIZE_X;
TILE_SIZE = BLOCK_SIZE_Y;
if ((TM % BLOCK_SIZE_X) != 0 || (TM % BLOCK_SIZE_Y) != 0) {
printf("Taille matrice non multiple des dim bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
exit(1);
}
GRID_SIZE_X = TM / BLOCK_SIZE_X;
GRID_SIZE_Y = TM / BLOCK_SIZE_Y;
GRID_SIZE_Z = TM / TILE_SIZE;
/* allocate the matrices on the CPU */
unsigned int size_A = TM * TM;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = TM * TM;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
unsigned int size_C = TM * TM;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
/* initialize the matrices with values that make the result easy to verify */
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
/* allocate the matrices on the GPU */
float* d_A;
float* d_B;
hipMalloc((void**) &d_A, mem_size_A);
hipMalloc((void**) &d_B, mem_size_B);
float* d_C;
hipMalloc((void**) &d_C, mem_size_C);
/* start timing the host-to-device transfer */
hipEventRecord(depart,0);
/* copy matrices A and B from the CPU to the GPU */
hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
hipEventRecord(arret,0);
hipEventSynchronize(arret);
hipEventElapsedTime(&tc,depart, arret);
printf("Temps de transfert host vers device : %f seconde\n", tc/1000.0);
/* define the grid and the blocks */
dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d, %d \n", GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z);
printf("bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
hipEventRecord(depart,0);
/* launch the kernel */
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block) , 0, 0, d_C, d_A, d_B, TM);
hipEventRecord(arret,0);
hipEventSynchronize(arret);
hipEventElapsedTime(&tc,depart, arret);
printf("Temps de calcul : %f seconde\n", tc/1000.0);
/* GPU return value: 0 = OK, otherwise an error */
cerror=hipGetLastError();
printf(" retour GPU = %d \n", (int) cerror);
hipEventRecord(depart,0);
/* copy matrix C back from the GPU */
hipMemcpy(h_C, d_C, mem_size_C,
hipMemcpyDeviceToHost);
hipEventRecord(arret,0);
hipEventSynchronize(arret);
hipEventElapsedTime(&tc,depart, arret);
printf("Temps transfert device vers host : %f seconde\n", tc/1000.0);
/* check the result */
for (i = 0; i < TM; i++){
for (j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] );
exit(1);
} else if ((i!=j) && (h_C[i*TM+j] != (float) (i + j + TM))) {
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
hipEventDestroy(depart);
hipEventDestroy(arret);
/* free the memory */
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
} | fd1c475323eb54f4c9bbbe54b0075e2c457b1b41.cu |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/********************************************************
CUDA Kernel
********************************************************/
__global__ void matrixMul (float* C, float* A, float* B, int TA)
{
const int TILE_SIZE = 16;
__shared__ float As[TILE_SIZE][TILE_SIZE];
__shared__ float Bs[TILE_SIZE][TILE_SIZE];
/* thread coordinates inside the tile and in the output matrix */
/* (assumes blockDim == (TILE_SIZE, TILE_SIZE) and TA is a multiple of TILE_SIZE, both enforced in main) */
int i = threadIdx.y;
int j = threadIdx.x;
int row = blockIdx.y * TILE_SIZE + i;
int col = blockIdx.x * TILE_SIZE + j;
/* accumulator for C[row][col], kept across all the tiles */
float cc = 0;
for (int tileId = 0; tileId < TA; tileId += TILE_SIZE) {
/* copy one element of A and one element of B into shared memory */
As[i][j] = A[row * TA + tileId + j];
Bs[i][j] = B[(tileId + i) * TA + col];
__syncthreads();
/* partial dot product for this tile */
for (int k = 0; k < TILE_SIZE; ++k) {
cc += As[i][k] * Bs[k][j];
}
__syncthreads();
}
/* write the result once every tile has been processed */
C[row * TA + col] = cc;
}
/********************************************************
Main program
********************************************************/
/////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int i, j, GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z, BLOCK_SIZE_X, BLOCK_SIZE_Y, TILE_SIZE;
int TM;
cudaError_t cerror; /* GPU return value */
/* for measuring processing times */
float tc;
cudaEvent_t depart, arret;
cudaEventCreate(&depart);
cudaEventCreate(&arret);
/* default values */
TM = 2048;
BLOCK_SIZE_X = 16;
BLOCK_SIZE_Y = BLOCK_SIZE_X;
TILE_SIZE = BLOCK_SIZE_Y;
if ((TM % BLOCK_SIZE_X) != 0 || (TM % BLOCK_SIZE_Y) != 0) {
printf("Taille matrice non multiple des dim bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
exit(1);
}
GRID_SIZE_X = TM / BLOCK_SIZE_X;
GRID_SIZE_Y = TM / BLOCK_SIZE_Y;
GRID_SIZE_Z = TM / TILE_SIZE;
/* allocate the matrices on the CPU */
unsigned int size_A = TM * TM;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*) malloc(mem_size_A);
unsigned int size_B = TM * TM;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*) malloc(mem_size_B);
unsigned int size_C = TM * TM;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
/* initialize the matrices with values that make the result easy to verify */
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
/* allocate the matrices on the GPU */
float* d_A;
float* d_B;
cudaMalloc((void**) &d_A, mem_size_A);
cudaMalloc((void**) &d_B, mem_size_B);
float* d_C;
cudaMalloc((void**) &d_C, mem_size_C);
/* start timing the host-to-device transfer */
cudaEventRecord(depart,0);
/* copy matrices A and B from the CPU to the GPU */
cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
cudaEventRecord(arret,0);
cudaEventSynchronize(arret);
cudaEventElapsedTime(&tc,depart, arret);
printf("Temps de transfert host vers device : %f seconde\n", tc/1000.0);
/* define the grid and the blocks */
dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
printf("grille %d, %d \n", GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z);
printf("bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
cudaEventRecord(depart,0);
/* launch the kernel */
matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM);
cudaEventRecord(arret,0);
cudaEventSynchronize(arret);
cudaEventElapsedTime(&tc,depart, arret);
printf("Temps de calcul : %f seconde\n", tc/1000.0);
/* GPU return value: 0 = OK, otherwise an error */
cerror=cudaGetLastError();
printf(" retour GPU = %d \n", (int) cerror);
cudaEventRecord(depart,0);
/* copy matrix C back from the GPU */
cudaMemcpy(h_C, d_C, mem_size_C,
cudaMemcpyDeviceToHost);
cudaEventRecord(arret,0);
cudaEventSynchronize(arret);
cudaEventElapsedTime(&tc,depart, arret);
printf("Temps transfert device vers host : %f seconde\n", tc/1000.0);
/* check the result */
for (i = 0; i < TM; i++){
for (j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] );
exit(1);
} else if ((i!=j) && (h_C[i*TM+j] != (float) (i + j + TM))) {
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
cudaEventDestroy(depart);
cudaEventDestroy(arret);
/* free the memory */
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
} |
ef20043ef4ae763e63f696de073b53b3fb897c48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // length of the shared-memory arrays in ints (only the first SIZE entries are used per block)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
hipMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
hipMalloc(&d_v_r,num_blocks*sizeof(int));
hipMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
int total = 0; // running sum of all elements, used to compute the mean
for(int i =0;i<n;i++){
total = total + h_v[i];
}
//cout<<total<<endl;
float mean = (float)total / n; // floating-point division so the mean is not truncated
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipMemcpy(d_v,h_v,n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_mean,&mean,sizeof(float),hipMemcpyHostToDevice);
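// the variance kernel leaves one partial sum of squared deviations per block in d_v_r;
// the follow-up sum_reduction launch (one block of thread_block_size threads) folds those
// partials into d_v_r[0], which works here because num_blocks equals SIZE (= 256).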
hipLaunchKernelGGL(( variance), dim3(num_blocks),dim3(thread_block_size), 0, 0, d_v,d_v_r,d_mean);
hipLaunchKernelGGL(( sum_reduction), dim3(1),dim3(thread_block_size), 0, 0, d_v_r,d_v_r);
hipMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),hipMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_gpu, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
}
| ef20043ef4ae763e63f696de073b53b3fb897c48.cu | #include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // length of the shared-memory arrays in ints (only the first SIZE entries are used per block)
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
void inititialise(int* v,int n){
for(int i =0;i<n;i++){
v[i]= rand()%1000;
}
}
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
cudaMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
cudaMalloc(&d_v_r,num_blocks*sizeof(int));
cudaMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
int minimum = 0;
for(int i =0;i<n;i++){
minimum = minimum+h_v[i];
}
//cout<<minimum<<endl;
float mean = minimum / n;
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(d_v,h_v,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_mean,&mean,sizeof(float),cudaMemcpyHostToDevice);
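// the variance kernel leaves one partial sum of squared deviations per block in d_v_r;
// the follow-up sum_reduction launch (one block of thread_block_size threads) folds those
// partials into d_v_r[0], which works here because num_blocks equals SIZE (= 256).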
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
cudaMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),cudaMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
return 0;
}
|
25cc77eca9db9c4742fa90845dfebb5b4bee654d.hip | // !!! This is a file automatically generated by hipify!!!
// Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <hip/hip_runtime.h>
// #include <rocblas.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
enum { Size = NT, Capacity = NT };
struct Storage { T shared[Capacity]; };
__device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
T* s = storage.shared;
s[tid] = x;
__syncthreads();
// Fold the data in half with each pass.
#pragma unroll
for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
if(tid + offset < count && tid < offset) {
// Read from the right half and store to the left half.
x = g(x, s[offset + tid]);
s[tid] = x;
}
__syncthreads();
}
T shuff;
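// Final warp-level stage: the 32 partial values left by the shared-memory folds are
// combined with register shuffles (__shfl_down_sync), so no more shared-memory
// traffic or __syncthreads is needed.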
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
if (tid + offset < count && tid < offset)
x = g(x, shuff);
}
return x;
}
};
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
typedef CTAReduce<NT, T, Rop> R;
__shared__ typename R::Storage storage;
int tid = threadIdx.x;
int idx = tid;
int col = blockIdx.x;
T curr;
// Each block works on a column
if (idx < num_rows)
curr = f(input[idx + col*num_rows]);
idx += NT;
while (idx < num_rows) {
curr = g(curr, f(input[idx + col*num_rows]));
idx += NT;
}
// Sum thread-totals over the CTA.
curr = R::reduce(tid, curr, storage, num_rows, g);
// Store result in out
if (tid == 0)
output[col] = curr;
}
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
__shared__ T s[NT];
int warps_per_block = NT / warp_size;
int row = blockDim.x * blockIdx.x + threadIdx.x;
int col = threadIdx.y;
T curr;
if (row < num_rows && col < num_cols) {
curr = f(input[row + col*num_rows]);
col += blockDim.y;
while (col < num_cols) {
curr = g(curr, f(input[row + col*num_rows]));
col += blockDim.y;
}
}
s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
__syncthreads();
// Reduce
if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
for (int i = 1; i < warps_per_block && i < num_cols; ++i)
curr = g(curr, s[i + threadIdx.x * warps_per_block]);
output[row] = curr;
}
}
struct ReduceHelper {
template<typename T, typename Iof, typename Rof>
static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, hipStream_t stream) {
int grid_size;
if (axis) {
grid_size = num_cols;
hipLaunchKernelGGL(( reduce_rows<128>), dim3(grid_size), dim3(128), 0, stream,
f, g, input, output, num_rows, num_cols);
} else {
dim3 tpb(warp_size, 128 / warp_size);
grid_size = (num_cols + warp_size - 1)/warp_size;
hipLaunchKernelGGL(( reduce_cols<128>), dim3(grid_size), dim3(tpb), 0, stream,
f, g, input, output, num_rows, num_cols);
}
}
};
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, hipStream_t stream) {
ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
hipStreamSynchronize(stream);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
return CTC_STATUS_EXECUTION_FAILED;
return CTC_STATUS_SUCCESS;
}
ctcStatus_t reduce_negate(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_exp(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_max(const float *input, float *output, int rows, int cols, bool axis, hipStream_t stream) {
return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
| 25cc77eca9db9c4742fa90845dfebb5b4bee654d.cu | // Includes, system
// #include <stdio.h>
// #include <stdlib.h>
// Includes, cuda
// #include <cuda_runtime.h>
// #include <cublas_v2.h>
// Includes, cuda helper functions
// #include <helper_cuda.h>
// For the functors
#include "detail/ctc_helper.h"
#include "ctc.h"
const int warp_size = 32;
template<int NT, typename T, typename Rop>
struct CTAReduce;
template<int NT, typename T, typename Rop>
struct CTAReduce {
enum { Size = NT, Capacity = NT };
struct Storage { T shared[Capacity]; };
__device__ static T reduce(int tid, T x, Storage& storage, int count, Rop g) {
T* s = storage.shared;
s[tid] = x;
__syncthreads();
// Fold the data in half with each pass.
#pragma unroll
for(int offset = NT / 2; offset >= warp_size; offset /= 2) {
if(tid + offset < count && tid < offset) {
// Read from the right half and store to the left half.
x = g(x, s[offset + tid]);
s[tid] = x;
}
__syncthreads();
}
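        // Finish with a warp-level reduction: __shfl_down_sync exchanges partials inside a
        // warp directly through registers, so no shared memory or __syncthreads is needed here.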
T shuff;
for (int offset = warp_size / 2; offset > 0; offset /= 2) {
shuff = __shfl_down_sync(0xFFFFFFFF, x, offset);
if (tid + offset < count && tid < offset)
x = g(x, shuff);
}
return x;
}
};
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_rows(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
typedef CTAReduce<NT, T, Rop> R;
__shared__ typename R::Storage storage;
int tid = threadIdx.x;
int idx = tid;
int col = blockIdx.x;
T curr;
// Each block works on a column
if (idx < num_rows)
curr = f(input[idx + col*num_rows]);
idx += NT;
while (idx < num_rows) {
curr = g(curr, f(input[idx + col*num_rows]));
idx += NT;
}
// Sum thread-totals over the CTA.
curr = R::reduce(tid, curr, storage, num_rows, g);
// Store result in out
if (tid == 0)
output[col] = curr;
}
template <int NT, typename Iop, typename Rop, typename T>
__global__ void reduce_cols(Iop f, Rop g, const T* input, T* output,
int num_rows, int num_cols) {
__shared__ T s[NT];
int warps_per_block = NT / warp_size;
int row = blockDim.x * blockIdx.x + threadIdx.x;
int col = threadIdx.y;
T curr;
if (row < num_rows && col < num_cols) {
curr = f(input[row + col*num_rows]);
col += blockDim.y;
while (col < num_cols) {
curr = g(curr, f(input[row + col*num_rows]));
col += blockDim.y;
}
}
s[threadIdx.x * warps_per_block + threadIdx.y] = curr;
__syncthreads();
// Reduce
if (threadIdx.y == 0 && row < num_rows) {
#pragma unroll
for (int i = 1; i < warps_per_block && i < num_cols; ++i)
curr = g(curr, s[i + threadIdx.x * warps_per_block]);
output[row] = curr;
}
}
struct ReduceHelper {
template<typename T, typename Iof, typename Rof>
static void impl(Iof f, Rof g, const T* input, T* output, int num_rows, int num_cols, bool axis, cudaStream_t stream) {
int grid_size;
if (axis) {
grid_size = num_cols;
reduce_rows<128><<<grid_size, 128, 0, stream>>>
(f, g, input, output, num_rows, num_cols);
} else {
dim3 tpb(warp_size, 128 / warp_size);
grid_size = (num_cols + warp_size - 1)/warp_size;
reduce_cols<128><<<grid_size, tpb, 0, stream>>>
(f, g, input, output, num_rows, num_cols);
}
}
};
template<typename T, typename Iof, typename Rof>
ctcStatus_t reduce(Iof f, Rof g, const T* input, T* output, int rows, int cols, bool axis, cudaStream_t stream) {
ReduceHelper::impl(f, g, input, output, rows, cols, axis, stream);
cudaStreamSynchronize(stream);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
return CTC_STATUS_EXECUTION_FAILED;
return CTC_STATUS_SUCCESS;
}
ctcStatus_t reduce_negate(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::negate<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_exp(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::exponential<float>(), ctc_helper::add<float>(), input, output, rows, cols, axis, stream);
}
ctcStatus_t reduce_max(const float *input, float *output, int rows, int cols, bool axis, cudaStream_t stream) {
return reduce(ctc_helper::identity<float>(), ctc_helper::maximum<float>(),input, output, rows, cols, axis, stream);
}
|
42b278f6f6d67c434670f77375d302990920b271.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by pierfied on 10/5/20.
//
#include "wigner.cuh"
// Compute the log factorial via the gamma function.
__device__ double lnfac(double x) {
return lgamma(x + 1);
}
// Compute the recursion seed for the case d(j, -j, m, beta).
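// In closed form: d(j, -j, m, beta) = sqrt((2j)! / ((j+m)!(j-m)!)) * cos(beta/2)^(j-m) * sin(beta/2)^(j+m),
// evaluated in log space so the factorials do not overflow for large j.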
__device__ double emmRecursionSeed(double j, double m, double beta) {
double prefac = (lnfac(2 * j) - lnfac(j + m) - lnfac(j - m)) / 2;
double cosfac = (j - m) * log(cos(beta / 2));
double sinfac = (j + m) * log(sin(beta / 2));
double d = exp(prefac + cosfac + sinfac);
return d;
}
// Compute the recursion seed for the case d(j, m, j, beta).
__device__ double spinRecursionSeed(double j, double m, double beta) {
double prefac = (lnfac(2 * j) - lnfac(j + m) - lnfac(j - m)) / 2;
double cosfac = (j + m) * log(cos(beta / 2));
double sinfac = (j - m) * log(sin(beta / 2));
double d = exp(prefac + cosfac + sinfac);
return d;
}
__global__ void recursionCoeffKernel(int lmax, int spin, double *fac1, double *fac2, double *fac3) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int nEll = lmax + 1;
for (int i = index; i < nEll * nEll; i += stride) {
// Get the l & m values from the index.
int im = i / nEll;
int il = i % nEll;
double m = im;
double l = il;
// Only compute the coefficients for l >= m.
if (l < m) continue;
// Compute the recursion coefficients.
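        // Coefficients for l >= m are stored in a packed triangular layout of
        // (lmax + 1) * (lmax + 2) / 2 entries; 'ind' is the offset of (l, m) in that layout.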
int ind = im * nEll - (im - 1) * im / 2 + (il - im);
double denomFac = sqrt(((l + 1) * (l + 1) - m * m) * ((l + 1) * (l + 1) - spin * spin));
fac1[ind] = (l + 1) * (2 * l + 1) / denomFac;
fac2[ind] = m * spin / (l * (l + 1));
fac3[ind] = (l + 1) * sqrt((l * l - m * m) * (l * l - spin * spin)) / (l * denomFac);
}
// Set the second and third l,m = 0 coefficients to 0 because of divide by zero error.
fac2[0] = 0;
fac3[0] = 0;
}
void computeRecursionCoeffs(int lmax, int spin, double **fac1, double **fac2, double **fac3) {
// Create the arrays for the coefficients.
int facSize = (lmax + 1) * (lmax + 2) / 2;
hipMallocManaged(fac1, sizeof(double) * facSize);
hipMallocManaged(fac2, sizeof(double) * facSize);
hipMallocManaged(fac3, sizeof(double) * facSize);
// Launch the kernel to compute the coefficients.
int blockSize, gridSize;
hipOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, recursionCoeffKernel, 0, 0);
hipLaunchKernelGGL(( recursionCoeffKernel), dim3(gridSize), dim3(blockSize), 0, 0, lmax, spin, *fac1, *fac2, *fac3);
}
| 42b278f6f6d67c434670f77375d302990920b271.cu | //
// Created by pierfied on 10/5/20.
//
#include "wigner.cuh"
// Compute the log factorial via the gamma function.
__device__ double lnfac(double x) {
return lgamma(x + 1);
}
// Compute the recursion seed for the case d(j, -j, m, beta).
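// In closed form: d(j, -j, m, beta) = sqrt((2j)! / ((j+m)!(j-m)!)) * cos(beta/2)^(j-m) * sin(beta/2)^(j+m),
// evaluated in log space so the factorials do not overflow for large j.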
__device__ double emmRecursionSeed(double j, double m, double beta) {
double prefac = (lnfac(2 * j) - lnfac(j + m) - lnfac(j - m)) / 2;
double cosfac = (j - m) * log(cos(beta / 2));
double sinfac = (j + m) * log(sin(beta / 2));
double d = exp(prefac + cosfac + sinfac);
return d;
}
// Compute the recursion seed for the case d(j, m, j, beta).
__device__ double spinRecursionSeed(double j, double m, double beta) {
double prefac = (lnfac(2 * j) - lnfac(j + m) - lnfac(j - m)) / 2;
double cosfac = (j + m) * log(cos(beta / 2));
double sinfac = (j - m) * log(sin(beta / 2));
double d = exp(prefac + cosfac + sinfac);
return d;
}
__global__ void recursionCoeffKernel(int lmax, int spin, double *fac1, double *fac2, double *fac3) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int nEll = lmax + 1;
for (int i = index; i < nEll * nEll; i += stride) {
// Get the l & m values from the index.
int im = i / nEll;
int il = i % nEll;
double m = im;
double l = il;
// Only compute the coefficients for l >= m.
if (l < m) continue;
// Compute the recursion coefficients.
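        // Coefficients for l >= m are stored in a packed triangular layout of
        // (lmax + 1) * (lmax + 2) / 2 entries; 'ind' is the offset of (l, m) in that layout.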
int ind = im * nEll - (im - 1) * im / 2 + (il - im);
double denomFac = sqrt(((l + 1) * (l + 1) - m * m) * ((l + 1) * (l + 1) - spin * spin));
fac1[ind] = (l + 1) * (2 * l + 1) / denomFac;
fac2[ind] = m * spin / (l * (l + 1));
fac3[ind] = (l + 1) * sqrt((l * l - m * m) * (l * l - spin * spin)) / (l * denomFac);
}
// Set the second and third l,m = 0 coefficients to 0 because of divide by zero error.
fac2[0] = 0;
fac3[0] = 0;
}
void computeRecursionCoeffs(int lmax, int spin, double **fac1, double **fac2, double **fac3) {
// Create the arrays for the coefficients.
int facSize = (lmax + 1) * (lmax + 2) / 2;
cudaMallocManaged(fac1, sizeof(double) * facSize);
cudaMallocManaged(fac2, sizeof(double) * facSize);
cudaMallocManaged(fac3, sizeof(double) * facSize);
// Launch the kernel to compute the coefficients.
int blockSize, gridSize;
cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, recursionCoeffKernel, 0, 0);
recursionCoeffKernel<<<gridSize, blockSize>>>(lmax, spin, *fac1, *fac2, *fac3);
}
|
3aebf6cc206f41d1975638607c161de4acdbbe9e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/concat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe9 {
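// Each thread handles one element of a bottom blob: 'forward' copies it into its slot in the
// top blob, shifted by offset_concat_axis along the concatenation axis, and the backward pass
// reuses the same index mapping to scatter the top gradient back into the bottom blob.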
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) { return; }
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>),
        dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
        top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
offset_concat_axis += bottom_concat_axis;
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>),
          dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          nthreads, top_diff, kForward, num_concats_, concat_input_size_,
          top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe9
| 3aebf6cc206f41d1975638607c161de4acdbbe9e.cu | #include <vector>
#include "caffe/layers/concat_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe9 {
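// Each thread handles one element of a bottom blob: 'forward' copies it into its slot in the
// top blob, shifted by offset_concat_axis along the concatenation axis, and the backward pass
// reuses the same index mapping to scatter the top gradient back into the bottom blob.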
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom.size() == 1) { return; }
Dtype* top_data = top[0]->mutable_gpu_data();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = true;
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
offset_concat_axis += bottom_concat_axis;
}
}
template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bottom.size() == 1) { return; }
const Dtype* top_diff = top[0]->gpu_diff();
int offset_concat_axis = 0;
const int top_concat_axis = top[0]->shape(concat_axis_);
const bool kForward = false;
for (int i = 0; i < bottom.size(); ++i) {
const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
const int nthreads = bottom_concat_size * num_concats_;
Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, top_diff, kForward, num_concats_, concat_input_size_,
top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
}
offset_concat_axis += bottom_concat_axis;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe9
|
da74cd3ff5ba794657cc68f7dc8ee264785de4d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
Matrix* copy(Matrix* src, Matrix* dest)
{
int n = src->rows * src->cols;
int threads = threads_per_block(n);
int blocks = num_blocks(n, threads);
hipLaunchKernelGGL(( do_copy), dim3(blocks),dim3(threads), 0, 0, src, dest);
hipDeviceSynchronize();
return dest;
}
__global__ void do_copy(Matrix* src, Matrix* dest)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < src->rows * src->stride)
{
dest->data[index] = src->data[index];
}
}
// CONVOLUTION
Matrix* convolve(Matrix* M, CONV_KERNEL_TYPE ck_type, Matrix* custom_kernel,
BORDER_TYPE b, float* bargs, int anchor_x, int anchor_y)
{
Matrix* dest = init_matrix(M->rows, M->cols);
int n = M->rows * M->cols;
int threads = threads_per_block(n);
Matrix* ck;
switch (ck_type)
{
case BOX:
ck = scalar(ones(3, 3), 1.0 / 9);
break;
case CUSTOM:
        ck = custom_kernel;
        break;
default:
// printf("no kernel to convolve with!\n");
return NULL;
}
// printf("convolving with kernel: \n");
print_matrix(ck);
hipLaunchKernelGGL(( do_convolution), dim3(num_blocks(n, threads)),dim3(threads), 0, 0, M, ck, dest, b, bargs, anchor_x, anchor_y);
hipDeviceSynchronize();
free_matrix(ck);
free_matrix(M);
return dest;
}
__global__
void do_convolution(Matrix* M, Matrix* ck, Matrix* dest, BORDER_TYPE b, float* bargs, int anchor_x, int anchor_y)
{
int target = blockIdx.x * blockDim.x + threadIdx.x;
if (target < M->rows * M->stride)
{
// printf("target = %d\n", target);
// anchor offset
int anchor_index = target - anchor_y * M->stride - anchor_y;
// printf("anchor_index = %d\n", anchor_index);
// calculate row and column for edge checking
int roi_row = anchor_index / M->stride - anchor_y - ck->rows / 2;
int roi_col = anchor_index % M->stride - anchor_x - ck->cols / 2;
// printf("roi_row, roi_col = %d, %d\n", roi_row, roi_col);
int ck_index = 0;
float sum = 0;
for (int row = roi_row; row < roi_row + ck->rows ; row++)
{
// printf(" row = %d\n", row);
for (int col = roi_col; col < roi_col + ck->cols; col++)
{
// printf(" col = %d\n", col);
float val, prod;
val = ((col < 0) || (col >= M->cols) ||
(row < 0) || (row >= M->rows)) ?
border_val(M, row * M->stride + col, b, bargs) :
M->data[row * M->stride + col];
// printf(" val = %f\n", val);
prod = val * ck->data[ck_index++];
// printf(" prod = %f\n", prod);
sum += prod;
// printf(" sum = %f\n", sum);
}
}
__syncthreads();
dest->data[target] = sum;
// printf("result: %f\n", dest->data[target]);
}
}
__global__
void do_on_submatrix(Matrix* M, int height, int width, int start_index)
{
Matrix s = submatrix(M, height, width, start_index);
for (int i = 0; i < s.rows; i++)
{
for (int j = 0; j < s.cols; j++)
{
s.data[i*s.stride + j] = 2.0;
}
}
}
__device__
Matrix submatrix(Matrix* M, int height, int width, int start_index)
{
Matrix N;
N.rows = height;
N.cols = width;
N.stride = M->stride;
N.start = start_index;
N.data = (N.start >= 0) ? &M->data[start_index] : &M->data[0];
return N;
}
// TRANSFORMS
Matrix* translate(Matrix* M, int dx, int dy, int bg_value)
{
Matrix* t_mat = identity_matrix(3);
t_mat->data[2] = dx;
t_mat->data[5] = dy;
    return affine_transform(M, t_mat, 0, 0, bg_value);
}
Matrix* rotate(Matrix* M, float radians, int origin_x, int origin_y,
int bg_value)
{
if (origin_x < 0)
{
origin_x = M->cols / 2;
}
if (origin_y < 0)
{
origin_y = M->rows / 2;
}
Matrix* r_mat = identity_matrix(3);
r_mat->data[0] = cosf(radians);
r_mat->data[1] = sinf(radians) * -1;
r_mat->data[3] = sinf(radians);
r_mat->data[4] = cosf(radians);
    return affine_transform(M, r_mat, origin_x, origin_y, bg_value);
}
Matrix* affine_transform(Matrix* M, Matrix* t_mat, int origin_x, int origin_y, int bg_value)
{
Matrix* m_trans = init_matrix(M->rows, M->cols, bg_value);
printf("transformation matrix:\n");
print_matrix(t_mat);
int n = M->rows * M->cols;
int threads = threads_per_block(n);
hipLaunchKernelGGL(( do_affine_transform), dim3(num_blocks(n, threads)), dim3(threads), 0, 0,
M, t_mat, m_trans, origin_x, origin_y);
hipDeviceSynchronize();
free_matrix(t_mat);
free_matrix(M);
return m_trans;
}
// t_mat assumed to be 3x3 transformation matrix
__global__
void do_affine_transform(Matrix* M, Matrix* t_mat, Matrix* dest,
int origin_x, int origin_y)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int x = index % M->stride;
int y = index / M->stride;
if ((x >= 0) && (y >= 0) &&
(x < M->cols) && (y < M->rows))
{
x -= origin_x;
y -= origin_y;
float xp = x * t_mat->data[0] + y * t_mat->data[1] + t_mat->data[2];
float yp = x * t_mat->data[3] + y * t_mat->data[4] + t_mat->data[5];
xp += origin_x;
yp += origin_y;
int nx = floor(xp);
int ny = floor(yp);
int ni = ny * M->stride + nx;
if ((nx >= 0) && (ny >= 0) &&
(nx < M->cols) && (ny < M->rows))
{
if ((nx == xp) && (ny == yp)) // don't need interpolate
{
dest->data[index] = M->data[ni];
}
else if ((nx < M->cols - 1) && (ny < M->rows - 1))
{
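                // Bilinear interpolation: dx and dy are the fractional offsets of (xp, yp)
                // within the cell, so the four neighbouring pixels are blended with weights
                // (1-dx)(1-dy), dx(1-dy), (1-dx)dy and dx*dy.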
float dx = xp - nx;
float dy = yp - ny;
dest->data[index] = (1 - dx) * (1 - dy) * M->data[ni] +
dx * (1 - dy) * M->data[ni + 1] +
(1 - dx) * dy * M->data[ni + M->stride] +
dx * dy * M->data[ni + M->stride + 1];
}
}
}
}
// HELPERS
__device__
float border_val(Matrix* M, int target_index, BORDER_TYPE b, float* args)
{
switch (b)
{
case BLACK: return 0;
case WHITE: return 255;
case VALUE: return args[0];
default: return 0;
}
} | da74cd3ff5ba794657cc68f7dc8ee264785de4d9.cu | Matrix* copy(Matrix* src, Matrix* dest)
{
int n = src->rows * src->cols;
int threads = threads_per_block(n);
int blocks = num_blocks(n, threads);
do_copy<<<blocks,threads>>>(src, dest);
cudaDeviceSynchronize();
return dest;
}
__global__ void do_copy(Matrix* src, Matrix* dest)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < src->rows * src->stride)
{
dest->data[index] = src->data[index];
}
}
// CONVOLUTION
Matrix* convolve(Matrix* M, CONV_KERNEL_TYPE ck_type, Matrix* custom_kernel,
BORDER_TYPE b, float* bargs, int anchor_x, int anchor_y)
{
Matrix* dest = init_matrix(M->rows, M->cols);
int n = M->rows * M->cols;
int threads = threads_per_block(n);
Matrix* ck;
switch (ck_type)
{
case BOX:
ck = scalar(ones(3, 3), 1.0 / 9);
break;
case CUSTOM:
        ck = custom_kernel;
        break;
default:
// printf("no kernel to convolve with!\n");
return NULL;
}
// printf("convolving with kernel: \n");
print_matrix(ck);
do_convolution<<<num_blocks(n, threads),threads>>>(M, ck, dest, b, bargs, anchor_x, anchor_y);
cudaDeviceSynchronize();
free_matrix(ck);
free_matrix(M);
return dest;
}
__global__
void do_convolution(Matrix* M, Matrix* ck, Matrix* dest, BORDER_TYPE b, float* bargs, int anchor_x, int anchor_y)
{
int target = blockIdx.x * blockDim.x + threadIdx.x;
if (target < M->rows * M->stride)
{
// printf("target = %d\n", target);
// anchor offset
int anchor_index = target - anchor_y * M->stride - anchor_y;
// printf("anchor_index = %d\n", anchor_index);
// calculate row and column for edge checking
int roi_row = anchor_index / M->stride - anchor_y - ck->rows / 2;
int roi_col = anchor_index % M->stride - anchor_x - ck->cols / 2;
// printf("roi_row, roi_col = %d, %d\n", roi_row, roi_col);
int ck_index = 0;
float sum = 0;
for (int row = roi_row; row < roi_row + ck->rows ; row++)
{
// printf(" row = %d\n", row);
for (int col = roi_col; col < roi_col + ck->cols; col++)
{
// printf(" col = %d\n", col);
float val, prod;
val = ((col < 0) || (col >= M->cols) ||
(row < 0) || (row >= M->rows)) ?
border_val(M, row * M->stride + col, b, bargs) :
M->data[row * M->stride + col];
// printf(" val = %f\n", val);
prod = val * ck->data[ck_index++];
// printf(" prod = %f\n", prod);
sum += prod;
// printf(" sum = %f\n", sum);
}
}
__syncthreads();
dest->data[target] = sum;
// printf("result: %f\n", dest->data[target]);
}
}
__global__
void do_on_submatrix(Matrix* M, int height, int width, int start_index)
{
Matrix s = submatrix(M, height, width, start_index);
for (int i = 0; i < s.rows; i++)
{
for (int j = 0; j < s.cols; j++)
{
s.data[i*s.stride + j] = 2.0;
}
}
}
__device__
Matrix submatrix(Matrix* M, int height, int width, int start_index)
{
Matrix N;
N.rows = height;
N.cols = width;
N.stride = M->stride;
N.start = start_index;
N.data = (N.start >= 0) ? &M->data[start_index] : &M->data[0];
return N;
}
// TRANSFORMS
Matrix* translate(Matrix* M, int dx, int dy, int bg_value)
{
Matrix* t_mat = identity_matrix(3);
t_mat->data[2] = dx;
t_mat->data[5] = dy;
    return affine_transform(M, t_mat, 0, 0, bg_value);
}
Matrix* rotate(Matrix* M, float radians, int origin_x, int origin_y,
int bg_value)
{
if (origin_x < 0)
{
origin_x = M->cols / 2;
}
if (origin_y < 0)
{
origin_y = M->rows / 2;
}
Matrix* r_mat = identity_matrix(3);
r_mat->data[0] = cosf(radians);
r_mat->data[1] = sinf(radians) * -1;
r_mat->data[3] = sinf(radians);
r_mat->data[4] = cosf(radians);
    return affine_transform(M, r_mat, origin_x, origin_y, bg_value);
}
Matrix* affine_transform(Matrix* M, Matrix* t_mat, int origin_x, int origin_y, int bg_value)
{
Matrix* m_trans = init_matrix(M->rows, M->cols, bg_value);
printf("transformation matrix:\n");
print_matrix(t_mat);
int n = M->rows * M->cols;
int threads = threads_per_block(n);
do_affine_transform<<<num_blocks(n, threads), threads>>>
(M, t_mat, m_trans, origin_x, origin_y);
cudaDeviceSynchronize();
free_matrix(t_mat);
free_matrix(M);
return m_trans;
}
// t_mat assumed to be 3x3 transformation matrix
__global__
void do_affine_transform(Matrix* M, Matrix* t_mat, Matrix* dest,
int origin_x, int origin_y)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int x = index % M->stride;
int y = index / M->stride;
if ((x >= 0) && (y >= 0) &&
(x < M->cols) && (y < M->rows))
{
x -= origin_x;
y -= origin_y;
float xp = x * t_mat->data[0] + y * t_mat->data[1] + t_mat->data[2];
float yp = x * t_mat->data[3] + y * t_mat->data[4] + t_mat->data[5];
xp += origin_x;
yp += origin_y;
int nx = floor(xp);
int ny = floor(yp);
int ni = ny * M->stride + nx;
if ((nx >= 0) && (ny >= 0) &&
(nx < M->cols) && (ny < M->rows))
{
if ((nx == xp) && (ny == yp)) // don't need interpolate
{
dest->data[index] = M->data[ni];
}
else if ((nx < M->cols - 1) && (ny < M->rows - 1))
{
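                // Bilinear interpolation: dx and dy are the fractional offsets of (xp, yp)
                // within the cell, so the four neighbouring pixels are blended with weights
                // (1-dx)(1-dy), dx(1-dy), (1-dx)dy and dx*dy.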
float dx = xp - nx;
float dy = yp - ny;
dest->data[index] = (1 - dx) * (1 - dy) * M->data[ni] +
dx * (1 - dy) * M->data[ni + 1] +
(1 - dx) * dy * M->data[ni + M->stride] +
dx * dy * M->data[ni + M->stride + 1];
}
}
}
}
// HELPERS
__device__
float border_val(Matrix* M, int target_index, BORDER_TYPE b, float* args)
{
switch (b)
{
case BLACK: return 0;
case WHITE: return 255;
case VALUE: return args[0];
default: return 0;
}
} |
e4a12a48f5a53eb40d666e64896ccbf615dbe7d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void FillTensorGpuForward(const int n, const T* value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value[0]; }
}
}; // namespace
template<typename T>
class FillTensorGpuKernel final : public user_op::OpKernel {
public:
FillTensorGpuKernel() = default;
~FillTensorGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const user_op::Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
const int32_t elem_cnt = in->shape_view().elem_cnt();
RUN_CUDA_KERNEL((FillTensorGpuForward<T>), ctx->stream(), elem_cnt, elem_cnt, value->dptr<T>(),
out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FILL_CUDA_KERNEL(dtype) \
REGISTER_USER_KERNEL("fill_tensor_") \
.SetCreateFn<FillTensorGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_FILL_CUDA_KERNEL(float)
REGISTER_FILL_CUDA_KERNEL(half)
REGISTER_FILL_CUDA_KERNEL(double)
REGISTER_FILL_CUDA_KERNEL(int8_t)
REGISTER_FILL_CUDA_KERNEL(int32_t)
REGISTER_FILL_CUDA_KERNEL(int64_t)
} // namespace oneflow
| e4a12a48f5a53eb40d666e64896ccbf615dbe7d1.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/common/nd_index_offset_helper.h"
#include "oneflow/core/kernel/new_kernel_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void FillTensorGpuForward(const int n, const T* value, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value[0]; }
}
}; // namespace
template<typename T>
class FillTensorGpuKernel final : public user_op::OpKernel {
public:
FillTensorGpuKernel() = default;
~FillTensorGpuKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const user_op::Tensor* value = ctx->Tensor4ArgNameAndIndex("value", 0);
const int32_t elem_cnt = in->shape_view().elem_cnt();
RUN_CUDA_KERNEL((FillTensorGpuForward<T>), ctx->stream(), elem_cnt, elem_cnt, value->dptr<T>(),
out->mut_dptr<T>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_FILL_CUDA_KERNEL(dtype) \
REGISTER_USER_KERNEL("fill_tensor_") \
.SetCreateFn<FillTensorGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \
&& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value));
REGISTER_FILL_CUDA_KERNEL(float)
REGISTER_FILL_CUDA_KERNEL(half)
REGISTER_FILL_CUDA_KERNEL(double)
REGISTER_FILL_CUDA_KERNEL(int8_t)
REGISTER_FILL_CUDA_KERNEL(int32_t)
REGISTER_FILL_CUDA_KERNEL(int64_t)
} // namespace oneflow
|
d4a9808a36e02eb0df1bf36d41a7aa28599ea6f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pli11
/*
#ifndef SEARCH_RESULTS
#define SEARCH_RESULTS 4
#endif
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
} result[SEARCH_RESULTS];
} search_results;
typedef struct
{
uint32_t uint32s[32 / sizeof(uint32_t)];
} hash32_t;
*/
//pli11
//
#include "cuda_helper.h"
#include "CUDAMiner_cuda.h"
#include "stdio.h"
#include "nvm_til.h"
// Inner loop for prog_seed 3000
__device__ __forceinline__ void progPowLoop(const uint32_t loop,
uint32_t mix[PROGPOW_REGS],
const dag_t *g_dag,
const uint32_t c_dag[PROGPOW_CACHE_WORDS],
const bool hack_false)
{
dag_t data_dag;
uint32_t offset, data;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES-1);
// global load
offset = __shfl_sync(0xFFFFFFFF, mix[0], loop%PROGPOW_LANES, PROGPOW_LANES);
offset %= PROGPOW_DAG_ELEMENTS;
offset = offset * PROGPOW_LANES + (lane_id ^ loop) % PROGPOW_LANES;
data_dag = g_dag[offset];
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
// cache load 0
offset = mix[12] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[26] = ROTR32(mix[26], 17) ^ data;
// random math 0
data = mix[13] ^ mix[3];
mix[9] = ROTL32(mix[9], 17) ^ data;
// cache load 1
offset = mix[1] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[15] = ROTL32(mix[15], 15) ^ data;
// random math 1
data = mix[24] ^ mix[10];
mix[16] = (mix[16] * 33) + data;
// cache load 2
offset = mix[29] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[25] = (mix[25] ^ data) * 33;
// random math 2
data = ROTL32(mix[4], mix[12]);
mix[12] = ROTR32(mix[12], 13) ^ data;
// cache load 3
offset = mix[6] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[7] = ROTL32(mix[7], 8) ^ data;
// random math 3
data = mix[8] * mix[24];
mix[31] = (mix[31] ^ data) * 33;
// cache load 4
offset = mix[11] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[27] = ROTL32(mix[27], 2) ^ data;
// random math 4
data = popcount(mix[28]) + popcount(mix[17]);
mix[5] = (mix[5] * 33) + data;
// cache load 5
offset = mix[18] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[11] = ROTR32(mix[11], 28) ^ data;
// random math 5
data = mix[31] ^ mix[12];
mix[17] = (mix[17] ^ data) * 33;
// cache load 6
offset = mix[8] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[29] = ROTR32(mix[29], 10) ^ data;
// random math 6
data = popcount(mix[4]) + popcount(mix[12]);
mix[10] = (mix[10] * 33) + data;
// cache load 7
offset = mix[14] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[6] = (mix[6] ^ data) * 33;
// random math 7
data = min(mix[10], mix[20]);
mix[24] = (mix[24] * 33) + data;
// cache load 8
offset = mix[17] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[14] = (mix[14] ^ data) * 33;
// random math 8
data = mix[0] * mix[10];
mix[19] = ROTR32(mix[19], 23) ^ data;
// cache load 9
offset = mix[9] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[23] = (mix[23] * 33) + data;
// random math 9
data = min(mix[22], mix[28]);
mix[1] = ROTR32(mix[1], 4) ^ data;
// cache load 10
offset = mix[0] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[18] = (mix[18] ^ data) * 33;
// random math 10
data = ROTL32(mix[22], mix[9]);
mix[21] = ROTR32(mix[21], 5) ^ data;
// random math 11
data = min(mix[26], mix[4]);
mix[22] = (mix[22] * 33) + data;
// random math 12
data = min(mix[19], mix[30]);
mix[8] = ROTL32(mix[8], 26) ^ data;
// random math 13
data = mix[12] ^ mix[24];
mix[3] = ROTL32(mix[3], 30) ^ data;
// random math 14
data = min(mix[8], mix[13]);
mix[28] = ROTL32(mix[28], 31) ^ data;
// random math 15
data = ROTL32(mix[12], mix[9]);
mix[30] = ROTL32(mix[30], 31) ^ data;
// random math 16
data = ROTL32(mix[28], mix[27]);
mix[2] = (mix[2] * 33) + data;
// random math 17
data = ROTL32(mix[30], mix[28]);
mix[20] = ROTL32(mix[20], 12) ^ data;
// consume global load data
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
mix[0] = (mix[0] * 33) + data_dag.s[0];
mix[4] = ROTL32(mix[4], 13) ^ data_dag.s[1];
mix[13] = (mix[13] ^ data_dag.s[2]) * 33;
mix[0] = ROTR32(mix[0], 12) ^ data_dag.s[3];
}
//
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
__device__ __constant__ const uint32_t keccakf_rndc[24] = {
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
};
// Implementation of the permutation Keccakf with width 800.
__device__ __forceinline__ void keccak_f800_round(uint32_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint32_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL32(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL32(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
//pli11
/*
__device__ __forceinline__ uint32_t cuda_swab32(const uint32_t x)
{
return __byte_perm(x, x, 0x0123);
}
*/
// Keccak - implemented as a variant of SHAKE
// The width is 800, with a bitrate of 576, a capacity of 224, and no padding
// Only need 64 bits of output for mining
/*
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
keccak_f800_round(st, 21);
return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
}
*/
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
// st[8] = split_result(seed);
// st[9] = split_result(seed>>32);
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
// last round can be simplified due to partial output
keccak_f800_round(st, 21);
// Byte swap so byte 0 of hash is MSB of result
//return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
//return combine_result(cuda_swab32(st[1]),cuda_swab32(st[0]));
return 0;
}
#define fnv1a(h, d) (h = (uint32_t(h) ^ uint32_t(d)) * uint32_t(0x1000193))
typedef struct {
uint32_t z, w, jsr, jcong;
} kiss99_t;
// KISS99 is simple, fast, and passes the TestU01 suite
// https://en.wikipedia.org/wiki/KISS_(algorithm)
// http://www.cse.yorku.ca/~oz/marsaglia-rng.html
__device__ __forceinline__ uint32_t kiss99(kiss99_t &st)
{
st.z = 36969 * (st.z & 65535) + (st.z >> 16);
st.w = 18000 * (st.w & 65535) + (st.w >> 16);
uint32_t MWC = ((st.z << 16) + st.w);
st.jsr ^= (st.jsr << 17);
st.jsr ^= (st.jsr >> 13);
st.jsr ^= (st.jsr << 5);
st.jcong = 69069 * st.jcong + 1234567;
return ((MWC^st.jcong) + st.jsr);
}
__device__ __forceinline__ void fill_mix(uint64_t seed, uint32_t lane_id, uint32_t mix[PROGPOW_REGS])
{
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
uint32_t fnv_hash = 0x811c9dc5;
kiss99_t st;
//st.z = fnv1a(fnv_hash, split_result(seed));
//st.w = fnv1a(fnv_hash, split_result(seed>>32));
st.z = fnv1a(fnv_hash, seed);
st.w = fnv1a(fnv_hash, seed>>32);
st.jsr = fnv1a(fnv_hash, lane_id);
st.jcong = fnv1a(fnv_hash, lane_id);
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
mix[i] = kiss99(st);
}
__device__ float NVM_log[LOG_SIZE_16M];
__device__ float NVM_flag[FLAG_SIZE_1M];
__global__ void
progpow_search(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false
)
{
__shared__ uint32_t c_dag[PROGPOW_CACHE_WORDS];
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t const nonce = start_nonce + gid;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES - 1);
// Load the first portion of the DAG into the cache
for (uint32_t word = threadIdx.x*PROGPOW_DAG_LOADS; word < PROGPOW_CACHE_WORDS; word += blockDim.x*PROGPOW_DAG_LOADS)
{
dag_t load = g_dag[word/PROGPOW_DAG_LOADS];
for(int i=0; i<PROGPOW_DAG_LOADS; i++)
c_dag[word + i] = load.s[i];
}
hash32_t digest;
for (int i = 0; i < 8; i++)
digest.uint32s[i] = 0;
// keccak(header..nonce)
uint64_t seed = keccak_f800(header, nonce, digest);
__syncthreads();
#pragma unroll 1
for (uint32_t h = 0; h < PROGPOW_LANES; h++)
{
uint32_t mix[PROGPOW_REGS];
// share the hash's seed across all lanes
uint64_t hash_seed = __shfl_sync(0xFFFFFFFF, seed, h, PROGPOW_LANES);
// initialize mix for all lanes
fill_mix(hash_seed, lane_id, mix);
#pragma unroll 1
for (uint32_t l = 0; l < PROGPOW_CNT_DAG; l++)
progPowLoop(l, mix, g_dag, c_dag, hack_false);
// Reduce mix data to a per-lane 32-bit digest
uint32_t digest_lane = 0x811c9dc5;
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
fnv1a(digest_lane, mix[i]);
// Reduce all lanes to a single 256-bit digest
hash32_t digest_temp;
#pragma unroll
for (int i = 0; i < 8; i++)
digest_temp.uint32s[i] = 0x811c9dc5;
for (int i = 0; i < PROGPOW_LANES; i += 8)
#pragma unroll
for (int j = 0; j < 8; j++)
fnv1a(digest_temp.uint32s[j], __shfl_sync(0xFFFFFFFF, digest_lane, i + j, PROGPOW_LANES));
if (h == lane_id)
digest = digest_temp;
}
// keccak(header .. keccak(header..nonce) .. digest);
if (keccak_f800(header, seed, digest) >= target)
return;
uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff);
if (index >= SEARCH_RESULTS)
return;
ST_WT_U32(&NVM_log[index],g_output->result[index].gid);
MEM_FENCE; __syncthreads();
SET_NVM_FLAG(1);
//g_output->result[index].gid = gid;
asm("st.global.wt.u32 [%0], %1;" :: "l"(&g_output->result[index].gid ), "r"(gid) : "memory");
#pragma unroll
for (int i = 0; i < 8; i++){
//g_output->result[index].mix[i] = digest.uint32s[i];
asm("st.global.wt.u32 [%0], %1;" :: "l"(&g_output->result[index].mix[i] ), "r"(digest.uint32s[i]) : "memory");
}
MEM_FENCE;__syncthreads();
SET_NVM_FLAG(2);
}
//pli11
void search_kernel(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false,
uint32_t blocks,
uint32_t threads,
hipStream_t stream
)
{
hipLaunchKernelGGL(( progpow_search) , dim3(blocks), dim3(threads), 0, stream , start_nonce,header,target,g_dag,g_output,hack_false);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
| d4a9808a36e02eb0df1bf36d41a7aa28599ea6f6.cu | //pli11
/*
#ifndef SEARCH_RESULTS
#define SEARCH_RESULTS 4
#endif
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
} result[SEARCH_RESULTS];
} search_results;
typedef struct
{
uint32_t uint32s[32 / sizeof(uint32_t)];
} hash32_t;
*/
//pli11
//
#include "cuda_helper.h"
#include "CUDAMiner_cuda.h"
#include "stdio.h"
#include "nvm_til.h"
// Inner loop for prog_seed 3000
__device__ __forceinline__ void progPowLoop(const uint32_t loop,
uint32_t mix[PROGPOW_REGS],
const dag_t *g_dag,
const uint32_t c_dag[PROGPOW_CACHE_WORDS],
const bool hack_false)
{
dag_t data_dag;
uint32_t offset, data;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES-1);
// global load
offset = __shfl_sync(0xFFFFFFFF, mix[0], loop%PROGPOW_LANES, PROGPOW_LANES);
offset %= PROGPOW_DAG_ELEMENTS;
offset = offset * PROGPOW_LANES + (lane_id ^ loop) % PROGPOW_LANES;
data_dag = g_dag[offset];
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
// cache load 0
offset = mix[12] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[26] = ROTR32(mix[26], 17) ^ data;
// random math 0
data = mix[13] ^ mix[3];
mix[9] = ROTL32(mix[9], 17) ^ data;
// cache load 1
offset = mix[1] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[15] = ROTL32(mix[15], 15) ^ data;
// random math 1
data = mix[24] ^ mix[10];
mix[16] = (mix[16] * 33) + data;
// cache load 2
offset = mix[29] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[25] = (mix[25] ^ data) * 33;
// random math 2
data = ROTL32(mix[4], mix[12]);
mix[12] = ROTR32(mix[12], 13) ^ data;
// cache load 3
offset = mix[6] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[7] = ROTL32(mix[7], 8) ^ data;
// random math 3
data = mix[8] * mix[24];
mix[31] = (mix[31] ^ data) * 33;
// cache load 4
offset = mix[11] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[27] = ROTL32(mix[27], 2) ^ data;
// random math 4
data = popcount(mix[28]) + popcount(mix[17]);
mix[5] = (mix[5] * 33) + data;
// cache load 5
offset = mix[18] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[11] = ROTR32(mix[11], 28) ^ data;
// random math 5
data = mix[31] ^ mix[12];
mix[17] = (mix[17] ^ data) * 33;
// cache load 6
offset = mix[8] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[29] = ROTR32(mix[29], 10) ^ data;
// random math 6
data = popcount(mix[4]) + popcount(mix[12]);
mix[10] = (mix[10] * 33) + data;
// cache load 7
offset = mix[14] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[6] = (mix[6] ^ data) * 33;
// random math 7
data = min(mix[10], mix[20]);
mix[24] = (mix[24] * 33) + data;
// cache load 8
offset = mix[17] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[14] = (mix[14] ^ data) * 33;
// random math 8
data = mix[0] * mix[10];
mix[19] = ROTR32(mix[19], 23) ^ data;
// cache load 9
offset = mix[9] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[23] = (mix[23] * 33) + data;
// random math 9
data = min(mix[22], mix[28]);
mix[1] = ROTR32(mix[1], 4) ^ data;
// cache load 10
offset = mix[0] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[18] = (mix[18] ^ data) * 33;
// random math 10
data = ROTL32(mix[22], mix[9]);
mix[21] = ROTR32(mix[21], 5) ^ data;
// random math 11
data = min(mix[26], mix[4]);
mix[22] = (mix[22] * 33) + data;
// random math 12
data = min(mix[19], mix[30]);
mix[8] = ROTL32(mix[8], 26) ^ data;
// random math 13
data = mix[12] ^ mix[24];
mix[3] = ROTL32(mix[3], 30) ^ data;
// random math 14
data = min(mix[8], mix[13]);
mix[28] = ROTL32(mix[28], 31) ^ data;
// random math 15
data = ROTL32(mix[12], mix[9]);
mix[30] = ROTL32(mix[30], 31) ^ data;
// random math 16
data = ROTL32(mix[28], mix[27]);
mix[2] = (mix[2] * 33) + data;
// random math 17
data = ROTL32(mix[30], mix[28]);
mix[20] = ROTL32(mix[20], 12) ^ data;
// consume global load data
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
mix[0] = (mix[0] * 33) + data_dag.s[0];
mix[4] = ROTL32(mix[4], 13) ^ data_dag.s[1];
mix[13] = (mix[13] ^ data_dag.s[2]) * 33;
mix[0] = ROTR32(mix[0], 12) ^ data_dag.s[3];
}
//
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
__device__ __constant__ const uint32_t keccakf_rndc[24] = {
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
};
// Implementation of the permutation Keccakf with width 800.
__device__ __forceinline__ void keccak_f800_round(uint32_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint32_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL32(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL32(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
//pli11
/*
__device__ __forceinline__ uint32_t cuda_swab32(const uint32_t x)
{
return __byte_perm(x, x, 0x0123);
}
*/
// Keccak - implemented as a variant of SHAKE
// The width is 800, with a bitrate of 576, a capacity of 224, and no padding
// Only need 64 bits of output for mining
/*
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
keccak_f800_round(st, 21);
return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
}
*/
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
// st[8] = split_result(seed);
// st[9] = split_result(seed>>32);
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
// last round can be simplified due to partial output
keccak_f800_round(st, 21);
// Byte swap so byte 0 of hash is MSB of result
//return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
//return combine_result(cuda_swab32(st[1]),cuda_swab32(st[0]));
return 0;
}
#define fnv1a(h, d) (h = (uint32_t(h) ^ uint32_t(d)) * uint32_t(0x1000193))
typedef struct {
uint32_t z, w, jsr, jcong;
} kiss99_t;
// KISS99 is simple, fast, and passes the TestU01 suite
// https://en.wikipedia.org/wiki/KISS_(algorithm)
// http://www.cse.yorku.ca/~oz/marsaglia-rng.html
__device__ __forceinline__ uint32_t kiss99(kiss99_t &st)
{
st.z = 36969 * (st.z & 65535) + (st.z >> 16);
st.w = 18000 * (st.w & 65535) + (st.w >> 16);
uint32_t MWC = ((st.z << 16) + st.w);
st.jsr ^= (st.jsr << 17);
st.jsr ^= (st.jsr >> 13);
st.jsr ^= (st.jsr << 5);
st.jcong = 69069 * st.jcong + 1234567;
return ((MWC^st.jcong) + st.jsr);
}
__device__ __forceinline__ void fill_mix(uint64_t seed, uint32_t lane_id, uint32_t mix[PROGPOW_REGS])
{
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
uint32_t fnv_hash = 0x811c9dc5;
kiss99_t st;
//st.z = fnv1a(fnv_hash, split_result(seed));
//st.w = fnv1a(fnv_hash, split_result(seed>>32));
st.z = fnv1a(fnv_hash, seed);
st.w = fnv1a(fnv_hash, seed>>32);
st.jsr = fnv1a(fnv_hash, lane_id);
st.jcong = fnv1a(fnv_hash, lane_id);
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
mix[i] = kiss99(st);
}
__device__ float NVM_log[LOG_SIZE_16M];
__device__ float NVM_flag[FLAG_SIZE_1M];
__global__ void
progpow_search(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false
)
{
__shared__ uint32_t c_dag[PROGPOW_CACHE_WORDS];
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t const nonce = start_nonce + gid;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES - 1);
// Load the first portion of the DAG into the cache
for (uint32_t word = threadIdx.x*PROGPOW_DAG_LOADS; word < PROGPOW_CACHE_WORDS; word += blockDim.x*PROGPOW_DAG_LOADS)
{
dag_t load = g_dag[word/PROGPOW_DAG_LOADS];
for(int i=0; i<PROGPOW_DAG_LOADS; i++)
c_dag[word + i] = load.s[i];
}
hash32_t digest;
for (int i = 0; i < 8; i++)
digest.uint32s[i] = 0;
// keccak(header..nonce)
uint64_t seed = keccak_f800(header, nonce, digest);
__syncthreads();
#pragma unroll 1
for (uint32_t h = 0; h < PROGPOW_LANES; h++)
{
uint32_t mix[PROGPOW_REGS];
// share the hash's seed across all lanes
uint64_t hash_seed = __shfl_sync(0xFFFFFFFF, seed, h, PROGPOW_LANES);
// initialize mix for all lanes
fill_mix(hash_seed, lane_id, mix);
#pragma unroll 1
for (uint32_t l = 0; l < PROGPOW_CNT_DAG; l++)
progPowLoop(l, mix, g_dag, c_dag, hack_false);
// Reduce mix data to a per-lane 32-bit digest
uint32_t digest_lane = 0x811c9dc5;
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
fnv1a(digest_lane, mix[i]);
// Reduce all lanes to a single 256-bit digest
hash32_t digest_temp;
#pragma unroll
for (int i = 0; i < 8; i++)
digest_temp.uint32s[i] = 0x811c9dc5;
for (int i = 0; i < PROGPOW_LANES; i += 8)
#pragma unroll
for (int j = 0; j < 8; j++)
fnv1a(digest_temp.uint32s[j], __shfl_sync(0xFFFFFFFF, digest_lane, i + j, PROGPOW_LANES));
if (h == lane_id)
digest = digest_temp;
}
// keccak(header .. keccak(header..nonce) .. digest);
if (keccak_f800(header, seed, digest) >= target)
return;
uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff);
if (index >= SEARCH_RESULTS)
return;
ST_WT_U32(&NVM_log[index],g_output->result[index].gid);
MEM_FENCE; __syncthreads();
SET_NVM_FLAG(1);
//g_output->result[index].gid = gid;
asm("st.global.wt.u32 [%0], %1;" :: "l"(&g_output->result[index].gid ), "r"(gid) : "memory");
#pragma unroll
for (int i = 0; i < 8; i++){
//g_output->result[index].mix[i] = digest.uint32s[i];
asm("st.global.wt.u32 [%0], %1;" :: "l"(&g_output->result[index].mix[i] ), "r"(digest.uint32s[i]) : "memory");
}
MEM_FENCE;__syncthreads();
SET_NVM_FLAG(2);
}
//pli11
void search_kernel(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
search_results* g_output,
bool hack_false,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream
)
{
progpow_search <<<blocks, threads, 0, stream >>>(start_nonce,header,target,g_dag,g_output,hack_false);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
|
ad28e9a902f0ef682acde402f1e4d48bea84d438.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, float x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
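// One thread block per column of da: each thread accumulates a partial sum of |x_i|^2,
// the block reduces the partials in shared memory, and thread 0 writes sqrt of the total.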
__global__ void
magmablas_scnrm2_kernel( int m, magmaFloatComplex *da, int ldda, float *dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_scnrm2_check_kernel( int m, magmaFloatComplex *da, int ldda, float *dxnorm,
float *lsticc )
{
const int i = threadIdx.x;
magmaFloatComplex *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
    // get norm of dx only if lsticc[blockIdx.x + 1] != 0
if( lsticc[blockIdx.x + 1] == 0 ) return;
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_scnrm2_check(
magma_int_t m, magma_int_t n, magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm, float *lsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_scnrm2_check_kernel), dim3(blocks), dim3(threads) , 0, 0, m, da, ldda, dxnorm, lsticc );
}
//==============================================================================
__global__ void
magmablas_scnrm2_smkernel( int m, int n, magmaFloatComplex *da, int ldda,
float *dxnorm )
{
const int i = threadIdx.x, c= threadIdx.y;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float re, lsum;
for( int k = c; k < n; k+= BLOCK_SIZEy)
{
magmaFloatComplex *dx = da + k * ldda;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i][c] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, c, sum );
if (i==0)
dxnorm[k] = sqrt(sum[0][c]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the scnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_scnrm2_sm(
magma_int_t m, magma_int_t n, magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magmablas_scnrm2_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, da, ldda, dxnorm );
}
//==============================================================================
static
__device__ void dsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c)
{
const int i = threadIdx.x;
__shared__ float sum[ BLOCK_SIZE ];
float temp;
temp = MAGMA_C_ABS( c[i] ) / xnorm[0];
sum[i] = -temp * temp;
dsum_reduce( blockDim.x, i, sum );
__syncthreads();
if (i==0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
/*
    Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_scnrm2_adjust(magma_int_t k, float *xnorm, magmaFloatComplex *c)
{
hipLaunchKernelGGL(( magma_scnrm2_adjust_kernel), dim3(1), dim3(k), 0, magma_stream , xnorm, c);
}
//==============================================================================
#define BS 256
__global__ void
magma_scnrm2_row_check_adjust_kernel(int n, float tol, float *xnorm, float *xnorm2,
magmaFloatComplex *c, int ldc, float *lsticc)
{
const int i = threadIdx.x + blockIdx.x*BS;
lsticc[i+1] = 0;
if (i<n){
float temp = MAGMA_C_ABS( c[i*ldc] ) / xnorm[i];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
float temp2 = xnorm[i] / xnorm2[i];
temp2 = temp * (temp2 * temp2);
if (temp2 <= tol) {
lsticc[i+1] = 1;
} else {
xnorm[i] *= sqrt(temp);
}
}
if( i==0 ) lsticc[0] = 0;
dsum_reduce( blockDim.x, i, lsticc );
}
/*
Adjust the norm of c[,1:k] to give the norm of c[k+1:,1:k], assuming that
c was changed with orthogonal transformations.
    It also does checks for QP3
*/
extern "C" void
magmablas_scnrm2_row_check_adjust(
magma_int_t k, float tol, float *xnorm, float *xnorm2,
magmaFloatComplex *c, magma_int_t ldc, float *lsticc)
{
int nblocks = (k+BS-1)/BS;
hipLaunchKernelGGL(( magma_scnrm2_row_check_adjust_kernel), dim3(nblocks), dim3(BS) , 0, 0, k, tol, xnorm, xnorm2, c, ldc, lsticc);
}
//==============================================================================
/*
Compute the scnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_scnrm2_cols(
magma_int_t m, magma_int_t n,
magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magmablas_scnrm2_kernel), dim3(blocks), dim3(threads), 0, magma_stream , m, da, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_scnrm2_sm(m, n, da, ldda, dxnorm);
}
//==============================================================================
| ad28e9a902f0ef682acde402f1e4d48bea84d438.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated c Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
#define PRECISION_c
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce( /*int n,*/ int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
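// Usage sketch (see magmablas_scnrm2_kernel below): each of the n threads first
// writes its partial value into a shared array s, then calls
// sum_reduce< n >( threadIdx.x, s ); on return s[0] holds the block-wide total,
// while the rest of s is overwritten by the reduction.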
template< int n >
__device__ void sum_reduce_2d( /*int n,*/ int i, int c, float x[][BLOCK_SIZEy+1] )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i][c] += x[i+1024][c]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i][c] += x[i+ 512][c]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i][c] += x[i+ 256][c]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i][c] += x[i+ 128][c]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i][c] += x[i+ 64][c]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i][c] += x[i+ 32][c]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i][c] += x[i+ 16][c]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i][c] += x[i+ 8][c]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i][c] += x[i+ 4][c]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i][c] += x[i+ 2][c]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i][c] += x[i+ 1][c]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
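// One thread block per column of da: each thread accumulates a partial sum of
// |dx[j]|^2 with a stride of BLOCK_SIZE, the partial sums are reduced in shared
// memory, and thread 0 writes the square root of the total to dxnorm[blockIdx.x].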
__global__ void
magmablas_scnrm2_kernel( int m, magmaFloatComplex *da, int ldda, float *dxnorm )
{
const int i = threadIdx.x;
magmaFloatComplex *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
//==============================================================================
__global__ void
magmablas_scnrm2_check_kernel( int m, magmaFloatComplex *da, int ldda, float *dxnorm,
float *lsticc )
{
const int i = threadIdx.x;
magmaFloatComplex *dx = da + blockIdx.x * ldda;
__shared__ float sum[ BLOCK_SIZE ];
float re, lsum;
    // get norm of dx only if lsticc[blockIdx.x + 1] != 0
if( lsticc[blockIdx.x + 1] == 0 ) return;
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZE ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i] = lsum;
sum_reduce< BLOCK_SIZE >( i, sum );
if (i==0)
dxnorm[blockIdx.x] = sqrt(sum[0]);
}
extern "C" void
magmablas_scnrm2_check(
magma_int_t m, magma_int_t n, magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm, float *lsticc)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_scnrm2_check_kernel<<< blocks, threads >>>( m, da, ldda, dxnorm, lsticc );
}
//==============================================================================
__global__ void
magmablas_scnrm2_smkernel( int m, int n, magmaFloatComplex *da, int ldda,
float *dxnorm )
{
const int i = threadIdx.x, c= threadIdx.y;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float re, lsum;
for( int k = c; k < n; k+= BLOCK_SIZEy)
{
magmaFloatComplex *dx = da + k * ldda;
// get norm of dx
lsum = 0;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
#if (defined(PRECISION_s) || defined(PRECISION_d))
re = dx[j];
lsum += re*re;
#else
re = MAGMA_C_REAL( dx[j] );
float im = MAGMA_C_IMAG( dx[j] );
lsum += re*re + im*im;
#endif
}
sum[i][c] = lsum;
sum_reduce_2d< BLOCK_SIZEx >( i, c, sum );
if (i==0)
dxnorm[k] = sqrt(sum[0][c]);
__syncthreads();
}
}
//==============================================================================
/*
Compute the scnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
This routine uses only one SM (block).
*/
extern "C" void
magmablas_scnrm2_sm(
magma_int_t m, magma_int_t n, magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magmablas_scnrm2_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, da, ldda, dxnorm );
}
//==============================================================================
static
__device__ void dsum_reduce( int n, int i, float* x )
{
__syncthreads();
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); }
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); }
}
// end sum_reduce
__global__ void
magma_scnrm2_adjust_kernel(float *xnorm, magmaFloatComplex *c)
{
const int i = threadIdx.x;
__shared__ float sum[ BLOCK_SIZE ];
float temp;
temp = MAGMA_C_ABS( c[i] ) / xnorm[0];
sum[i] = -temp * temp;
dsum_reduce( blockDim.x, i, sum );
__syncthreads();
if (i==0)
xnorm[0] = xnorm[0] * sqrt(1+sum[0]);
}
/*
    Adjust the norm of c to give the norm of c[k+1:], assuming that
c was changed with orthogonal transformations.
*/
extern "C" void
magmablas_scnrm2_adjust(magma_int_t k, float *xnorm, magmaFloatComplex *c)
{
magma_scnrm2_adjust_kernel<<< 1, k, 0, magma_stream >>> (xnorm, c);
}
//==============================================================================
#define BS 256
__global__ void
magma_scnrm2_row_check_adjust_kernel(int n, float tol, float *xnorm, float *xnorm2,
magmaFloatComplex *c, int ldc, float *lsticc)
{
const int i = threadIdx.x + blockIdx.x*BS;
lsticc[i+1] = 0;
if (i<n){
float temp = MAGMA_C_ABS( c[i*ldc] ) / xnorm[i];
temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) );
float temp2 = xnorm[i] / xnorm2[i];
temp2 = temp * (temp2 * temp2);
if (temp2 <= tol) {
lsticc[i+1] = 1;
} else {
xnorm[i] *= sqrt(temp);
}
}
if( i==0 ) lsticc[0] = 0;
dsum_reduce( blockDim.x, i, lsticc );
}
/*
Adjust the norm of c[,1:k] to give the norm of c[k+1:,1:k], assuming that
c was changed with orthogonal transformations.
    It also does checks for QP3
*/
extern "C" void
magmablas_scnrm2_row_check_adjust(
magma_int_t k, float tol, float *xnorm, float *xnorm2,
magmaFloatComplex *c, magma_int_t ldc, float *lsticc)
{
int nblocks = (k+BS-1)/BS;
magma_scnrm2_row_check_adjust_kernel<<< nblocks, BS >>> (k, tol, xnorm, xnorm2, c, ldc, lsticc);
}
//==============================================================================
/*
Compute the scnrm2 of each column of m-by-n matrix dA.
The resulting norms are written in the dxnorm array.
The computation can be done using n blocks (default) or on one SM (commented).
*/
extern "C" void
magmablas_scnrm2_cols(
magma_int_t m, magma_int_t n,
magmaFloatComplex *da, magma_int_t ldda,
float *dxnorm)
{
dim3 blocks( n );
dim3 threads( BLOCK_SIZE );
magmablas_scnrm2_kernel<<< blocks, threads, 0, magma_stream >>>( m, da, ldda, dxnorm );
// The following would do the computation on one SM
// magmablas_scnrm2_sm(m, n, da, ldda, dxnorm);
}
//==============================================================================
|
453ddc9a1d9c2ab7ea7e997154c49e3e2312df6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/book.h"
#define SIZE (100 * 1024 * 1024)
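// Per-block shared-memory histogram: each block first builds its own 256-bin
// histogram in shared memory with atomicAdd, then merges it into the global
// histogram with one atomicAdd per bin, which keeps most of the atomic traffic
// off global memory.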
__global__ void histo_kernel(unsigned char *buffer, long const size,
unsigned int *histo) {
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int const offset = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
atomicAdd(&histo[threadIdx.x], temp[threadIdx.x]);
}
int main(void) {
unsigned char *buffer = (unsigned char *)big_random_block(SIZE);
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR(hipMalloc((void **)&dev_buffer, SIZE));
HANDLE_ERROR(hipMemcpy(dev_buffer, buffer, SIZE, hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void **)&dev_histo, 256 * sizeof(long)));
HANDLE_ERROR(hipMemset(dev_histo, 0, 256 * sizeof(int)));
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
int const blocks = prop.multiProcessorCount;
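  // Two blocks per multiprocessor are launched below (presumably to keep every
  // SM busy); each thread then walks the buffer with a grid-sized stride.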
hipLaunchKernelGGL(( histo_kernel), dim3(blocks * 2), dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
unsigned int histo[256];
HANDLE_ERROR(
hipMemcpy(histo, dev_histo, 256 * sizeof(int), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float elapsed_time;
HANDLE_ERROR(hipEventElapsedTime(&elapsed_time, start, stop));
printf("Time to generate: %3.1fms\n", elapsed_time);
long histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += histo[i];
}
printf("Histogram sum: %ld\n", histo_count);
for (int i = 0; i < SIZE; ++i) {
--histo[buffer[i]];
}
for (int i = 0; i < 256; ++i) {
if (histo[i] != 0) {
printf("Failure at %d!\n", i);
}
}
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
hipFree(dev_buffer);
hipFree(dev_histo);
free(buffer);
return 0;
}
| 453ddc9a1d9c2ab7ea7e997154c49e3e2312df6f.cu | #include "common/book.h"
#define SIZE (100 * 1024 * 1024)
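// Per-block shared-memory histogram: each block first builds its own 256-bin
// histogram in shared memory with atomicAdd, then merges it into the global
// histogram with one atomicAdd per bin, which keeps most of the atomic traffic
// off global memory.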
__global__ void histo_kernel(unsigned char *buffer, long const size,
unsigned int *histo) {
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
int const offset = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
atomicAdd(&histo[threadIdx.x], temp[threadIdx.x]);
}
int main(void) {
unsigned char *buffer = (unsigned char *)big_random_block(SIZE);
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
unsigned char *dev_buffer;
unsigned int *dev_histo;
HANDLE_ERROR(cudaMalloc((void **)&dev_buffer, SIZE));
HANDLE_ERROR(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void **)&dev_histo, 256 * sizeof(long)));
HANDLE_ERROR(cudaMemset(dev_histo, 0, 256 * sizeof(int)));
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
int const blocks = prop.multiProcessorCount;
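  // Two blocks per multiprocessor are launched below (presumably to keep every
  // SM busy); each thread then walks the buffer with a grid-sized stride.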
histo_kernel<<<blocks * 2, 256>>>(dev_buffer, SIZE, dev_histo);
unsigned int histo[256];
HANDLE_ERROR(
cudaMemcpy(histo, dev_histo, 256 * sizeof(int), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsed_time;
HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time, start, stop));
printf("Time to generate: %3.1fms\n", elapsed_time);
long histo_count = 0;
for (int i = 0; i < 256; ++i) {
histo_count += histo[i];
}
printf("Histogram sum: %ld\n", histo_count);
for (int i = 0; i < SIZE; ++i) {
--histo[buffer[i]];
}
for (int i = 0; i < 256; ++i) {
if (histo[i] != 0) {
printf("Failure at %d!\n", i);
}
}
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
cudaFree(dev_buffer);
cudaFree(dev_histo);
free(buffer);
return 0;
}
|
b7112712137e11a5a24ac1b1cc2c6af96bab9c19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "cudnn_header.h"
#include "nccl_header.h"
#include <string.h>
#include <cblas.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
#include "mpi_header.h"
#include "scanner_header.h"
/************* Functions **************/
__global__ void x10(float* x11, float x12, int x13) {
// begin generating kernel function for FILL of type Float
int x14 = gridDim.x * blockDim.x;
int x15 = threadIdx.x + blockIdx.x * blockDim.x;
while (x15 < x13) {
x11[x15] = x12;
x15 = x15 + x14;
}
// end generating kernel function for FILL of type Float
}
__global__ void x22(float* x23, float** x24) {
// This is cuda 2-section split kernel for 3D input at axis 2.
  // It takes a 3D array and splits it along the innermost dimension (dim2) into 2 arrays.
// arg0: input array
// arg1: array of output arrays
// call constraint: sum of out(i).size = in.size for i in [0, 2)
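  // Note: the bounds are specialized for this generated program: 16384 input
  // elements whose innermost dimension is 32, split into two halves of 16.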
int x25 = blockIdx.x * blockDim.x + threadIdx.x;
if (x25 < 16384) {
float x26 = x23[x25];
int x27 = x25 % 32;
if (x27 < 16) x24[0][x25 / 32 * 16 + x27] = x26;
else x24[1][x25 / 32 * 16 + (x27 - 16)] = x26;
}
}
__global__ void x29(float* x30, float* x31, float* x32, int x33) {
// begin generating kernel function for MULT of type Float
int x34 = gridDim.x * blockDim.x;
int x35 = threadIdx.x + blockIdx.x * blockDim.x;
while (x35 < x33) {
int x36 = x35;
x32[x36] = x30[x36] * x31[x36];
x35 = x35 + x34;
}
// end generating kernel function for MULT of type Float
}
__global__ void x43(float* x44, float* x45, int x46) {
// begin generating kernel function for ACCUM of type Float
int x47 = gridDim.x * blockDim.x;
int x48 = threadIdx.x + blockIdx.x * blockDim.x;
while (x48 < x46) {
int x49 = x48;
x44[x49] = x44[x49] + x45[x49];
x48 = x48 + x47;
}
// end generating kernel function for ACCUM of type Float
}
__global__ void x54(float** x55, float* x56) {
  // This is cuda 2-section concat kernel for 3D inputs at axis 2.
  // It concatenates 2 3D arrays on the innermost dimension (dim2).
  // arg0: array of input arrays
// arg1: output array
// call constraint: in.size = 2
// call constraint: sum of in(i).size = out.size for i in [0, 2)
int x57 = blockIdx.x * blockDim.x + threadIdx.x;
if (x57 < 16384) {
int x58 = x57 % 32;
if (x58 < 16) x56[x57] = x55[0][x57 / 32 * 16 + x58];
else x56[x57] = x55[1][x57 / 32 * 16 + (x58 - 16)];
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
// begin setting up the MPI/NCCL environment
int x1 = 0;
int x2 = 0;
MPICHECK(MPI_Init(NULL, NULL));
MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2));
MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1));
MPICHECK(MPI_Barrier(MPI_COMM_WORLD));
CUDA_CALL(hipSetDevice(x2));
ncclUniqueId x3;
NCCLCHECK(ncclGetUniqueId(&x3));
MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD));
ncclComm_t x4;
NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2));
hipStream_t x5;
CUDA_CALL(hipStreamCreateWithFlags(&x5, hipStreamNonBlocking));
int x6 = x2;
// end setting up the MPI/NCCL environment
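  // What follows appears to be a generated forward/backward pass per rank:
  // the 16384-element input is split in half along its innermost dimension,
  // the first half is multiplied elementwise by the 8192-element weight to
  // form the loss, and the gradients for the weight and the re-concatenated
  // input are accumulated and checked against the golden files.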
// begin initializing GPU array of size 8192 and type Float
float* x7 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(hipSetDevice(x6));
float* x8 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x8, (size_t)(8192 * sizeof(float))));
scan_float_array(x7, 8192, "golden/weight_rank_%d.data", x6);
CUDA_CALL(hipMemcpy(x8, x7, (size_t)(8192 * sizeof(float)), hipMemcpyHostToDevice));
// end initializing GPU array of size 8192 and type Float
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(hipSetDevice(x6));
float* x9 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x9, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x9, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin initializing GPU array of size 16384 and type Float
float* x16 = (float*)malloc(16384 * sizeof(float));
CUDA_CALL(hipSetDevice(x6));
float* x17 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x17, (size_t)(16384 * sizeof(float))));
scan_float_array(x16, 16384, "golden/input_rank_%d.data", x6);
CUDA_CALL(hipMemcpy(x17, x16, (size_t)(16384 * sizeof(float)), hipMemcpyHostToDevice));
// end initializing GPU array of size 16384 and type Float
CUDA_CALL(hipSetDevice(x6));
float* x18 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x18, (size_t)(8192 * sizeof(float))));
CUDA_CALL(hipSetDevice(x6));
float* x19 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x19, (size_t)(8192 * sizeof(float))));
float** x20 = (float**)malloc(2 * sizeof(float*));
x20[0] = x18;
x20[1] = x19;
float** x21 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(hipMalloc(&x21, (size_t)(2 * sizeof(float*))));
CUDA_CALL(hipMemcpy(x21, x20, (size_t)(2 * sizeof(float*)), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( x22), dim3(dim3(32, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x17, x21);
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x45
CUDA_CALL(hipSetDevice(x6));
float* x28 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x28, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x18, x8, x28, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x45
// begin initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39
CUDA_CALL(hipSetDevice(x6));
float* x37 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x37, (size_t)(16384 * sizeof(float))));
hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x37, 0, 16384);
// end initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(hipSetDevice(x6));
float* x38 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x38, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x38, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(hipSetDevice(x6));
float* x39 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x39, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x39, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin checking GPU array of size 8192 and type Float
float* x40 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(hipMemcpy(x40, x28, (size_t)(8192 * sizeof(float)), hipMemcpyDeviceToHost));
check_float_array_with_file(x40, 8192, "golden/loss_rank_%d.data", x6);
// end checking GPU array of size 8192 and type Float
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(hipSetDevice(x6));
float* x41 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x41, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x41, 1, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x45 and right_operand x282
CUDA_CALL(hipSetDevice(x6));
float* x42 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x42, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x8, x41, x42, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x45 and right_operand x282
// begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x295
CUDA_CALL(hipSetDevice(x6));
hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x38, x42, 8192);
// end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x295
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282
CUDA_CALL(hipSetDevice(x6));
float* x50 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x50, (size_t)(8192 * sizeof(float))));
hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x18, x41, x50, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282
// begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x345
CUDA_CALL(hipSetDevice(x6));
hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x9, x50, 8192);
// end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x345
CUDA_CALL(hipSetDevice(x6));
float* x51 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(hipMalloc(&x51, (size_t)(16384 * sizeof(float))));
float** x52 = (float**)malloc(2 * sizeof(float*));
x52[0] = x38;
x52[1] = x39;
float** x53 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(hipMalloc(&x53, (size_t)(2 * sizeof(float*))));
CUDA_CALL(hipMemcpy(x53, x52, (size_t)(2 * sizeof(float*)), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( x54), dim3(dim3(32, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x53, x51);
// begin computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x364
CUDA_CALL(hipSetDevice(x6));
hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x37, x51, 16384);
// end computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x364
// begin checking GPU array of size 8192 and type Float
float* x59 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(hipMemcpy(x59, x9, (size_t)(8192 * sizeof(float)), hipMemcpyDeviceToHost));
check_float_array_with_file(x59, 8192, "golden/weight_grad_rank_%d.data", x6);
// end checking GPU array of size 8192 and type Float
// begin checking GPU array of size 16384 and type Float
float* x60 = (float*)malloc(16384 * sizeof(float));
CUDA_CALL(hipMemcpy(x60, x37, (size_t)(16384 * sizeof(float)), hipMemcpyDeviceToHost));
check_float_array_with_file(x60, 16384, "golden/input_grad_rank_%d.data", x6);
// end checking GPU array of size 16384 and type Float
NCCLCHECK(ncclCommDestroy(x4));
MPICHECK(MPI_Finalize());
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
| b7112712137e11a5a24ac1b1cc2c6af96bab9c19.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include "cudnn_header.h"
#include "nccl_header.h"
#include <string.h>
#include <cblas.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
#include "mpi_header.h"
#include "scanner_header.h"
/************* Functions **************/
__global__ void x10(float* x11, float x12, int x13) {
// begin generating kernel function for FILL of type Float
int x14 = gridDim.x * blockDim.x;
int x15 = threadIdx.x + blockIdx.x * blockDim.x;
while (x15 < x13) {
x11[x15] = x12;
x15 = x15 + x14;
}
// end generating kernel function for FILL of type Float
}
__global__ void x22(float* x23, float** x24) {
// This is cuda 2-section split kernel for 3D input at axis 2.
  // It takes a 3D array and splits it along the innermost dimension (dim2) into 2 arrays.
// arg0: input array
// arg1: array of output arrays
// call constraint: sum of out(i).size = in.size for i in [0, 2)
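  // Note: the bounds are specialized for this generated program: 16384 input
  // elements whose innermost dimension is 32, split into two halves of 16.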
int x25 = blockIdx.x * blockDim.x + threadIdx.x;
if (x25 < 16384) {
float x26 = x23[x25];
int x27 = x25 % 32;
if (x27 < 16) x24[0][x25 / 32 * 16 + x27] = x26;
else x24[1][x25 / 32 * 16 + (x27 - 16)] = x26;
}
}
__global__ void x29(float* x30, float* x31, float* x32, int x33) {
// begin generating kernel function for MULT of type Float
int x34 = gridDim.x * blockDim.x;
int x35 = threadIdx.x + blockIdx.x * blockDim.x;
while (x35 < x33) {
int x36 = x35;
x32[x36] = x30[x36] * x31[x36];
x35 = x35 + x34;
}
// end generating kernel function for MULT of type Float
}
__global__ void x43(float* x44, float* x45, int x46) {
// begin generating kernel function for ACCUM of type Float
int x47 = gridDim.x * blockDim.x;
int x48 = threadIdx.x + blockIdx.x * blockDim.x;
while (x48 < x46) {
int x49 = x48;
x44[x49] = x44[x49] + x45[x49];
x48 = x48 + x47;
}
// end generating kernel function for ACCUM of type Float
}
__global__ void x54(float** x55, float* x56) {
  // This is cuda 2-section concat kernel for 3D inputs at axis 2.
  // It concatenates 2 3D arrays on the innermost dimension (dim2).
  // arg0: array of input arrays
// arg1: output array
// call constraint: in.size = 2
// call constraint: sum of in(i).size = out.size for i in [0, 2)
int x57 = blockIdx.x * blockDim.x + threadIdx.x;
if (x57 < 16384) {
int x58 = x57 % 32;
if (x58 < 16) x56[x57] = x55[0][x57 / 32 * 16 + x58];
else x56[x57] = x55[1][x57 / 32 * 16 + (x58 - 16)];
}
}
/**************** Snippet ****************/
void Snippet(int x0) {
// begin setting up the MPI/NCCL environment
int x1 = 0;
int x2 = 0;
MPICHECK(MPI_Init(NULL, NULL));
MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2));
MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1));
MPICHECK(MPI_Barrier(MPI_COMM_WORLD));
CUDA_CALL(cudaSetDevice(x2));
ncclUniqueId x3;
NCCLCHECK(ncclGetUniqueId(&x3));
MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD));
ncclComm_t x4;
NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2));
cudaStream_t x5;
CUDA_CALL(cudaStreamCreateWithFlags(&x5, cudaStreamNonBlocking));
int x6 = x2;
// end setting up the MPI/NCCL environment
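  // What follows appears to be a generated forward/backward pass per rank:
  // the 16384-element input is split in half along its innermost dimension,
  // the first half is multiplied elementwise by the 8192-element weight to
  // form the loss, and the gradients for the weight and the re-concatenated
  // input are accumulated and checked against the golden files.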
// begin initializing GPU array of size 8192 and type Float
float* x7 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(cudaSetDevice(x6));
float* x8 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x8, (size_t)(8192 * sizeof(float))));
scan_float_array(x7, 8192, "golden/weight_rank_%d.data", x6);
CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(8192 * sizeof(float)), cudaMemcpyHostToDevice));
// end initializing GPU array of size 8192 and type Float
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(cudaSetDevice(x6));
float* x9 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x9, (size_t)(8192 * sizeof(float))));
x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x9, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin initializing GPU array of size 16384 and type Float
float* x16 = (float*)malloc(16384 * sizeof(float));
CUDA_CALL(cudaSetDevice(x6));
float* x17 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x17, (size_t)(16384 * sizeof(float))));
scan_float_array(x16, 16384, "golden/input_rank_%d.data", x6);
CUDA_CALL(cudaMemcpy(x17, x16, (size_t)(16384 * sizeof(float)), cudaMemcpyHostToDevice));
// end initializing GPU array of size 16384 and type Float
CUDA_CALL(cudaSetDevice(x6));
float* x18 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x18, (size_t)(8192 * sizeof(float))));
CUDA_CALL(cudaSetDevice(x6));
float* x19 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x19, (size_t)(8192 * sizeof(float))));
float** x20 = (float**)malloc(2 * sizeof(float*));
x20[0] = x18;
x20[1] = x19;
float** x21 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(cudaMalloc(&x21, (size_t)(2 * sizeof(float*))));
CUDA_CALL(cudaMemcpy(x21, x20, (size_t)(2 * sizeof(float*)), cudaMemcpyHostToDevice));
x22<<<dim3(32, 1, 1), dim3(512, 1, 1)>>>(x17, x21);
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x45
CUDA_CALL(cudaSetDevice(x6));
float* x28 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x28, (size_t)(8192 * sizeof(float))));
x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x18, x8, x28, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x45
// begin initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39
CUDA_CALL(cudaSetDevice(x6));
float* x37 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x37, (size_t)(16384 * sizeof(float))));
x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x37, 0, 16384);
// end initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(cudaSetDevice(x6));
float* x38 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x38, (size_t)(8192 * sizeof(float))));
x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x38, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(cudaSetDevice(x6));
float* x39 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x39, (size_t)(8192 * sizeof(float))));
x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x39, 0, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin checking GPU array of size 8192 and type Float
float* x40 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(cudaMemcpy(x40, x28, (size_t)(8192 * sizeof(float)), cudaMemcpyDeviceToHost));
check_float_array_with_file(x40, 8192, "golden/loss_rank_%d.data", x6);
// end checking GPU array of size 8192 and type Float
// begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
CUDA_CALL(cudaSetDevice(x6));
float* x41 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x41, (size_t)(8192 * sizeof(float))));
x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x41, 1, 8192);
// end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x45 and right_operand x282
CUDA_CALL(cudaSetDevice(x6));
float* x42 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x42, (size_t)(8192 * sizeof(float))));
x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x8, x41, x42, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x45 and right_operand x282
// begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x295
CUDA_CALL(cudaSetDevice(x6));
x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x38, x42, 8192);
// end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x295
// begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282
CUDA_CALL(cudaSetDevice(x6));
float* x50 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x50, (size_t)(8192 * sizeof(float))));
x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x18, x41, x50, 8192);
// end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282
// begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x345
CUDA_CALL(cudaSetDevice(x6));
x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x9, x50, 8192);
// end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x345
CUDA_CALL(cudaSetDevice(x6));
float* x51 = (float*)malloc(0 * sizeof(float));
CUDA_CALL(cudaMalloc(&x51, (size_t)(16384 * sizeof(float))));
float** x52 = (float**)malloc(2 * sizeof(float*));
x52[0] = x38;
x52[1] = x39;
float** x53 = (float**)malloc(0 * sizeof(float*));
CUDA_CALL(cudaMalloc(&x53, (size_t)(2 * sizeof(float*))));
CUDA_CALL(cudaMemcpy(x53, x52, (size_t)(2 * sizeof(float*)), cudaMemcpyHostToDevice));
x54<<<dim3(32, 1, 1), dim3(512, 1, 1)>>>(x53, x51);
// begin computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x364
CUDA_CALL(cudaSetDevice(x6));
x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x37, x51, 16384);
// end computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x364
// begin checking GPU array of size 8192 and type Float
float* x59 = (float*)malloc(8192 * sizeof(float));
CUDA_CALL(cudaMemcpy(x59, x9, (size_t)(8192 * sizeof(float)), cudaMemcpyDeviceToHost));
check_float_array_with_file(x59, 8192, "golden/weight_grad_rank_%d.data", x6);
// end checking GPU array of size 8192 and type Float
// begin checking GPU array of size 16384 and type Float
float* x60 = (float*)malloc(16384 * sizeof(float));
CUDA_CALL(cudaMemcpy(x60, x37, (size_t)(16384 * sizeof(float)), cudaMemcpyDeviceToHost));
check_float_array_with_file(x60, 16384, "golden/input_grad_rank_%d.data", x6);
// end checking GPU array of size 16384 and type Float
NCCLCHECK(ncclCommDestroy(x4));
MPICHECK(MPI_Finalize());
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
printf("usage: %s <arg>\n", argv[0]);
return 0;
}
Snippet(atoi(argv[1]));
return 0;
}
|
0113f6a2e0d965e30274519b5a2b23a856d61fb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_mask_ops.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
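// Gathers the rows selected by the mask: block i copies the numBytes bytes of
// source row indices[i] into destination row i, threads striding over the bytes.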
__global__ void BooleanMaskCopyKernel(
const int64_t numOfOutput,
const int64_t numBytes,
const int64_t* indices,
const uint8_t* src,
uint8_t* dest) {
for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) {
const auto srcBase = indices[i] * numBytes;
const auto destBase = i * numBytes;
for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) {
dest[destBase + j] = src[srcBase + j];
}
}
}
}
template <>
class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws) {}
bool RunOnDevice() override {
const auto& src = Input(0);
const auto& mask = Input(1);
auto* dest = Output(0);
CAFFE_ENFORCE(src.dim() >= 1);
CAFFE_ENFORCE_EQ(mask.dim(), 1);
CAFFE_ENFORCE(src.size(0) == mask.size(0));
const auto* maskData = mask.data<bool>();
const auto outerSize = mask.size(0);
ReinitializeTensor(
&indices_, {outerSize}, at::dtype<int64_t>().device(CUDA));
auto* indicesData = indices_.mutable_data<int64_t>();
size_t numBytes = 0;
hipcub::CountingInputIterator<int> itr(0);
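    // The first Flagged call below passes a null temp-storage pointer, so it
    // only computes the required temporary-storage size (numBytes); the actual
    // selection runs in the second call once the scratch buffer is allocated.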
hipcub::DeviceSelect::Flagged(
nullptr,
numBytes,
itr,
maskData,
indicesData,
static_cast<int64_t*>(nullptr),
outerSize,
context_.cuda_stream());
auto numint64_t =
static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t));
// allocate one more int64_t at the end of scratch for storing numOfOutput
ReinitializeTensor(
&scratch_, {numint64_t + 1}, at::dtype<int64_t>().device(CUDA));
auto* scratchData = scratch_.mutable_data<int64_t>();
auto* numOfOutputData = scratchData + numint64_t;
hipcub::DeviceSelect::Flagged(
static_cast<void*>(scratchData),
numBytes,
itr,
maskData,
indicesData,
numOfOutputData,
outerSize,
context_.cuda_stream());
// Copy numOfOutput from gpu to cpu
int64_t numOfOutput;
context_.CopyToCPU(1, numOfOutputData, &numOfOutput);
indices_.Resize(numOfOutput);
std::vector<int64_t> dims = src.sizes().vec();
dims[0] = numOfOutput;
dest->Resize(dims);
auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta());
const auto* srcData = (uint8_t*)src.raw_data();
if (OutputSize() == 2) {
auto* indicesOut = Output(1, {numOfOutput}, at::dtype<int64_t>());
indicesOut->template mutable_data<int64_t>();
}
if (numOfOutput > 0) {
hipLaunchKernelGGL(( BooleanMaskCopyKernel),
dim3(::min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
numOfOutput,
src.size_from_dim(1) * src.meta().itemsize(),
indicesData,
srcData,
destData);
C10_HIP_KERNEL_LAUNCH_CHECK();
if (OutputSize() == 2) {
Output(1)->CopyFrom(indices_, /* async */ true);
}
}
return true;
}
private:
Tensor indices_;
Tensor scratch_;
};
REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>);
namespace {
#define minf (-1.0f * std::numeric_limits<float>::infinity())
template <typename T>
__global__ void sequenceMaskKernel(
int N,
int M,
int B,
const T* in,
const int* seq_lengths,
T fill_val,
T* out) {
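  // A non-negative B means the input is treated as a batched [B, N, M] tensor:
  // (i, j, k) are decoded from the flat index and fill_val is written wherever
  // k >= seq_lengths[j]; otherwise the input is a single N x M matrix.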
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j >= seq_lengths[i] ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void repeatedSequenceMaskKernel(
int N,
int M,
int D,
const T* in,
const int* seq_lengths,
T fill_val,
T* out) {
CUDA_1D_KERNEL_LOOP(index, N * M * D) {
int i = index / (D * M);
int j = (index / D) % M;
out[index] = (j >= seq_lengths[i] ? fill_val : in[index]);
}
}
template <typename T>
__global__ void windowMaskKernel(
int N,
int M,
int B,
const T* in,
const int* window_centers,
const int radius,
T fill_val,
T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] =
(k < window_centers[j] - radius || k > window_centers[j] + radius
? fill_val
: in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] =
(j < window_centers[i] - radius || j > window_centers[i] + radius
? fill_val
: in[index]);
}
}
}
template <typename T>
__global__ void
upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k > j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j > i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k < j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j < i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k >= j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j >= i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k <= j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j <= i ? fill_val : in[index]);
}
}
}
} // namespace
template <>
bool SequenceMaskOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0));
}
template <>
template <class T>
bool SequenceMaskOp<CUDAContext>::DoRunWithType() {
const Tensor* input = &Input(0);
const Tensor* sequence_lengths = nullptr;
const Tensor* window_centers = nullptr;
if (mode_ == "sequence") {
sequence_lengths = &Input(1);
} else if (mode_ == "window") {
window_centers = &Input(1);
}
auto* output = Output(0, input->sizes(), at::dtype<T>());
const auto canonical_axis = input->canonical_axis_index(axis_);
// canonical_batch is non-negative if batching, -1 otherwise
int canonical_batch = -1;
if ((HasArgument("batch"))) {
canonical_batch = input->canonical_axis_index(batch_);
}
// make sure batch < axis
if (canonical_batch >= 0) {
CAFFE_ENFORCE_LT(canonical_batch, canonical_axis);
}
// if no batch, then left is product of dims up to axis
// otherwise, left is product of dims between batch and axis
const int left =
(canonical_batch >= 0
? input->size_between_dim(canonical_batch, canonical_axis)
: input->size_to_dim(canonical_axis));
const int right = input->size_from_dim(canonical_axis);
// product of dims from 1 to batch
const int batch_dim =
(canonical_batch >= 0
? input->size_to_dim(canonical_batch) * input->dim(canonical_batch)
: -1);
T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_);
if (mode_ == "sequence") {
if (HasArgument("repeat_from_axis")) {
const int canonical_repeat_from =
input->canonical_axis_index(repeat_from_);
const int repeated_dims = input->size_from_dim(canonical_repeat_from);
const int masked_dims = right / repeated_dims;
hipLaunchKernelGGL(( repeatedSequenceMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
masked_dims,
repeated_dims,
input->data<T>(),
sequence_lengths->data<int>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( sequenceMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
sequence_lengths->data<int>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else if (mode_ == "window") {
hipLaunchKernelGGL(( windowMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
window_centers->data<int>(),
radius_,
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "upper") {
hipLaunchKernelGGL(( upperMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "lower") {
hipLaunchKernelGGL(( lowerMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "upperdiag") {
hipLaunchKernelGGL(( upperDiagMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "lowerdiag") {
hipLaunchKernelGGL(( lowerDiagMaskKernel),
dim3(CAFFE_GET_BLOCKS(left * right)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!");
}
return true;
}
REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>);
} // namespace caffe2
| 0113f6a2e0d965e30274519b5a2b23a856d61fb8.cu | #include <algorithm>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/boolean_mask_ops.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
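// Gathers the rows selected by the mask: block i copies the numBytes bytes of
// source row indices[i] into destination row i, threads striding over the bytes.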
__global__ void BooleanMaskCopyKernel(
const int64_t numOfOutput,
const int64_t numBytes,
const int64_t* indices,
const uint8_t* src,
uint8_t* dest) {
for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) {
const auto srcBase = indices[i] * numBytes;
const auto destBase = i * numBytes;
for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) {
dest[destBase + j] = src[srcBase + j];
}
}
}
}
template <>
class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> {
public:
BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CUDAContext>(operator_def, ws) {}
bool RunOnDevice() override {
const auto& src = Input(0);
const auto& mask = Input(1);
auto* dest = Output(0);
CAFFE_ENFORCE(src.dim() >= 1);
CAFFE_ENFORCE_EQ(mask.dim(), 1);
CAFFE_ENFORCE(src.size(0) == mask.size(0));
const auto* maskData = mask.data<bool>();
const auto outerSize = mask.size(0);
ReinitializeTensor(
&indices_, {outerSize}, at::dtype<int64_t>().device(CUDA));
auto* indicesData = indices_.mutable_data<int64_t>();
size_t numBytes = 0;
cub::CountingInputIterator<int> itr(0);
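    // The first Flagged call below passes a null temp-storage pointer, so it
    // only computes the required temporary-storage size (numBytes); the actual
    // selection runs in the second call once the scratch buffer is allocated.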
cub::DeviceSelect::Flagged(
nullptr,
numBytes,
itr,
maskData,
indicesData,
static_cast<int64_t*>(nullptr),
outerSize,
context_.cuda_stream());
auto numint64_t =
static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t));
// allocate one more int64_t at the end of scratch for storing numOfOutput
ReinitializeTensor(
&scratch_, {numint64_t + 1}, at::dtype<int64_t>().device(CUDA));
auto* scratchData = scratch_.mutable_data<int64_t>();
auto* numOfOutputData = scratchData + numint64_t;
cub::DeviceSelect::Flagged(
static_cast<void*>(scratchData),
numBytes,
itr,
maskData,
indicesData,
numOfOutputData,
outerSize,
context_.cuda_stream());
// Copy numOfOutput from gpu to cpu
int64_t numOfOutput;
context_.CopyToCPU(1, numOfOutputData, &numOfOutput);
indices_.Resize(numOfOutput);
std::vector<int64_t> dims = src.sizes().vec();
dims[0] = numOfOutput;
dest->Resize(dims);
auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta());
const auto* srcData = (uint8_t*)src.raw_data();
if (OutputSize() == 2) {
auto* indicesOut = Output(1, {numOfOutput}, at::dtype<int64_t>());
indicesOut->template mutable_data<int64_t>();
}
if (numOfOutput > 0) {
BooleanMaskCopyKernel<<<
std::min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
numOfOutput,
src.size_from_dim(1) * src.meta().itemsize(),
indicesData,
srcData,
destData);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (OutputSize() == 2) {
Output(1)->CopyFrom(indices_, /* async */ true);
}
}
return true;
}
private:
Tensor indices_;
Tensor scratch_;
};
REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>);
namespace {
#define minf (-1.0f * std::numeric_limits<float>::infinity())
template <typename T>
__global__ void sequenceMaskKernel(
int N,
int M,
int B,
const T* in,
const int* seq_lengths,
T fill_val,
T* out) {
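  // A non-negative B means the input is treated as a batched [B, N, M] tensor:
  // (i, j, k) are decoded from the flat index and fill_val is written wherever
  // k >= seq_lengths[j]; otherwise the input is a single N x M matrix.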
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j >= seq_lengths[i] ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void repeatedSequenceMaskKernel(
int N,
int M,
int D,
const T* in,
const int* seq_lengths,
T fill_val,
T* out) {
CUDA_1D_KERNEL_LOOP(index, N * M * D) {
int i = index / (D * M);
int j = (index / D) % M;
out[index] = (j >= seq_lengths[i] ? fill_val : in[index]);
}
}
template <typename T>
__global__ void windowMaskKernel(
int N,
int M,
int B,
const T* in,
const int* window_centers,
const int radius,
T fill_val,
T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] =
(k < window_centers[j] - radius || k > window_centers[j] + radius
? fill_val
: in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] =
(j < window_centers[i] - radius || j > window_centers[i] + radius
? fill_val
: in[index]);
}
}
}
template <typename T>
__global__ void
upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k > j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j > i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k < j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j < i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k >= j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j >= i ? fill_val : in[index]);
}
}
}
template <typename T>
__global__ void
lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) {
if (B >= 0) {
CUDA_1D_KERNEL_LOOP(index, B * N * M) {
int k = index % M;
int j = (index - k) / M % N;
int i = (index - M * j - k) / (N * M);
int ind = N * M * i + M * j + k;
out[ind] = (k <= j ? fill_val : in[ind]);
}
} else {
CUDA_1D_KERNEL_LOOP(index, N * M) {
int i = index / M;
int j = index % M;
out[index] = (j <= i ? fill_val : in[index]);
}
}
}
} // namespace
template <>
bool SequenceMaskOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0));
}
template <>
template <class T>
bool SequenceMaskOp<CUDAContext>::DoRunWithType() {
const Tensor* input = &Input(0);
const Tensor* sequence_lengths = nullptr;
const Tensor* window_centers = nullptr;
if (mode_ == "sequence") {
sequence_lengths = &Input(1);
} else if (mode_ == "window") {
window_centers = &Input(1);
}
auto* output = Output(0, input->sizes(), at::dtype<T>());
const auto canonical_axis = input->canonical_axis_index(axis_);
// canonical_batch is non-negative if batching, -1 otherwise
int canonical_batch = -1;
if ((HasArgument("batch"))) {
canonical_batch = input->canonical_axis_index(batch_);
}
// make sure batch < axis
if (canonical_batch >= 0) {
CAFFE_ENFORCE_LT(canonical_batch, canonical_axis);
}
// if no batch, then left is product of dims up to axis
// otherwise, left is product of dims between batch and axis
const int left =
(canonical_batch >= 0
? input->size_between_dim(canonical_batch, canonical_axis)
: input->size_to_dim(canonical_axis));
const int right = input->size_from_dim(canonical_axis);
// product of dims up to and including batch
const int batch_dim =
(canonical_batch >= 0
? input->size_to_dim(canonical_batch) * input->dim(canonical_batch)
: -1);
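// Example: for an input of shape [2, 3, 4, 5] with axis == 2 and batch == 0,
// left == 3 (product of dims strictly between batch and axis), right == 20
// (product of dims from axis onward), and batch_dim == 2.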
T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_);
if (mode_ == "sequence") {
if (HasArgument("repeat_from_axis")) {
const int canonical_repeat_from =
input->canonical_axis_index(repeat_from_);
const int repeated_dims = input->size_from_dim(canonical_repeat_from);
const int masked_dims = right / repeated_dims;
repeatedSequenceMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
masked_dims,
repeated_dims,
input->data<T>(),
sequence_lengths->data<int>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
sequenceMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
sequence_lengths->data<int>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else if (mode_ == "window") {
windowMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
window_centers->data<int>(),
radius_,
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "upper") {
upperMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "lower") {
lowerMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "upperdiag") {
upperDiagMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else if (mode_ == "lowerdiag") {
lowerDiagMaskKernel<<<
CAFFE_GET_BLOCKS(left * right),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
left,
right,
batch_dim,
input->data<T>(),
fill_val,
output->template mutable_data<T>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!");
}
return true;
}
REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>);
} // namespace caffe2
|
3927e3e78fcb35a6c56a28a7223fb0970a57d637.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
// size_t bytes = n*sizeof(float);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMalloc(&A_d, A_sz * sizeof(float));
hipMalloc(&B_d, B_sz * sizeof(float));
hipMalloc(&C_d, C_sz * sizeof(float)) ;
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d, A_h, A_sz * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, B_sz * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
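// basicSgemm is provided by kernel.hip (not shown here); it is assumed to follow
// the BLAS sgemm convention C = alpha*A*B + beta*C, with 'N','N' meaning that
// neither input matrix is transposed (alpha = 1.0f, beta = 0.0f below).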
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(C_h, C_d, C_sz * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| 3927e3e78fcb35a6c56a28a7223fb0970a57d637.cu | /******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
// size_t bytes = n*sizeof(float);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMalloc(&A_d, A_sz * sizeof(float));
cudaMalloc(&B_d, B_sz * sizeof(float));
cudaMalloc(&C_d, C_sz * sizeof(float)) ;
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d, A_h, A_sz * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, B_sz * sizeof(float), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(C_h, C_d, C_sz * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
d093d52f367297810a7421562242c1d727ede2ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
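// Note: REPLACE_ITERATIONS is presumably substituted with a concrete iteration
// count by the benchmark harness before compilation; the file does not compile
// until that substitution is made.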
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int size = (LINE_SIZE*ASSOC*SETS*2)/sizeof(int); // Always force the cache to miss
unsigned j=0, k=0;
int m_sum=0;
// Fill the L1 cache, Miss on every iteration
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<size; j+=THREADS_PER_BLOCK){
m_sum += A[tid+j];
}
}
C[tid]=m_sum;
m_sum = 0;
/*
// Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
C[tid+j] = A[tid+j];
}
}
*/
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
int N = LINE_SIZE*SETS*ASSOC;
size_t size = N * sizeof(int) * 2;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Initializes the int array. Note: rand() / RAND_MAX uses integer division, so nearly every entry ends up 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| d093d52f367297810a7421562242c1d727ede2ec.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
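// Note: REPLACE_ITERATIONS is presumably substituted with a concrete iteration
// count by the benchmark harness before compilation; the file does not compile
// until that substitution is made.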
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
int size = (LINE_SIZE*ASSOC*SETS*2)/sizeof(int); // Always force the cache to miss
unsigned j=0, k=0;
int m_sum=0;
// Fill the L1 cache, Miss on every iteration
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<size; j+=THREADS_PER_BLOCK){
m_sum += A[tid+j];
}
}
C[tid]=m_sum;
m_sum = 0;
/*
// Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
for(k=0; k<ITERATIONS; ++k){
for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
C[tid+j] = A[tid+j];
}
}
*/
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
int N = LINE_SIZE*SETS*ASSOC;
size_t size = N * sizeof(int) * 2;
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Initializes the int array. Note: rand() / RAND_MAX uses integer division, so nearly every entry ends up 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
896962d04274eee7a39b9ea2c4b0624bb2fc6ef0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include <sstream>
#include "hip/device_functions.h"
#include <helper_cuda.h>
#include <fstream>
using std::vector;
using std::string;
using std::map;
#define THREAD_NUM 512
#define BLOCK_NUM 1024
const float pi = 3.1415926;
__constant__ static int dd_angle[30];
__constant__ static float dd_start[30];
__constant__ static float d_s_angle_1[30];
__constant__ static float d_s_angle_2[30];
struct InitData // Basic transducer parameters
{
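// Constructor arguments (inferred from how the fields are used below):
// c = speed of sound, fs = sampling frequency, f0 = center frequency,
// width/kerf = element geometry (pitch = width + kerf), N_elements = channel
// count, length = samples per channel, image_length = nominal image depth,
// angle = steering angles in degrees for the compounded acquisitions.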
InitData(int c, int fs, int f0, float width, float kerf, int N_elements, int length, float image_length, vector<int> angle) :
speed(c), sample_frequency(fs), central_frequency(f0), width(width), kerf(kerf), N_elements(N_elements), data_length(length), image_length(image_length), angle(angle)
{
pitch = width + kerf;
array_length = pitch*(N_elements - 1) + width;
d_x = array_length / N_elements;
d_z = double(1) / fs;
}
void push_tstart(float tstart, int i)
{
tstatrt[i] = tstart;
}
// Raw parameters
int speed;
float sample_frequency;
int central_frequency;
float width;
float kerf;
int N_elements;
int data_length;
float pitch;
float array_length;
float d_x;
float image_length;
double d_z;
vector<int> angle;
map<int, float> tstatrt;
};
// analyse and readData read the .dat data files
void analyse(float* in, const char* buf)
{
string contents = buf;
string::size_type pos1 = 0;
int n = 0;
int i = 0;
while ((pos1 = contents.find_first_of("+-.0123456789e", pos1)) != string::npos)
{
auto pos2 = contents.find_first_not_of("+-.0123456789e", pos1);
n = pos2 - pos1;
float d = stod(contents.substr(pos1, n));
in[i++] = d;
pos1 += n;
}
}
float* readData(string path, InitData &init)
{
int one_frame_length = init.N_elements*init.data_length;
int all_data_length = (init.angle.size())*one_frame_length;
float *all_rf = new float[all_data_length];
float *t_start = new float[init.angle.size()];
const int MAXS = one_frame_length * 20;// number of numeric characters
char *buf = new char[MAXS];
char *t_buf = new char[20];
int kk = 0;
for (auto ii : init.angle)
{
std::cout << "" << ii << "" << std::endl;
std::stringstream pathname;
pathname << ii;
string file_path_now = path + "data_" + pathname.str() + ".dat";
std::ifstream ifs(file_path_now, std::ios::binary);
if (ifs)
{
float *data = all_rf + one_frame_length*kk;
ifs.read((char*)data, one_frame_length*sizeof(data));
}
string t_path = path + "tstart_" + pathname.str() + ".txt";
const char* t_file_path = t_path.c_str();
FILE* t_fp = fopen(t_file_path, "rb");
if (t_fp)
{
int len = fread(t_buf, 1, 20, t_fp);
t_buf[len] = '\0';
analyse(t_start + kk, t_buf);// store the start times consecutively
init.push_tstart(t_start[kk], ii);
}
kk++;
}
delete buf;
return all_rf;
}
// Kernel for the first parallel computation step
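// Grid mapping: row encodes (steering angle, array element) and col the depth
// sample. The loop inside sums, over all N_elements receive channels, the RF
// sample at a delay built from the plane-wave transmit term
// depth*cos(angle) + element_offset*sin(angle) plus the receive path length;
// this appears to implement per-angle delay-and-sum beamforming.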
__global__ void cuda_compoundData(float* out, float* in, int new_length, int length, int N_elements, float pitch, int angle_n,
int fs, int c, double d_z)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
int angle_index = row / N_elements;
float t1 = dd_start[angle_index];
float temp1 = d_s_angle_1[angle_index];
float temp2 = d_s_angle_2[angle_index];
int real_row = row - angle_index*N_elements;
float i_real_own = (dd_angle[angle_index] > 0) ? real_row*pitch : (N_elements - real_row - 1)*pitch;
float j_real = d_z*(col + 1) *c / 2;
// Precompute terms to reduce work inside the loop below
float j_real_2 = j_real*j_real;
int oneFrameLength = N_elements*new_length;
float j_temp1 = j_real*temp1;
float i_temp2 = i_real_own*temp2;
for (int row_i = 0; row_i != N_elements; ++row_i)
{
float i_real = (real_row - row_i)*pitch;
int jj = ((j_temp1 + i_temp2 + (sqrtf(j_real_2 + i_real *i_real))) / c - t1)*fs - 0.5f;// determine the sample index for this channel
if ((jj >= 0) && (jj < new_length))
{
out[row*new_length + col] += in[angle_index*oneFrameLength + row_i*new_length + jj];
}
}
}
// Kernel for the second parallel computation step (accumulates the per-angle results)
__global__ void cuda_AddData(float* out, float* in, int length, int N_elements, int angle_n)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
for (int i = 0; i != angle_n; ++i)
{
out[row*length + col] += in[i*N_elements*length + row*length + col];
}
}
// Main host-side compute function that launches the kernels
void compoundData(float* data, InitData &init)
{
// Compute the data sizes
int one_frame_length = init.N_elements*init.data_length;
int all_data_length = (init.angle.size())*one_frame_length;
int new_length = (init.data_length / 32 + 1) * 32;// round up to a multiple of the warp size (32)
//int new_length = 64;
int new_one_frame_length = init.N_elements*new_length;
int new_all_data_length = (init.angle.size())*new_one_frame_length;
std::cout << "" << new_length;
//
hipEvent_t startMemcpy; hipEvent_t stopMemcpy;
hipEvent_t startKernel; hipEvent_t stopKernel;
hipEventCreate(&startMemcpy);
hipEventCreate(&stopMemcpy);
hipEventCreate(&startKernel);
hipEventCreate(&stopKernel);
hipEventRecord(startMemcpy); // measure the time needed to allocate and copy data on the GPU
float *new_data = new float[new_all_data_length]();// padded input data; host and device memory allocation
for (int kk = 0; kk != init.angle.size(); ++kk)
{
for (int jj = 0; jj < init.data_length; jj++) {
for (int ii = 0; ii < init.N_elements; ii++) {
new_data[kk*new_one_frame_length + ii*new_length + jj] = data[kk*one_frame_length + ii*init.data_length + jj];
}
}
}
// Allocate device memory
float *d_new_data;
hipMalloc(&d_new_data, new_all_data_length*sizeof(float));
hipMemcpy(d_new_data, new_data, sizeof(float) * new_all_data_length, hipMemcpyHostToDevice);
float *d_ans_data; // output buffer; device memory allocation
hipMalloc(&d_ans_data, new_one_frame_length*sizeof(float));
float *ans_data = new float[new_one_frame_length]();
const size_t smemSize = THREAD_NUM*sizeof(float);
// Constant memory holds the angles and related per-angle data
int *d_angle = new int[init.angle.size()];
float *d_start = new float[init.angle.size()];
float *s_angle_1 = new float[init.angle.size()];
float *s_angle_2 = new float[init.angle.size()];
for (int i = 0; i != init.angle.size(); ++i)
{
d_angle[i] = init.angle[i];
d_start[i] = init.tstatrt[init.angle[i]];
s_angle_1[i] = cos(float(d_angle[i])*pi / 180);
s_angle_2[i] = sin(float(d_angle[i])*pi / 180);
}
// Set up constant memory
hipMemcpyToSymbol(dd_angle, d_angle, sizeof(int) * init.angle.size());
hipMemcpyToSymbol(dd_start, d_start, sizeof(float) * init.angle.size());
hipMemcpyToSymbol(d_s_angle_1, s_angle_1, sizeof(float) * init.angle.size());
hipMemcpyToSymbol(d_s_angle_2, s_angle_2, sizeof(float) * init.angle.size());
checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
hipEventRecord(stopMemcpy);
hipEventRecord(startKernel);// measure kernel execution time
float *d_temp_data; // per-angle intermediate data; device memory allocation
hipMalloc(&d_temp_data, new_all_data_length*sizeof(float));
float *temp_data = new float[new_all_data_length]();
// Launch the first kernel
dim3 dimBlock(8, 8, 1);
dim3 dimGrid((new_length + dimBlock.x - 1) / dimBlock.x,
(init.N_elements*init.angle.size() + dimBlock.y - 1) / dimBlock.y, 1);
cuda_compoundData << <dimGrid, dimBlock >> >(d_temp_data, d_new_data, new_length, init.data_length, init.N_elements, init.pitch, init.angle.size(),
init.sample_frequency, init.speed, init.d_z);
checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Launch the second kernel
dim3 dimBlock2(8, 8, 1);
dim3 dimGrid2((new_length + dimBlock.x - 1) / dimBlock.x,
(init.N_elements + dimBlock.y - 1) / dimBlock.y, 1);
cuda_AddData << <dimGrid2, dimBlock2 >> >(d_ans_data, d_temp_data, new_length, init.N_elements, init.angle.size());
hipEventRecord(stopKernel);
checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Copy device results back to the host
hipMemcpy(temp_data, d_temp_data, sizeof(float) * new_all_data_length, hipMemcpyDeviceToHost);
hipMemcpy(ans_data, d_ans_data, sizeof(float) * new_one_frame_length, hipMemcpyDeviceToHost);
// Free host and device memory
hipFree(dd_angle);
hipFree(dd_start);
hipFree(d_s_angle_1);
hipFree(d_s_angle_2);
hipFree(d_ans_data);
hipFree(d_new_data);
delete d_angle;
delete d_start;
delete s_angle_1;
delete s_angle_2;
delete data;
delete new_data;
delete temp_data;
delete ans_data;
// Compute elapsed times
float memcpyTime = 0;
hipEventElapsedTime(&memcpyTime, startMemcpy, stopMemcpy);
float kernelTime = 0;
hipEventElapsedTime(&kernelTime, startKernel, stopKernel);
std::cout << "GPUCPU" << memcpyTime << "ms" << std::endl;
std::cout << "" << kernelTime << "ms" << std::endl;
}
int main()
{
string path = "..//data//";
vector<int> angle = { -9,-7,-5,-3,-1, 0,1,3,5,7,9 };
//vector<int> angle = { -7,-5,-3,-1, 0,1,3,5,7 };
InitData init(1540, 50e6, 3.5e6, 0.2798e-3, 0.025e-3, 128, 6000, 0.11, angle);
//std::cout << cos(1*pi/180);
float* test = readData(path, init);
compoundData(test, init);
//std::cout << *test;
std::cout << "";
} | 896962d04274eee7a39b9ea2c4b0624bb2fc6ef0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <vector>
#include <map>
#include <iostream>
#include <sstream>
#include "device_functions.h"
#include <helper_cuda.h>
#include <fstream>
using std::vector;
using std::string;
using std::map;
#define THREAD_NUM 512
#define BLOCK_NUM 1024
const float pi = 3.1415926;
__constant__ static int dd_angle[30];
__constant__ static float dd_start[30];
__constant__ static float d_s_angle_1[30];
__constant__ static float d_s_angle_2[30];
struct InitData // Basic transducer parameters
{
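// Constructor arguments (inferred from how the fields are used below):
// c = speed of sound, fs = sampling frequency, f0 = center frequency,
// width/kerf = element geometry (pitch = width + kerf), N_elements = channel
// count, length = samples per channel, image_length = nominal image depth,
// angle = steering angles in degrees for the compounded acquisitions.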
InitData(int c, int fs, int f0, float width, float kerf, int N_elements, int length, float image_length, vector<int> angle) :
speed(c), sample_frequency(fs), central_frequency(f0), width(width), kerf(kerf), N_elements(N_elements), data_length(length), image_length(image_length), angle(angle)
{
pitch = width + kerf;
array_length = pitch*(N_elements - 1) + width;
d_x = array_length / N_elements;
d_z = double(1) / fs;
}
void push_tstart(float tstart, int i)
{
tstatrt[i] = tstart;
}
// Raw parameters
int speed;
float sample_frequency;
int central_frequency;
float width;
float kerf;
int N_elements;
int data_length;
float pitch;
float array_length;
float d_x;
float image_length;
double d_z;
vector<int> angle;
map<int, float> tstatrt;
};
// analyse and readData read the .dat data files
void analyse(float* in, const char* buf)
{
string contents = buf;
string::size_type pos1 = 0;
int n = 0;
int i = 0;
while ((pos1 = contents.find_first_of("+-.0123456789e", pos1)) != string::npos)
{
auto pos2 = contents.find_first_not_of("+-.0123456789e", pos1);
n = pos2 - pos1;
float d = stod(contents.substr(pos1, n));
in[i++] = d;
pos1 += n;
}
}
float* readData(string path, InitData &init)
{
int one_frame_length = init.N_elements*init.data_length;
int all_data_length = (init.angle.size())*one_frame_length;
float *all_rf = new float[all_data_length];
float *t_start = new float[init.angle.size()];
const int MAXS = one_frame_length * 20;// number of numeric characters
char *buf = new char[MAXS];
char *t_buf = new char[20];
int kk = 0;
for (auto ii : init.angle)
{
std::cout << "正在读取第" << ii << "帧数据" << std::endl;
std::stringstream pathname;
pathname << ii;
string file_path_now = path + "data_" + pathname.str() + ".dat";
std::ifstream ifs(file_path_now, std::ios::binary);
if (ifs)
{
float *data = all_rf + one_frame_length*kk;
ifs.read((char*)data, one_frame_length*sizeof(data));
}
string t_path = path + "tstart_" + pathname.str() + ".txt";
const char* t_file_path = t_path.c_str();
FILE* t_fp = fopen(t_file_path, "rb");
if (t_fp)
{
int len = fread(t_buf, 1, 20, t_fp);
t_buf[len] = '\0';
analyse(t_start + kk, t_buf);// store the start times consecutively
init.push_tstart(t_start[kk], ii);
}
kk++;
}
delete buf;
return all_rf;
}
// Kernel for the first parallel computation step
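// Grid mapping: row encodes (steering angle, array element) and col the depth
// sample. The loop inside sums, over all N_elements receive channels, the RF
// sample at a delay built from the plane-wave transmit term
// depth*cos(angle) + element_offset*sin(angle) plus the receive path length;
// this appears to implement per-angle delay-and-sum beamforming.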
__global__ void cuda_compoundData(float* out, float* in, int new_length, int length, int N_elements, float pitch, int angle_n,
int fs, int c, double d_z)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
int angle_index = row / N_elements;
float t1 = dd_start[angle_index];
float temp1 = d_s_angle_1[angle_index];
float temp2 = d_s_angle_2[angle_index];
int real_row = row - angle_index*N_elements;
float i_real_own = (dd_angle[angle_index] > 0) ? real_row*pitch : (N_elements - real_row - 1)*pitch;
float j_real = d_z*(col + 1) *c / 2;
// Precompute terms to reduce work inside the loop below
float j_real_2 = j_real*j_real;
int oneFrameLength = N_elements*new_length;
float j_temp1 = j_real*temp1;
float i_temp2 = i_real_own*temp2;
for (int row_i = 0; row_i != N_elements; ++row_i)
{
float i_real = (real_row - row_i)*pitch;
int jj = ((j_temp1 + i_temp2 + (sqrtf(j_real_2 + i_real *i_real))) / c - t1)*fs - 0.5f;// determine the sample index for this channel
if ((jj >= 0) && (jj < new_length))
{
out[row*new_length + col] += in[angle_index*oneFrameLength + row_i*new_length + jj];
}
}
}
// Kernel for the second parallel computation step (accumulates the per-angle results)
__global__ void cuda_AddData(float* out, float* in, int length, int N_elements, int angle_n)
{
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
for (int i = 0; i != angle_n; ++i)
{
out[row*length + col] += in[i*N_elements*length + row*length + col];
}
}
// Main host-side compute function that launches the kernels
void compoundData(float* data, InitData &init)
{
// Compute the data sizes
int one_frame_length = init.N_elements*init.data_length;
int all_data_length = (init.angle.size())*one_frame_length;
int new_length = (init.data_length / 32 + 1) * 32;// round up to a multiple of the warp size (32)
//int new_length = 64;
int new_one_frame_length = init.N_elements*new_length;
int new_all_data_length = (init.angle.size())*new_one_frame_length;
std::cout << "最大数据长度为" << new_length;
//计时函数
cudaEvent_t startMemcpy; cudaEvent_t stopMemcpy;
cudaEvent_t startKernel; cudaEvent_t stopKernel;
cudaEventCreate(&startMemcpy);
cudaEventCreate(&stopMemcpy);
cudaEventCreate(&startKernel);
cudaEventCreate(&stopKernel);
cudaEventRecord(startMemcpy); // measure the time needed to allocate and copy data on the GPU
float *new_data = new float[new_all_data_length]();// padded input data; host and device memory allocation
for (int kk = 0; kk != init.angle.size(); ++kk)
{
for (int jj = 0; jj < init.data_length; jj++) {
for (int ii = 0; ii < init.N_elements; ii++) {
new_data[kk*new_one_frame_length + ii*new_length + jj] = data[kk*one_frame_length + ii*init.data_length + jj];
}
}
}
// Allocate device memory
float *d_new_data;
cudaMalloc(&d_new_data, new_all_data_length*sizeof(float));
cudaMemcpy(d_new_data, new_data, sizeof(float) * new_all_data_length, cudaMemcpyHostToDevice);
float *d_ans_data; // output buffer; device memory allocation
cudaMalloc(&d_ans_data, new_one_frame_length*sizeof(float));
float *ans_data = new float[new_one_frame_length]();
const size_t smemSize = THREAD_NUM*sizeof(float);
// Constant memory holds the angles and related per-angle data
int *d_angle = new int[init.angle.size()];
float *d_start = new float[init.angle.size()];
float *s_angle_1 = new float[init.angle.size()];
float *s_angle_2 = new float[init.angle.size()];
for (int i = 0; i != init.angle.size(); ++i)
{
d_angle[i] = init.angle[i];
d_start[i] = init.tstatrt[init.angle[i]];
s_angle_1[i] = cos(float(d_angle[i])*pi / 180);
s_angle_2[i] = sin(float(d_angle[i])*pi / 180);
}
// Set up constant memory
cudaMemcpyToSymbol(dd_angle, d_angle, sizeof(int) * init.angle.size());
cudaMemcpyToSymbol(dd_start, d_start, sizeof(float) * init.angle.size());
cudaMemcpyToSymbol(d_s_angle_1, s_angle_1, sizeof(float) * init.angle.size());
cudaMemcpyToSymbol(d_s_angle_2, s_angle_2, sizeof(float) * init.angle.size());
checkCudaErrors(cudaPeekAtLastError());
checkCudaErrors(cudaDeviceSynchronize());
cudaEventRecord(stopMemcpy);
cudaEventRecord(startKernel);// measure kernel execution time
float *d_temp_data; // per-angle intermediate data; device memory allocation
cudaMalloc(&d_temp_data, new_all_data_length*sizeof(float));
float *temp_data = new float[new_all_data_length]();
// Launch the first kernel
dim3 dimBlock(8, 8, 1);
dim3 dimGrid((new_length + dimBlock.x - 1) / dimBlock.x,
(init.N_elements*init.angle.size() + dimBlock.y - 1) / dimBlock.y, 1);
cuda_compoundData << <dimGrid, dimBlock >> >(d_temp_data, d_new_data, new_length, init.data_length, init.N_elements, init.pitch, init.angle.size(),
init.sample_frequency, init.speed, init.d_z);
checkCudaErrors(cudaPeekAtLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Launch the second kernel
dim3 dimBlock2(8, 8, 1);
dim3 dimGrid2((new_length + dimBlock.x - 1) / dimBlock.x,
(init.N_elements + dimBlock.y - 1) / dimBlock.y, 1);
cuda_AddData << <dimGrid2, dimBlock2 >> >(d_ans_data, d_temp_data, new_length, init.N_elements, init.angle.size());
cudaEventRecord(stopKernel);
checkCudaErrors(cudaPeekAtLastError());
checkCudaErrors(cudaDeviceSynchronize());
// Copy device results back to the host
cudaMemcpy(temp_data, d_temp_data, sizeof(float) * new_all_data_length, cudaMemcpyDeviceToHost);
cudaMemcpy(ans_data, d_ans_data, sizeof(float) * new_one_frame_length, cudaMemcpyDeviceToHost);
// Free host and device memory
cudaFree(dd_angle);
cudaFree(dd_start);
cudaFree(d_s_angle_1);
cudaFree(d_s_angle_2);
cudaFree(d_ans_data);
cudaFree(d_new_data);
delete d_angle;
delete d_start;
delete s_angle_1;
delete s_angle_2;
delete data;
delete new_data;
delete temp_data;
delete ans_data;
// Compute elapsed times
float memcpyTime = 0;
cudaEventElapsedTime(&memcpyTime, startMemcpy, stopMemcpy);
float kernelTime = 0;
cudaEventElapsedTime(&kernelTime, startKernel, stopKernel);
std::cout << "GPU和CPU中复制数据用时" << memcpyTime << "ms" << std::endl;
std::cout << "核函数计算用时(多角度复合)" << kernelTime << "ms" << std::endl;
}
int main()
{
string path = "..//data//";
vector<int> angle = { -9,-7,-5,-3,-1, 0,1,3,5,7,9 };
//vector<int> angle = { -7,-5,-3,-1, 0,1,3,5,7 };
InitData init(1540, 50e6, 3.5e6, 0.2798e-3, 0.025e-3, 128, 6000, 0.11, angle);
//std::cout << cos(1*pi/180);
float* test = readData(path, init);
compoundData(test, init);
//std::cout << *test;
std::cout << "计算结束!";
} |
d69c133a12a0f5fd99b4b77438a255258c0db421.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
__global__ void Run(int n, float* __restrict input, float* __restrict output) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) output[i] = log(input[i]);
}
} | d69c133a12a0f5fd99b4b77438a255258c0db421.cu | extern "C" {
__global__ void Run(int n, float* __restrict input, float* __restrict output) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) output[i] = log(input[i]);
}
} |
9a38be31f8fb34c52e0b22f2536bb1a10c82bee5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "maxReduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
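// This appears to be an auto-generated benchmark harness: for each matrix size
// (up to argv[1] of the entries above) and each block shape, the kernel is
// warmed up with 10 launches and then timed over 1000 launches; results are
// printed as [usecs,(block),(size)].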
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_idata = NULL;
hipMalloc(&d_idata, XSIZE*YSIZE);
int *d_odata = NULL;
hipMalloc(&d_odata, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((maxReduce), dim3(gridBlock),dim3(threadBlock), 0, 0, d_idata,d_odata);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((maxReduce), dim3(gridBlock),dim3(threadBlock), 0, 0, d_idata,d_odata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((maxReduce), dim3(gridBlock),dim3(threadBlock), 0, 0, d_idata,d_odata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9a38be31f8fb34c52e0b22f2536bb1a10c82bee5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "maxReduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
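// This appears to be an auto-generated benchmark harness: for each matrix size
// (up to argv[1] of the entries above) and each block shape, the kernel is
// warmed up with 10 launches and then timed over 1000 launches; results are
// printed as [usecs,(block),(size)].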
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_idata = NULL;
cudaMalloc(&d_idata, XSIZE*YSIZE);
int *d_odata = NULL;
cudaMalloc(&d_odata, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
maxReduce<<<gridBlock,threadBlock>>>(d_idata,d_odata);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
maxReduce<<<gridBlock,threadBlock>>>(d_idata,d_odata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
maxReduce<<<gridBlock,threadBlock>>>(d_idata,d_odata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
91fc396a16e5068a112fb569bbe9099415cca868.hip | // !!! This is a file automatically generated by hipify!!!
/// This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
/// (18/01/2017)
/// Execution times are plotted in Matlab, considering Radix-2. N = 2^20, Li = {2, 4, ..., N}, Lo = N (double precision).
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS/////////////////////////
//////////////////////////////////////////////////////////////////////////
hipDoubleComplex *x_host;
hipDoubleComplex *W_host;
//hipDoubleComplex *y_host;
//hipDoubleComplex *z_host;
hipDoubleComplex *X_host;
hipDoubleComplex *x_device;
hipDoubleComplex *W_device;
hipDoubleComplex *y_device;
hipDoubleComplex *z_device;
hipDoubleComplex *X_device;
hipfftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; // Stores the factors of N
int svF; // Stores the number of factors of N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA//////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of nonzero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
/////////////////// THE INPUT DATA IS SET HERE ///////////////////////////
///Ingrese el nmero de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 20;
///Ingrese el valor de Li_max
const int Lo_max = 1048576;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////MAIN FUNCTION///////////////////////////////////
//Funcin principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIN DEL DEVICE////////////////////////////
//////////////////////////DEVICE SELECTION////////////////////////////////
int device;
FILE *da;
hipSetDevice(0);
hipGetDevice(&device);
if(device == 1)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N20_LiVARIA_LoN_CUDA_GTX970_DO.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 0)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N20_LiVARIA_LoN_CUDA_TESLAK20c_DO.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
// Pause
printf("\n---PRESS A KEY TO CONTINUE---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
for(j_res=Lo_max;j_res <= Lo_max;j_res++)
{
Lo=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Li=(int )pow(2,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
/// Open the binary input files
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
// Commands needed to measure time
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
// Generate the input vector x[n] on the host
vector_entrada_xn(Li);
/// Generate the array W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
// Start measuring the application execution time
hipEventRecord(start_app,0);
// Generate the factors Dip and Dop on the host
asign_rap(N,Li,Lo);
// Compute the factor P on the host
P = N/(Dip*Dop);
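// N is decomposed as Dip*P*Dop: the input stage spreads the Li nonzero inputs
// over Dip*Dop sequences of length P, the intermediate stage runs those Dip*Dop
// length-P FFTs in one batched hipFFT call, and the output stage combines them
// into the Lo requested output samples.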
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
// Host helper function that runs the input stage
etapa_entrada();
// Host helper function that runs the intermediate stage
etapa_intermedia();
// Host helper function that runs the output stage
etapa_salida();
//---------------------------------------------------------------------------------------------
// Commands needed to measure the application (app) time
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
// Accumulate all the times
suma = suma + elapsedTime_app;
// Destroy the events that measure the application time
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
// Free host and device memory
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
//printf("\n\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < N_max;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////SECONDARY FUNCTIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
// This function generates the input vector x[n]
void vector_entrada_xn(int Li)
{
// Local variable declarations
int k;
float *buffer_real,*buffer_imag;
// Reserve memory for x_host on the host
x_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
/// Read the input vector from the binary files
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
// Assign values to x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((double)(rand()%11),(double)(rand()%11));
//x_host[k] = make_cuDoubleComplex((double)(k + 1),(double)(0.0));
x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
}
/*
// Print the input values x[n]
printf("\n---INPUT ELEMENTS x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
// This function generates the array W
void arreglo_W(int N)
{
// Local variable declarations
int n;
// Reserve memory for W_host on the host
W_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*N);
// Generate the array W
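// W[n-1] = exp(-j*2*pi*n/N), i.e. the N complex twiddle factors used by the
// input and output stages.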
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
// Print the values of the array W[N]
printf("\n---ARRAY W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n]));
}
*/
}
// This function generates the factors Dip and Dop
void asign_rap(int N,int Li,int Lo)
{
// Local variable declarations
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
// Initializations
G = 0;
svF = 0;
// Ideal Dip and Dop factors
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
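// The ideal factors would be Dip = N/Li and Dop = N/Lo; the search below picks,
// among the factor pairs built from the prime factorization of N with
// Dip <= N/Li, the pair closest in Euclidean distance to that ideal point.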
// Find the factors of "N"
// vF stores the factors of "N"
// svF stores the number of factors of "N"
factor(N);
/*
Stores in the vector Nh the factors of vF that are distinct from each other.
The vector k stores how many times each element
stored in the vector Nh is repeated.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Stores in the vector Nh all possible combinations whose
product equals N. t stores the number of elements of the vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
// This function finds the factors of "N"
void factor(int N)
{
// Start checking factors from 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
// This function finds all possible combinations of factors whose product is "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
// Host helper function that computes the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INPUT STAGE///////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// Local variable declarations
int k1,n1,n2;
// Memory allocation on the device for the array "x_device"
hipMalloc((void**)&x_device,Li*sizeof(hipDoubleComplex));
// Reserve memory on the device for the array "W_device"
hipMalloc((void**)&W_device,N*sizeof(hipDoubleComplex));
// Memory allocation on the device for the array "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(hipDoubleComplex));
// Copy the array x_host to x_device
hipMemcpy(x_device,x_host,Li*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
// Send the W array to the device global memory
hipMemcpy(W_device,W_host,N*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
// Memory allocation on the host for "y"
//y_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
// Grid dimensioning for the kernel function "inputStage"
// Grid dimensioning
dim3 gridDim(1,1,1);
// Block dimensioning
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
// Copy of the array "y" from the device to the host
hipMemcpy(y_host,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
// Print the values of "y"
printf("\n\n--- ARRAY y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
// Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y)
{
int n1,n2;
hipDoubleComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
// Reset the flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
// Generation of the elements that depend on x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
// Map x[n] to the inputs of the first set of Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Zero-fill the elements of "y" for Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host-side helper that runs the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////INTERMEDIATE STAGE///////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Allocate device memory for "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(hipDoubleComplex));
//Allocate host memory for "z"
//z_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
//Allocate device memory for "in" and "out"
hipMalloc((void**)&in,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
//Copy the array "y" into the array "in"
hipMemcpy(in,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Create an FFT plan
hipfftHandle plan;
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_Z2Z,Dip*Dop);
//Execute the plan
hipfftExecZ2Z(plan,in,out,HIPFFT_FORWARD);
//Wait until the kernel has finished completely
hipDeviceSynchronize();
//Copy the data from "out" into "z_device"
hipMemcpy(z_device,out,sizeof(hipfftDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Destroy the plan
hipfftDestroy(plan);
//Free the arrays "in" and "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host-side helper that runs the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////OUTPUT STAGE/////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int m;
//Allocate device memory for "X"
hipMalloc((void**)&X_device,Lo*sizeof(hipDoubleComplex));
//Allocate host memory for "X"
X_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Lo);
//Grid sizing for the "outputStage" kernel
//Grid dimensions
dim3 gridDim(1,1,1);
//Block dimensions
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Launch the "outputStage_kernel" kernel
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Wait until the kernel has finished completely
hipDeviceSynchronize();
//Copy the array "X" from the device to the host
hipMemcpy(X_host,X_device,sizeof(hipDoubleComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//Kernel that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X)
{
//Local variable declarations
int n1,k_aux,k1,k2,a,b;
hipDoubleComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Reset the flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Compute X(k) for 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case that guarantees X[k] is initialised at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Compute X(k) for 0<=k<=Dip-1.
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Case that guarantees X[k] is initialised at least once
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Using the direct method
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Case that guarantees X[k] is initialised at least once
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
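//Index split: k_aux = k mod (Dip*P), k2 = floor(k_aux/Dip), k1 = k_aux mod Dip
//(e.g. with Dip = 4, P = 8, k = 37: k_aux = 5, k2 = 1, k1 = 1).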
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Using the 2BF filtering method
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
/*
if(k == 256)
{
printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
}
*/
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t4 = cuCsub(t3,t2);
/*
if(k == 256)
{
printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
}
*/
}
if(n1 == (Dop-1))
{
t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
| 91fc396a16e5068a112fb569bbe9099415cca868.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(18/01/2017)
///Grafica en Matlab los tiempos de ejecución, considerando Radix-2. N = 2^20, Li = {2, 4,…,N}, Lo= N. (precisión doble).
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuDoubleComplex *x_host;
cuDoubleComplex *W_host;
//cuDoubleComplex *y_host;
//cuDoubleComplex *z_host;
cuDoubleComplex *X_host;
cuDoubleComplex *x_device;
cuDoubleComplex *W_device;
cuDoubleComplex *y_device;
cuDoubleComplex *z_device;
cuDoubleComplex *X_device;
cufftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 20;
///Ingrese el valor de Li_max
const int Lo_max = 1048576;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
cudaSetDevice(0);
cudaGetDevice(&device);
if(device == 1)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_N20_LiVARIA_LoN_CUDA_GTX970_DO.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 0)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_N20_LiVARIA_LoN_CUDA_TESLAK20c_DO.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[N_max];
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
printf("\n N = %d \n",N);
for(j_res=Lo_max;j_res <= Lo_max;j_res++)
{
Lo=j_res;
for(k_res=1;k_res <= N_max;k_res++)
{
Li=(int )pow(2,k_res);
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
//printf("\n\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
fwrite(promedio,sizeof(float),N_max,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < N_max;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((double)(rand()%11),(double)(rand()%11));
//x_host[k] = make_cuDoubleComplex((double)(k + 1),(double)(0.0));
x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuDoubleComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuDoubleComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuDoubleComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y)
{
int n1,n2;
cuDoubleComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuDoubleComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftDoubleComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftDoubleComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_Z2Z,Dip*Dop);
//Ejecución del plan
cufftExecZ2Z(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuDoubleComplex));
//Asignación de memoria en el host para "X"
X_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuDoubleComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuDoubleComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
/*
if(k == 256)
{
printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
}
*/
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t4 = cuCsub(t3,t2);
/*
if(k == 256)
{
printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
}
*/
}
if(n1 == (Dop-1))
{
t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
}
|
1eb5638ec7c45a067acd37396fbc1ac8c5037dfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SINGLE PRECISION KERNELS FOR ODD/EVEN FERMIONS
__global__ void DslashKernelEO(float2 *out,
int *tables,
int *phases,
size_t gauge_offset,
size_t ferm_offset)
{
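//Summary: each thread handles one odd site idx (idx >= size_dev_h). It gathers the eight
//neighbours through "tables", applies the staggered phases (and, with IM_CHEM_POT, the
//e^{+i mu}/e^{-i mu} twist on direction 3), and writes
// out(even site) = mass_dev * in(even site)
// out(odd site) = 0.5 * (forward hops - backward hops)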
int idx = blockIdx.x * blockDim.x + threadIdx.x + size_dev_h; // idx>sizeh, ODD
int stag_phase = 1;
//Store result in sharedMem
__shared__ float ferm_out[3][2][NUM_THREADS];
//New tables indexing (index fastest)
__shared__ int site_table[NUM_THREADS];
//Load link matrix U_mu(ix) in registers
DeclareMatrixRegs; //12 registers
float2 ferm_in_0, ferm_in_1, ferm_in_2;
#ifdef IM_CHEM_POT
float2 ferm_aux_0, ferm_aux_1, ferm_aux_2;
#endif
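//Fermion layout assumed by the texture fetches: colour component c of site s is stored at
//c*size_dev + s (plus ferm_offset), so the three components sit size_dev apart.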
// Direction 0
site_table[threadIdx.x] = tables[idx+4*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] = C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] = C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+5*size_dev];
stag_phase = phases[idx+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+6*size_dev];
stag_phase = phases[idx+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+7*size_dev];
stag_phase = phases[idx+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_aux_0.y = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_aux_1.x = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_aux_1.y = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_aux_2.x = stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_aux_2.y = stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_f - ferm_aux_0.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_f - ferm_aux_1.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_f - ferm_aux_2.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_2]
#endif
//---------------------------------------------------end of first block
//Direction 0
site_table[threadIdx.x] = tables[idx];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
C1RE*ferm_in_2.x +C1IM*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
C1RE*ferm_in_2.y -C1IM*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
C2RE*ferm_in_2.x +C2IM*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
C2RE*ferm_in_2.y -C2IM*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
C3RE*ferm_in_2.x +C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
C3RE*ferm_in_2.y -C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+size_dev];
stag_phase = phases[site_table[threadIdx.x]+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y-C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y-C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+2*size_dev];
stag_phase = phases[site_table[threadIdx.x]+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+3*size_dev];
stag_phase = phases[site_table[threadIdx.x]+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_aux_0.y = link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_aux_1.x = link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_aux_1.y = link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_aux_2.x = link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_aux_2.y = link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_f + ferm_aux_0.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_f + ferm_aux_1.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_f + ferm_aux_2.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_2]
#endif
//-------------------------------------------------end of second block
//even
ferm_in_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset - size_dev_h);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset - size_dev_h);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset - size_dev_h);
out[idx - size_dev_h ].x = mass_dev*ferm_in_0.x;
out[idx - size_dev_h ].y = mass_dev*ferm_in_0.y;
out[idx + size_dev - size_dev_h ].x = mass_dev*ferm_in_1.x;
out[idx + size_dev - size_dev_h ].y = mass_dev*ferm_in_1.y;
out[idx + 2*size_dev - size_dev_h ].x = mass_dev*ferm_in_2.x;
out[idx + 2*size_dev - size_dev_h ].y = mass_dev*ferm_in_2.y;
//odd
out[idx ].x = ferm_out[0][0][threadIdx.x]*(0.5f);
out[idx ].y = ferm_out[0][1][threadIdx.x]*(0.5f);
out[idx + size_dev ].x = ferm_out[1][0][threadIdx.x]*(0.5f);
out[idx + size_dev ].y = ferm_out[1][1][threadIdx.x]*(0.5f);
out[idx + 2*size_dev ].x = ferm_out[2][0][threadIdx.x]*(0.5f);
out[idx + 2*size_dev ].y = ferm_out[2][1][threadIdx.x]*(0.5f);
//-------------------------------------------------end of Dslash
}
__global__ void DslashDaggerKernelEO(float2 *out,
int *tables,
int *phases,
size_t gauge_offset,
size_t ferm_offset)
{
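//Summary: each thread handles one even site idx (idx < size_dev_h). The hopping term enters
//with the opposite overall sign (factor -0.5), so
// out(even site) = mass_dev * in(even site) - 0.5 * (forward hops - backward hops)
// out(odd site) = 0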
int idx = blockIdx.x*blockDim.x + threadIdx.x; // idx< sizeh, EVEN!!
int stag_phase = 1;
//Store result in sharedMem
__shared__ float ferm_out[3][2][NUM_THREADS];
//New tables indexing (index fastest)
__shared__ int site_table[NUM_THREADS];
//Load link matrix U_mu(ix) in registers
DeclareMatrixRegs; //12 registers
float2 ferm_in_0, ferm_in_1, ferm_in_2;
#ifdef IM_CHEM_POT
float2 ferm_aux_0, ferm_aux_1, ferm_aux_2;
#endif
// Direction 0
site_table[threadIdx.x] = tables[idx+4*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] = C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] = C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+5*size_dev];
stag_phase = phases[idx+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+6*size_dev];
stag_phase = phases[idx+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+7*size_dev];
stag_phase = phases[idx+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_aux_0.y = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_aux_1.x = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_aux_1.y = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_aux_2.x = stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_aux_2.y = stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_f - ferm_aux_0.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_f - ferm_aux_1.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_f - ferm_aux_2.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_2]
#endif
//---------------------------------------------------end of first block
//Direction 0
site_table[threadIdx.x] = tables[idx];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs( gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
C1RE*ferm_in_2.x +C1IM*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
C1RE*ferm_in_2.y -C1IM*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
C2RE*ferm_in_2.x +C2IM*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
C2RE*ferm_in_2.y -C2IM*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
C3RE*ferm_in_2.x +C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
C3RE*ferm_in_2.y -C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+size_dev];
stag_phase = phases[site_table[threadIdx.x]+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y-C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y-C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+2*size_dev];
stag_phase = phases[site_table[threadIdx.x]+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+3*size_dev];
stag_phase = phases[site_table[threadIdx.x]+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_aux_0.y = link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_aux_1.x = link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_aux_1.y = link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_aux_2.x = link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_aux_2.y = link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_f + ferm_aux_0.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_f + ferm_aux_1.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_f + ferm_aux_2.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_2]
#endif
//-------------------------------------------------end of second block
// even
ferm_in_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset);
out[idx ].x = mass_dev*ferm_in_0.x + ferm_out[0][0][threadIdx.x]*(-0.5f);
out[idx ].y = mass_dev*ferm_in_0.y + ferm_out[0][1][threadIdx.x]*(-0.5f);
out[idx + size_dev ].x = mass_dev*ferm_in_1.x + ferm_out[1][0][threadIdx.x]*(-0.5f);
out[idx + size_dev ].y = mass_dev*ferm_in_1.y + ferm_out[1][1][threadIdx.x]*(-0.5f);
out[idx + 2*size_dev ].x = mass_dev*ferm_in_2.x + ferm_out[2][0][threadIdx.x]*(-0.5f);
out[idx + 2*size_dev ].y = mass_dev*ferm_in_2.y + ferm_out[2][1][threadIdx.x]*(-0.5f);
// odd
out[idx + size_dev_h ].x = 0.0f;
out[idx + size_dev_h ].y = 0.0f;
out[idx + size_dev + size_dev_h ].x = 0.0f;
out[idx + size_dev + size_dev_h ].y = 0.0f;
out[idx + 2*size_dev + size_dev_h ].x = 0.0f;
out[idx + 2*size_dev + size_dev_h ].y = 0.0f;
//-------------------------------------------------end of DslashDagger
}
///////////////////////////////////////////////////////////////////////////////////////// END OF KERNELS
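// Host-side driver for the even/odd Dslash: binds the fermion and gauge-field textures,
// converts the byte offsets returned by hipBindTexture into element offsets, and launches
// DslashKernelEO (isign == PLUS) or DslashDaggerKernelEO (isign == MINUS) over the even sites only.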
void DslashOperatorEO(float2 *out,
float2 *in,
const int isign)
{
#ifdef DEBUG_MODE_2
printf("\033[32mDEBUG: inside DslashOperatorEO ...\033[0m\n");
#endif
dim3 BlockDimension(NUM_THREADS);
dim3 GridDimension(sizeh/BlockDimension.x); // Only even sites!
size_t vector_size=3*size*sizeof(float2);
size_t gauge_field_size = sizeof(float4)*size*12;
size_t offset_g, offset_f;
cudaSafe(AT,hipBindTexture(&offset_f, fermion_texRef, in, vector_size), "hipBindTexture");
offset_f/=sizeof(float2);
cudaSafe(AT,hipBindTexture(&offset_g, gauge_texRef, gauge_field_device, gauge_field_size), "hipBindTexture");
offset_g/=sizeof(float4);
if(isign == PLUS)
{
hipLaunchKernelGGL(( DslashKernelEO), dim3(GridDimension),dim3(BlockDimension), 0, 0, out, device_table, device_phases, offset_g, offset_f);
cudaCheckError(AT,"DslashKernelEO");
}
if(isign == MINUS)
{
hipLaunchKernelGGL(( DslashDaggerKernelEO), dim3(GridDimension),dim3(BlockDimension), 0, 0, out, device_table, device_phases, offset_g, offset_f);
cudaCheckError(AT,"DslashDaggerKernelEO");
}
cudaSafe(AT,hipUnbindTexture(fermion_texRef), "hipUnbindTexture");
cudaSafe(AT,hipUnbindTexture(gauge_texRef), "hipUnbindTexture");
#ifdef DEBUG_MODE_2
printf("\033[32m\tterminated DslashOperator \033[0m\n");
#endif
}
| 1eb5638ec7c45a067acd37396fbc1ac8c5037dfe.cu | // SINGLE PRECISION KERNELS FOR ODD/EVEN FERMIONS
__global__ void DslashKernelEO(float2 *out,
int *tables,
int *phases,
size_t gauge_offset,
size_t ferm_offset)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x + size_dev_h; // idx>sizeh, ODD
int stag_phase = 1;
//Store result in sharedMem
__shared__ float ferm_out[3][2][NUM_THREADS];
//New tables indexing (index fastest)
__shared__ int site_table[NUM_THREADS];
//Load link matrix U_mu(ix) in registers
DeclareMatrixRegs; //12 registers
float2 ferm_in_0, ferm_in_1, ferm_in_2;
#ifdef IM_CHEM_POT
float2 ferm_aux_0, ferm_aux_1, ferm_aux_2;
#endif
// Direction 0
site_table[threadIdx.x] = tables[idx+4*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] = C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] = C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+5*size_dev];
stag_phase = phases[idx+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+6*size_dev];
stag_phase = phases[idx+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+7*size_dev];
stag_phase = phases[idx+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_aux_0.y = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_aux_1.x = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_aux_1.y = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_aux_2.x = stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_aux_2.y = stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_f - ferm_aux_0.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_f - ferm_aux_1.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_f - ferm_aux_2.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_2]
#endif
//---------------------------------------------------end of first block
//Direction 0
site_table[threadIdx.x] = tables[idx];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
C1RE*ferm_in_2.x +C1IM*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
C1RE*ferm_in_2.y -C1IM*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
C2RE*ferm_in_2.x +C2IM*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
C2RE*ferm_in_2.y -C2IM*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
C3RE*ferm_in_2.x +C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
C3RE*ferm_in_2.y -C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+size_dev];
stag_phase = phases[site_table[threadIdx.x]+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y-C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y-C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+2*size_dev];
stag_phase = phases[site_table[threadIdx.x]+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+3*size_dev];
stag_phase = phases[site_table[threadIdx.x]+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_aux_0.y = link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_aux_1.x = link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_aux_1.y = link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_aux_2.x = link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_aux_2.y = link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_f + ferm_aux_0.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_f + ferm_aux_1.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_f + ferm_aux_2.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_2]
#endif
//-------------------------------------------------end of second block
//even
ferm_in_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset - size_dev_h);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset - size_dev_h);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset - size_dev_h);
out[idx - size_dev_h ].x = mass_dev*ferm_in_0.x;
out[idx - size_dev_h ].y = mass_dev*ferm_in_0.y;
out[idx + size_dev - size_dev_h ].x = mass_dev*ferm_in_1.x;
out[idx + size_dev - size_dev_h ].y = mass_dev*ferm_in_1.y;
out[idx + 2*size_dev - size_dev_h ].x = mass_dev*ferm_in_2.x;
out[idx + 2*size_dev - size_dev_h ].y = mass_dev*ferm_in_2.y;
//odd
out[idx ].x = ferm_out[0][0][threadIdx.x]*(0.5f);
out[idx ].y = ferm_out[0][1][threadIdx.x]*(0.5f);
out[idx + size_dev ].x = ferm_out[1][0][threadIdx.x]*(0.5f);
out[idx + size_dev ].y = ferm_out[1][1][threadIdx.x]*(0.5f);
out[idx + 2*size_dev ].x = ferm_out[2][0][threadIdx.x]*(0.5f);
out[idx + 2*size_dev ].y = ferm_out[2][1][threadIdx.x]*(0.5f);
//-------------------------------------------------end of Dslash
}
__global__ void DslashDaggerKernelEO(float2 *out,
int *tables,
int *phases,
size_t gauge_offset,
size_t ferm_offset)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // idx< sizeh, EVEN!!
int stag_phase = 1;
//Store result in sharedMem
__shared__ float ferm_out[3][2][NUM_THREADS];
//New tables indexing (index fastest)
__shared__ int site_table[NUM_THREADS];
//Load link matrix U_mu(ix) in registers
DeclareMatrixRegs; //12 registers
float2 ferm_in_0, ferm_in_1, ferm_in_2;
#ifdef IM_CHEM_POT
float2 ferm_aux_0, ferm_aux_1, ferm_aux_2;
#endif
// Direction 0
site_table[threadIdx.x] = tables[idx+4*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] = C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] = C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+5*size_dev];
stag_phase = phases[idx+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+6*size_dev];
stag_phase = phases[idx+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+7*size_dev];
stag_phase = phases[idx+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, idx + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] += link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] += link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] += link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] += link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] += stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x-link0.y*ferm_in_0.y+
link0.z*ferm_in_1.x-link0.w*ferm_in_1.y+
link1.x*ferm_in_2.x-link1.y*ferm_in_2.y;
ferm_aux_0.y = link0.x*ferm_in_0.y+link0.y*ferm_in_0.x+
link0.z*ferm_in_1.y+link0.w*ferm_in_1.x+
link1.x*ferm_in_2.y+link1.y*ferm_in_2.x;
ferm_aux_1.x = link1.z*ferm_in_0.x-link1.w*ferm_in_0.y+
link2.x*ferm_in_1.x-link2.y*ferm_in_1.y+
link2.z*ferm_in_2.x-link2.w*ferm_in_2.y;
ferm_aux_1.y = link1.z*ferm_in_0.y+link1.w*ferm_in_0.x+
link2.x*ferm_in_1.y+link2.y*ferm_in_1.x+
link2.z*ferm_in_2.y+link2.w*ferm_in_2.x;
ferm_aux_2.x = stag_phase*(C1RE*ferm_in_0.x-C1IM*ferm_in_0.y+
C2RE*ferm_in_1.x-C2IM*ferm_in_1.y+
C3RE*ferm_in_2.x-C3IM*ferm_in_2.y);
ferm_aux_2.y = stag_phase*(C1RE*ferm_in_0.y+C1IM*ferm_in_0.x+
C2RE*ferm_in_1.y+C2IM*ferm_in_1.x+
C3RE*ferm_in_2.y+C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_f - ferm_aux_0.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_f - ferm_aux_1.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_f - ferm_aux_2.y*dev_eim_sin_f; // Re[e^{imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{imu}*ferm_aux_2]
#endif
//---------------------------------------------------end of first block
//Direction 0
site_table[threadIdx.x] = tables[idx];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs( gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 0);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
C1RE*ferm_in_2.x +C1IM*ferm_in_2.y;
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
C1RE*ferm_in_2.y -C1IM*ferm_in_2.x;
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
C2RE*ferm_in_2.x +C2IM*ferm_in_2.y;
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
C2RE*ferm_in_2.y -C2IM*ferm_in_2.x;
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
C3RE*ferm_in_2.x +C3IM*ferm_in_2.y;
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
C3RE*ferm_in_2.y -C3IM*ferm_in_2.x;
//Direction 1
site_table[threadIdx.x] = tables[idx+size_dev];
stag_phase = phases[site_table[threadIdx.x]+size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 1);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y-C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y-C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 2
site_table[threadIdx.x] = tables[idx+2*size_dev];
stag_phase = phases[site_table[threadIdx.x]+2*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 2);
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
//Direction 3
site_table[threadIdx.x] = tables[idx+3*size_dev];
stag_phase = phases[site_table[threadIdx.x]+3*size_dev];
ferm_in_0 = tex1Dfetch(fermion_texRef, site_table[threadIdx.x] + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + site_table[threadIdx.x] + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + site_table[threadIdx.x] + ferm_offset);
LoadLinkRegs(gauge_texRef, size_dev, site_table[threadIdx.x] + gauge_offset, 3);
#ifndef IM_CHEM_POT
ferm_out[0][0][threadIdx.x] -= link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_out[0][1][threadIdx.x] -= link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_out[1][0][threadIdx.x] -= link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_out[1][1][threadIdx.x] -= link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_out[2][0][threadIdx.x] -= link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_out[2][1][threadIdx.x] -= link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
#else
ferm_aux_0.x = link0.x*ferm_in_0.x+link0.y*ferm_in_0.y +
link1.z*ferm_in_1.x+link1.w*ferm_in_1.y +
stag_phase*(C1RE*ferm_in_2.x+ C1IM*ferm_in_2.y);
ferm_aux_0.y = link0.x*ferm_in_0.y-link0.y*ferm_in_0.x +
link1.z*ferm_in_1.y-link1.w*ferm_in_1.x +
stag_phase*(C1RE*ferm_in_2.y- C1IM*ferm_in_2.x);
ferm_aux_1.x = link0.z*ferm_in_0.x+link0.w*ferm_in_0.y +
link2.x*ferm_in_1.x+link2.y*ferm_in_1.y +
stag_phase*(C2RE*ferm_in_2.x+ C2IM*ferm_in_2.y);
ferm_aux_1.y = link0.z*ferm_in_0.y-link0.w*ferm_in_0.x +
link2.x*ferm_in_1.y-link2.y*ferm_in_1.x +
stag_phase*(C2RE*ferm_in_2.y- C2IM*ferm_in_2.x);
ferm_aux_2.x = link1.x*ferm_in_0.x+link1.y*ferm_in_0.y +
link2.z*ferm_in_1.x+link2.w*ferm_in_1.y +
stag_phase*(C3RE*ferm_in_2.x+ C3IM*ferm_in_2.y);
ferm_aux_2.y = link1.x*ferm_in_0.y-link1.y*ferm_in_0.x +
link2.z*ferm_in_1.y-link2.w*ferm_in_1.x +
stag_phase*(C3RE*ferm_in_2.y- C3IM*ferm_in_2.x);
ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_f + ferm_aux_0.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_0]
ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_f + ferm_aux_0.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_0]
ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_f + ferm_aux_1.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_1]
ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_f + ferm_aux_1.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_1]
ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_f + ferm_aux_2.y*dev_eim_sin_f; // Re[e^{-imu}*ferm_aux_2]
ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_f + ferm_aux_2.y*dev_eim_cos_f; // Im[e^{-imu}*ferm_aux_2]
#endif
//-------------------------------------------------end of second block
// even
ferm_in_0 = tex1Dfetch(fermion_texRef, idx + ferm_offset);
ferm_in_1 = tex1Dfetch(fermion_texRef, size_dev + idx + ferm_offset);
ferm_in_2 = tex1Dfetch(fermion_texRef, 2*size_dev + idx + ferm_offset);
out[idx ].x = mass_dev*ferm_in_0.x + ferm_out[0][0][threadIdx.x]*(-0.5f);
out[idx ].y = mass_dev*ferm_in_0.y + ferm_out[0][1][threadIdx.x]*(-0.5f);
out[idx + size_dev ].x = mass_dev*ferm_in_1.x + ferm_out[1][0][threadIdx.x]*(-0.5f);
out[idx + size_dev ].y = mass_dev*ferm_in_1.y + ferm_out[1][1][threadIdx.x]*(-0.5f);
out[idx + 2*size_dev ].x = mass_dev*ferm_in_2.x + ferm_out[2][0][threadIdx.x]*(-0.5f);
out[idx + 2*size_dev ].y = mass_dev*ferm_in_2.y + ferm_out[2][1][threadIdx.x]*(-0.5f);
// odd
out[idx + size_dev_h ].x = 0.0f;
out[idx + size_dev_h ].y = 0.0f;
out[idx + size_dev + size_dev_h ].x = 0.0f;
out[idx + size_dev + size_dev_h ].y = 0.0f;
out[idx + 2*size_dev + size_dev_h ].x = 0.0f;
out[idx + 2*size_dev + size_dev_h ].y = 0.0f;
//-------------------------------------------------end of DslashDagger
}
///////////////////////////////////////////////////////////////////////////////////////// END OF KERNELS
void DslashOperatorEO(float2 *out,
float2 *in,
const int isign)
{
#ifdef DEBUG_MODE_2
printf("\033[32mDEBUG: inside DslashOperatorEO ...\033[0m\n");
#endif
dim3 BlockDimension(NUM_THREADS);
dim3 GridDimension(sizeh/BlockDimension.x); // Only even sites!
size_t vector_size=3*size*sizeof(float2);
size_t gauge_field_size = sizeof(float4)*size*12;
size_t offset_g, offset_f;
cudaSafe(AT,cudaBindTexture(&offset_f, fermion_texRef, in, vector_size), "cudaBindTexture");
offset_f/=sizeof(float2);
cudaSafe(AT,cudaBindTexture(&offset_g, gauge_texRef, gauge_field_device, gauge_field_size), "cudaBindTexture");
offset_g/=sizeof(float4);
if(isign == PLUS)
{
DslashKernelEO<<<GridDimension,BlockDimension>>>(out, device_table, device_phases, offset_g, offset_f);
cudaCheckError(AT,"DslashKernelEO");
}
if(isign == MINUS)
{
DslashDaggerKernelEO<<<GridDimension,BlockDimension>>>(out, device_table, device_phases, offset_g, offset_f);
cudaCheckError(AT,"DslashDaggerKernelEO");
}
cudaSafe(AT,cudaUnbindTexture(fermion_texRef), "cudaUnbindTexture");
cudaSafe(AT,cudaUnbindTexture(gauge_texRef), "cudaUnbindTexture");
#ifdef DEBUG_MODE_2
printf("\033[32m\tterminated DslashOperator \033[0m\n");
#endif
}
|
9de7e9c0083009ac4ebc230a9a9101bf76403ef7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<openssl/sha.h>
#include <stdio.h>
#include <string.h>
typedef struct block_header {
unsigned char shainput[76];
unsigned long nonce;
} block_header;
// Reverse the byte order of a buffer in place. Marked __device__ so it can be called from
// the kernel below; the variable-length temporary of the original is not needed.
__device__ void byte_swap(unsigned char* data, int len) {
    int c;
    unsigned char tmp;
    for (c = 0; c < len / 2; c++)
    {
        tmp = data[c];
        data[c] = data[len-(c+1)];
        data[len-(c+1)] = tmp;
    }
}
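// Brute-force nonce search: each thread fills a candidate block header with its own nonce,
// double-SHA256 hashes it, byte-swaps the digest, and reports its index when the digest
// matches Target. As written this assumes device-usable implementations of the SHA256_*
// routines (and memcmp) are linked in; the stock OpenSSL functions from <openssl/sha.h>
// are host-only.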
extern "C"
__global__ void inversehash(int n, char Input[], long Nonce[], char Target[], long *Output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
block_header header;
memcpy (header.shainput,Input,76);
header.nonce = Nonce[i];
unsigned char hash1[SHA256_DIGEST_LENGTH];
unsigned char hash2[SHA256_DIGEST_LENGTH];
SHA256_CTX sha256_pass1, sha256_pass2;
SHA256_Init(&sha256_pass1);
SHA256_Update(&sha256_pass1, (unsigned char*)&header, 76+sizeof(long));
SHA256_Final(hash1, &sha256_pass1);
SHA256_Init(&sha256_pass2);
SHA256_Update(&sha256_pass2,hash1,SHA256_DIGEST_LENGTH);
SHA256_Final(hash2, &sha256_pass2);
byte_swap(hash2, SHA256_DIGEST_LENGTH);
if(memcmp(hash2, Target, SHA256_DIGEST_LENGTH) == 0) // compare the full 32-byte digest
{
*Output = i; // report the index of the matching nonce
}
}
}
| 9de7e9c0083009ac4ebc230a9a9101bf76403ef7.cu | #include<openssl/sha.h>
#include <stdio.h>
#include <string.h>
typedef struct block_header {
unsigned char shainput[76];
unsigned long nonce;
} block_header;
// Reverse the byte order of a buffer in place. Marked __device__ so it can be called from
// the kernel below; the variable-length temporary of the original is not needed.
__device__ void byte_swap(unsigned char* data, int len) {
    int c;
    unsigned char tmp;
    for (c = 0; c < len / 2; c++)
    {
        tmp = data[c];
        data[c] = data[len-(c+1)];
        data[len-(c+1)] = tmp;
    }
}
extern "C"
__global__ void inversehash(int n, char Input[], long Nonce[], char Target[], long *Output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
block_header header;
memcpy (header.shainput,Input,76);
header.nonce = Nonce[i];
unsigned char hash1[SHA256_DIGEST_LENGTH];
unsigned char hash2[SHA256_DIGEST_LENGTH];
SHA256_CTX sha256_pass1, sha256_pass2;
SHA256_Init(&sha256_pass1);
SHA256_Update(&sha256_pass1, (unsigned char*)&header, 76+sizeof(long));
SHA256_Final(hash1, &sha256_pass1);
SHA256_Init(&sha256_pass2);
SHA256_Update(&sha256_pass2,hash1,SHA256_DIGEST_LENGTH);
SHA256_Final(hash2, &sha256_pass2);
byte_swap(hash2, SHA256_DIGEST_LENGTH);
if(memcmp(hash2, Target, SHA256_DIGEST_LENGTH) == 0) // compare the full 32-byte digest
{
*Output = i; // report the index of the matching nonce
}
}
}
|
d9b870debd22afe491d1b584b609e0084fe7b86b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void GPU_kernel(int max_itemcount)
{
int i;
int startindex = threadIdx.x;
int step = blockDim.x;
for (i = startindex; i < max_itemcount; i += step)
{
printf("%i\n", i);
}
}
int main(void)
{
GPU_kernel << < 1, 10 >> > (100);
hipDeviceSynchronize();
printf("Finished execution!\n");
return 0;
}
| d9b870debd22afe491d1b584b609e0084fe7b86b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void GPU_kernel(int max_itemcount)
{
int i;
int startindex = threadIdx.x;
int step = blockDim.x;
for (i = startindex; i < max_itemcount; i += step)
{
printf("%i\n", i);
}
}
int main(void)
{
GPU_kernel << < 1, 10 >> > (100);
cudaDeviceSynchronize();
printf("Finished execution!\n");
return 0;
}
|
82f2c0444cd3f749598bb287d2d59dd5762b42b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <sm_35_atomic_functions.h>
#include <iostream>
#include <string>
#include <time.h>
#include <queue>
#include <set>
#include <list>
#include <fstream>
#include <iomanip>
using namespace std;
texture<unsigned int, 1, hipReadModeElementType> LTSOFFSET; //1 means 1-dimension
texture<unsigned int, 1, hipReadModeElementType> STATEOFFSET;
texture<unsigned int, 1, hipReadModeElementType> OUTGOINGDETAIL;
texture<unsigned int, 1, hipReadModeElementType> SYNCOUTGOING;
texture<unsigned int, 1, hipReadModeElementType> TRANSEBYTES;
texture<unsigned int, 1, hipReadModeElementType> LTSSTATEBITS;
__constant__ int LA1;
__constant__ int LA2;
__constant__ int LA3;
__constant__ int LA4;
__constant__ int LB4;
__constant__ int GA1;
__constant__ int GA2;
__constant__ int GA3;
__constant__ int LB1;
__constant__ int LB2;
__constant__ int LB3;
__constant__ int GB1;
__constant__ int GB2;
__constant__ int GB3;
__constant__ int GA4;
__constant__ int GB4;
__constant__ int BUCA;
__constant__ int BUCB;
__constant__ int TableSize;
__constant__ unsigned int PrimeNum = 334214459;
__constant__ int IterationTime;
__constant__ int HashNum;
__constant__ int ProbeTimes;
static const unsigned int EMPTYVECT32 = 0x7FFFFFFF;
static const unsigned int P = 334214459;
static const unsigned int blocksize = 512;
//class LocalRecord{
//public:
// char localmark; //record the BFS layer in Shared Memory
// char toevent;
// unsigned int statevector;
//
// __device__ void operator= (LocalRecord t){
// localmark = t.localmark;
// toevent = t.toevent;
// statevector = t.statevector;
// }
//};
class Bucket{
public:
unsigned int beginindex;
unsigned int endindex;
};
class Nodemark{
public:
unsigned int beginInt;
unsigned int endInt;
unsigned int synbeginInt;
unsigned int synendInt;
};
__device__ unsigned int *GlobalOpenHash;
__device__ Bucket *GlobalBuckets;
__device__ unsigned int GlobalBucketNum;
__device__ unsigned int *GlobalbucketIndex; //just used for open
__device__ unsigned int *GlobalbucketCount;
__device__ unsigned int *GlobalVisitedHash; //here, for visited stateV, use hash to store back to global memory. While this hash doesn't kick the original one. For open stateV, use buckets hash.
//__device__ unsigned int GlobalVisitedHashoffset[3];
__device__ unsigned int communicationlayer[100];
__device__ bool communicationcollision[100];
__device__ Bucket *communicationGstore; //store the buckets that child blocks store their data
__device__ bool Ifreturn2parent[100];
//__device__ volatile unsigned int * GlobalBucketsCount;
__device__ unsigned int OpenSize;
__device__ unsigned int openvisitedborder;
__device__ bool IFDeadlockDetected;
__device__ bool IfDeadlockfree;
volatile __device__ int SynMutex = 0;
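// Software barrier across thread blocks: thread 0 of every block atomically increments
// SynMutex and spins until it reaches GoalValue. This only works when all participating
// blocks are resident on the device at the same time.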
__device__ void CudaInterBlocksSyn( int GoalValue)
{
__syncthreads();
int tid_in_block = threadIdx.x;
// only thread 0 is used for synchronization
//switch(tid_in_block)
//{
// case 0:
if(tid_in_block == 0)
{
atomicAdd((int*)&SynMutex, 1);
while(SynMutex < GoalValue);
}
//}
__syncthreads();
}
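// Hash family used by the search: Buckethash maps a state vector to an open-list bucket,
// while Globalhash/Localhash are the alternative hash functions (built from the
// constant-memory coefficients GA*/GB* and LA*/LB*) used for cuckoo hashing in the
// global and shared-memory tables respectively.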
__device__ unsigned int Buckethash(unsigned int k)
{
unsigned int bucketindex;
bucketindex = k % GlobalBucketNum;
return bucketindex;
}
__device__ unsigned int Globalhash(unsigned int k, int index)
{
if(index == 0)
return (GA1 * k + GB1) % PrimeNum % (3*TableSize);
if(index == 1)
return (GA2 * k + GB2) % PrimeNum % (3*TableSize);
if(index == 2)
return (GA3 * k + GB3) % PrimeNum % (3*TableSize);
if(index == 3)
return (GA4 * k + GB4) % PrimeNum % (3*TableSize);
}
__device__ unsigned int Localhash(unsigned int k, int index)
{
if(index == 0){
return (LA1 ^ k + LB1) % PrimeNum % TableSize;
}
if(index == 1){
return (LA2 ^ k + LB2) % PrimeNum % TableSize;
}
if(index == 2){
return (LA3 ^ k + LB3) % PrimeNum % TableSize;
}
if(index == 3){
return (LA4^k + LB4) % PrimeNum % TableSize;
}
}
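// Extract the local state of LTS `index` from the packed 32-bit global state vector:
// the per-LTS field widths come from the LTSSTATEBITS texture, and the field is isolated
// by shifting away the bits of the preceding and following LTSs.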
__device__ unsigned int CudaGetStateinVec(int index, unsigned int svec)
{
int sbeginbit, sendbit;
unsigned int ltsid;
sbeginbit = 0;
sendbit = 0;
for(int i = 0; i < index; i++){
sbeginbit += tex1Dfetch(LTSSTATEBITS, i);
}
sendbit = sbeginbit + tex1Dfetch(LTSSTATEBITS, index) - 1;
svec = svec << sbeginbit;
svec = svec >> (sbeginbit + 31 - sendbit);
ltsid = svec;
return ltsid;
}
__device__ bool CudaGetAllsuccessors(unsigned int ltsindex, unsigned int sindex, Nodemark * result)
{
unsigned int statesbegin, transbegin, transborder;
statesbegin = tex1Dfetch(LTSOFFSET, ltsindex);
transbegin = tex1Dfetch(STATEOFFSET, statesbegin + sindex);
if(transbegin == 0 && (ltsindex!=0 || sindex!=0))
return false;
transborder = tex1Dfetch(STATEOFFSET, statesbegin + sindex + 1);
result->beginInt = transbegin;
result->endInt = transborder - 1;
result->synendInt = tex1Dfetch(OUTGOINGDETAIL, transborder - 1);
transborder = tex1Dfetch(STATEOFFSET, statesbegin + sindex);
if(transborder == 0)
result->synbeginInt = 0;
else
result->synbeginInt = tex1Dfetch(OUTGOINGDETAIL, transborder - 1);
if(result->beginInt == result->endInt && result->synendInt == result->synbeginInt)
return false;
return true;
}
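// Build a successor state vector: decode the target state of the next outgoing transition
// from the byte-packed transition record in OutgoingTs and splice it into the field of
// LTS `tindex` inside *targetV, advancing the int index / in-int count as the packed
// records are consumed.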
__device__ bool CudaNewStateV(unsigned int * targetV, int tindex, int * index, int *count, unsigned char* OutgoingTs, unsigned int OutGTbyte, unsigned int EEncode)
{
unsigned int tmp = *targetV;
unsigned int tostate = 0;
int newsbeginbit = 0, endbit;
unsigned int Secode = tex1Dfetch(LTSSTATEBITS, tindex);
int replacebeginbyte, replaceendbyte;
unsigned int i,j;
int k;
replacebeginbyte = *count * OutGTbyte;
replaceendbyte = (*count + 1)*OutGTbyte;
//if(EEncode < 8){
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode; //event
//}else{
// replacebeginbyte++;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode - 8;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode - 8;
//}
for(i = 0; i < tindex; i++){
newsbeginbit += tex1Dfetch(LTSSTATEBITS, i);
}
endbit = newsbeginbit + Secode - 1;
if((Secode+EEncode) <= 8){
tostate = (int) OutgoingTs[replaceendbyte - 1];
}else{
tostate = 0;
for( k = replaceendbyte - 1; k > replacebeginbyte-1; k--)
tostate = tostate | (OutgoingTs[k] << 8 * (replaceendbyte - 1 - k));
}
if(tostate == 0){
(*index)++;
(*count)=0;
return false;
}
tostate = tostate << 32-Secode;
tostate = tostate >> 32-Secode;
tostate = tostate << (31-endbit);
i = tmp >> (31 - newsbeginbit + 1);
i = i << (31 - newsbeginbit + 1);
j = tmp << endbit+1;
j = j >> endbit+1;
* targetV = (unsigned int) (i | j | tostate);
if((OutGTbyte)*(*count + 2) > 4){
* index += 1;
*count = 0;
}else
(*count)++;
return true;
}
__device__ void CudaDecodeTransitions(int type, int beginindex, int count, unsigned int * Tostate, unsigned int * Tevent, unsigned int OutGTe, unsigned int Statebitwidth)
{
unsigned int tmp = 0;
unsigned int startbyte, endbyte;
while(tmp==0 && count >= 0){
startbyte = (count * OutGTe)%4;
endbyte = ((count + 1)*OutGTe)%4;
if(endbyte == 0)
endbyte = 4;
tmp = tex1Dfetch(SYNCOUTGOING, beginindex);
tmp = tmp << (startbyte) * 8;
tmp = tmp >> (startbyte + 4 - endbyte)*8;
*Tostate = (unsigned int)(tmp << 32 - Statebitwidth) >> (32- Statebitwidth);
*Tevent = (unsigned int)tmp >> Statebitwidth;
if(tmp == 0 && type == 1)
break;
count--;
}
}
//__device__ unsigned int CudaGenerateKey(unsigned int KV, int snum)
//{
// return KV;
//
//}
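// Merge the result of a synchronized move: overwrite the field of LTS `index` in *s1
// with the local state value passed in s2.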
__device__ void SynTwoStates(unsigned int * s1, unsigned int s2, int index)
{
unsigned int localstate;
int beginbit = 0, endbit;
unsigned int i,j;
for(i = 0; i < index;i++){
beginbit += tex1Dfetch(LTSSTATEBITS, i);
}
endbit = beginbit + tex1Dfetch(LTSSTATEBITS,index);
s2 = s2 << 32-endbit;
i = ((*s1) << endbit)>>endbit;
//i = (*s1) >> endbit;
j = ((*s1) >> 32-beginbit)<<32-beginbit;
*s1 = i | j | s2;
}
//void SynTwoStatesCPU(unsigned int * tmpStateV, unsigned int succStateV, int i, unsigned int newStateV, unsigned int * bitwidth){
// int beginbit, endbit;
// int beginbyte, endbyte;
// int j,m;
//
// unsigned char tmp1[4];
// unsigned char tmp2[4];
//
// tmp1[0] = (char)(*tmpStateV);
// tmp1[1] = (char)(*tmpStateV >> 8);
// tmp1[2] = (char)(*tmpStateV >> 16);
// tmp1[3] = (char)(*tmpStateV >> 24);
//
// tmp2[0] = (char)(succStateV);
// tmp2[1] = (char)(succStateV >> 8);
// tmp2[2] = (char)(succStateV >> 16);
// tmp2[3] = (char)(succStateV >> 24);
//
// for(j = 0; j < i; j++){
// beginbit += bitwidth[j];
// }
// endbit = beginbit + bitwidth[i];
//
// beginbyte = beginbit / 8;
// endbyte = endbit / 8;
// beginbit = beginbit % 8;
// endbit = endbit % 8;
//
// for(m = beginbyte; m < endbyte; m++){
// tmp1[m] = tmp1[m] >> (8 - beginbit);
// tmp2[m] = tmp2[m] << beginbit;
// tmp2[m] = tmp2[m] >> beginbit;
// tmp1[m] = tmp1[m] | tmp2[m];
// }
//
// *tmpStateV = (unsigned int)(tmp1[0] | tmp1[1] << 8 | tmp1[2] << 16 | tmp1[3] << 24);
//
//
//}
//__device__ bool CudaHashStore2() //use cuckoo+probe
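// Insert a state vector into the shared-memory record table using cuckoo hashing with a
// bounded number of evictions plus linear probing around each slot. Returns false once the
// key is stored (or found to be a duplicate); returns true when the eviction budget runs
// out, handing the still-homeless key back through *RkickoutRecord.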
__device__ bool CudaHashStore(unsigned int beginHV, unsigned int PLTSNum, unsigned int * AllT, int Tnum, unsigned int * RkickoutRecord){
unsigned int localhash;
//LocalRecord kickoutRecord;
char tmp;
int i = 0, j = 0;
unsigned int KeyV = beginHV;
unsigned int kickKeyV;
*RkickoutRecord = EMPTYVECT32;
while(i < IterationTime){
localhash = Localhash(KeyV, i % HashNum);
if((atomicCAS(&(AllT[(i%HashNum) * Tnum + localhash]), EMPTYVECT32, KeyV))==EMPTYVECT32)
return false;
else{
if(AllT[(i%HashNum) * Tnum + localhash] == KeyV)
return false;
kickKeyV = atomicExch(&(AllT[(i%HashNum) * Tnum + localhash]), KeyV);
for(j = 0; j < Tnum; j++){
if(atomicCAS(&(AllT[(i%HashNum) * Tnum + (localhash + j)%Tnum]), EMPTYVECT32, kickKeyV)==EMPTYVECT32)
return false;
else if(AllT[(i%HashNum) * Tnum + (localhash+j)%Tnum] == KeyV)
return false;
if(atomicCAS(&(AllT[(i%HashNum) * Tnum + (localhash - j + Tnum)%Tnum]), EMPTYVECT32, kickKeyV)==EMPTYVECT32){
return false;
}
else if(AllT[(i%HashNum) * Tnum + (localhash-j + Tnum)%Tnum] == KeyV)
return false;
}
kickKeyV = atomicExch(&(AllT[(i%HashNum) * Tnum + (localhash - j + Tnum)%Tnum]), KeyV);
KeyV = kickKeyV;
i++;
}
}
*RkickoutRecord = kickKeyV;
return true;
}
__device__ bool CudaVisitedGlobalHashcal(unsigned int * HT, Bucket belongBucket, unsigned int hkey, unsigned int * hashresult){
unsigned int hashposition;
unsigned int KeyV;
int i = 0;
KeyV = hkey;
while(i < HashNum){
hashposition = Globalhash(KeyV, i);
if(HT[belongBucket.beginindex + hashposition] == hkey)
return true;
i++;
}
KeyV = hkey;
i = 0;
while(i < HashNum){
hashposition = Globalhash(KeyV, i);
if(HT[belongBucket.beginindex + hashposition] == EMPTYVECT32){
*hashresult = hashposition;
return false;
}
i++;
}
*hashresult = Globalhash(KeyV, 0);
return false;
}
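// Commit a state vector to the global visited hash: try cuckoo insertion inside the chosen
// bucket, spill over to the following buckets when the eviction budget is exhausted, and
// fall back to bounded linear probing before giving up.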
__device__ bool CudaVisitedGlobalHashstore(unsigned int * HT, unsigned int hasbucket, unsigned int hashv, unsigned int insertedrecord, unsigned int ltsnum){
Bucket buckethash;
unsigned int kickV;
int i = 0, j = 0;
bool ifstored;
unsigned int kickou;
while(true){
buckethash = GlobalBuckets[hasbucket];
if(atomicCAS(&(HT[buckethash.beginindex + hashv]),EMPTYVECT32,insertedrecord)==EMPTYVECT32){
return true;
}else{
i = 1;
kickV = insertedrecord;
while(i < IterationTime){
kickou = atomicExch(&(HT[buckethash.beginindex + hashv]), kickV);
hashv = Globalhash(kickou, i);
if(atomicCAS(&(HT[buckethash.beginindex + hashv]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
i++;
kickV = kickou;
}
hasbucket++;
}
if(hasbucket > openvisitedborder-1)
break;
}
i = 0;
while(i < HashNum){
hashv = Globalhash(kickV, i);
for(j = 0; j < ProbeTimes; j++){
if(atomicCAS(&(HT[buckethash.beginindex + (hashv + j) % TableSize]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
if(atomicCAS(&(HT[buckethash.beginindex + (hashv - j+ TableSize) % TableSize]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
}
}
return false;
}
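// Child kernel of the deadlock BFS check: threads are grouped into virtual thread groups of
// PLTSNum threads inside each warp, one thread per LTS. Each group pulls a state vector from
// the global open hash, expands the per-LTS outgoing transitions into the shared-memory hash
// table, and matches synchronizing events across the group to build synchronized successors.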
__global__ void CUDADeadlockBFSVerifyChild(unsigned int ParentID, unsigned int PBucket, Bucket * Cbucket, unsigned int * CG_AllLTS, unsigned int * CG_AllStates, unsigned int * CG_AllTransitions, unsigned int * CG_AllSynctransitions, unsigned int * CG_LTSStateEncodeBits, unsigned int CEventEncodeBits,unsigned int * OutgoingTEbytes, unsigned int CG_Bucketnum, unsigned int PLTSNum, unsigned int StartNum)
{
int i,j,m,k,x;
int Inblocktid = threadIdx.x;
int Ingridtid = threadIdx.x + blockIdx.x * blockDim.x;
int InWarptid = Inblocktid % 31;
int InvthreadgroupID;
int vthreadgroupID;
int Warpid = Inblocktid/32;
int WarpNum = blockDim.x/32;
unsigned int layer;
unsigned int localstateV;
unsigned int localstate;
unsigned int localstate2;
unsigned int belonglts;
unsigned int transevent;
unsigned int maxtransevent;
unsigned int globalbuckethash;
unsigned int visitedstore;
unsigned int offsetborder; //used to mark the border of successors.
bool ifanyoutgoing, ifgetnewstatev, ifglobaldup; //ifglobaldup means if this state is duplicated
int vthreadgroupnuminblock;
int vthreadgroupnuminwarp;
//char tmp;
//unsigned int localKey, localhash;
unsigned int kickoutRecord;
unsigned int insertRecord;
unsigned int visitedRecord;
unsigned int hkey;
unsigned int getindex; // the index to get tasks
unsigned int storeposition;
unsigned int tmpoutT;
unsigned char tmpT[4];
int outgoingcount;
Nodemark SuccessorMark;
vthreadgroupnuminwarp = 32/PLTSNum;
vthreadgroupnuminblock = vthreadgroupnuminwarp * (blockDim.x/32);
if(InWarptid < vthreadgroupnuminwarp * PLTSNum){
vthreadgroupID = Warpid*vthreadgroupnuminwarp + InWarptid/PLTSNum;
InvthreadgroupID = InWarptid % PLTSNum;
}else{
vthreadgroupID = -1;
InvthreadgroupID = -1;
}
__shared__ int nonewcount;
__shared__ bool Ifcollisionhappens;
__shared__ int maxlayer;
extern __shared__ bool C[];
bool * syncduplicate = C;
bool * needsyndupdetect = &syncduplicate[vthreadgroupnuminblock*PLTSNum];
bool * ifnooutgoing = &needsyndupdetect[vthreadgroupnuminblock];
unsigned int * SynEventInteractive = (unsigned int *)&ifnooutgoing[vthreadgroupnuminblock*PLTSNum];
unsigned int * SynStateInteractive = (unsigned int *)&(SynEventInteractive[vthreadgroupnuminblock*PLTSNum]);
unsigned int * RecordTable = &(SynStateInteractive[vthreadgroupnuminblock*PLTSNum]);
unsigned int * GroupStore = &RecordTable[blockDim.x * HashNum];
Bucket * WarpCBindex = (Bucket *)&GroupStore[vthreadgroupnuminblock];
if(Inblocktid == 0){
for(i=0; i<WarpNum; i++){
WarpCBindex[i].beginindex = 0;
WarpCBindex[i].endindex = 0;
}
for(i = 0; i < vthreadgroupnuminblock * PLTSNum; i++){
ifnooutgoing[i] = false;
SynEventInteractive[i] = EMPTYVECT32;
}
for(i = 0; i < vthreadgroupnuminblock; i++)
GroupStore[i] = EMPTYVECT32;
nonewcount = 0;
maxlayer = 0;
Ifcollisionhappens = false;
}
if(InvthreadgroupID != -1){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(InvthreadgroupID == 0){
getindex = vthreadgroupnuminblock * blockIdx.x + vthreadgroupID;
j=0;
if(getindex < GlobalbucketCount[PBucket]){
globalbuckethash = PBucket;
}else{
for(i = Cbucket->beginindex; i < Cbucket->endindex; i++){
j += GlobalbucketCount[i];
if(getindex < j){
globalbuckethash = i;
j -= GlobalbucketCount[i];
getindex = getindex - j;
break;
}
}
}
}
__syncthreads();
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = GlobalOpenHash[globalbuckethash];
GlobalOpenHash[globalbuckethash] = EMPTYVECT32;
}
do{
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
localstate = CudaGetStateinVec(InvthreadgroupID, GroupStore[vthreadgroupID]);
printf("vtg%d, itgid%d, gets%d\n", vthreadgroupID, InvthreadgroupID, localstate);
belonglts = InvthreadgroupID;
ifanyoutgoing = CudaGetAllsuccessors(belonglts, localstate-1, &SuccessorMark);
ifglobaldup = false;
//The successor generation consists of two steps: 1. For trans in alltransitions, process them directly. 2.For trans in allsynctrans, parallel sync is needed.
if(ifanyoutgoing){
outgoingcount=0;
i = SuccessorMark.beginInt;
//calculate global hash position for visited stateV
if(InvthreadgroupID == 0){
globalbuckethash = Buckethash(GroupStore[vthreadgroupID]);
hkey = GroupStore[vthreadgroupID];
ifglobaldup = CudaVisitedGlobalHashcal(GlobalVisitedHash, GlobalBuckets[globalbuckethash],hkey, &visitedstore);
}
localstateV = GroupStore[vthreadgroupID];
visitedRecord = GroupStore[vthreadgroupID];
j = 0;
m = -1;
while(i < SuccessorMark.endInt && !ifglobaldup){
if(m != i){
tmpoutT = tex1Dfetch(OUTGOINGDETAIL, i);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
m = i;
}
if(!CudaNewStateV(&localstateV, InvthreadgroupID, &i, &j, tmpT, tex1Dfetch(TRANSEBYTES, InvthreadgroupID), CEventEncodeBits ))
continue;
if(!Ifcollisionhappens){
insertRecord = localstateV;
//hash store and duplicate elimination module.....
Ifcollisionhappens = CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord);
outgoingcount++;
}
localstateV = GroupStore[vthreadgroupID];
if(Ifcollisionhappens){
break;
}
}
//synchronization part
j = SuccessorMark.synbeginInt;
if(!Ifcollisionhappens){
bool ifmatch;
//int tmpcount=0;
int tmpj = 0;
int nosync;
int lessthanall;
m = 0;
x = -1;
CudaDecodeTransitions(0,SuccessorMark.synendInt-1, (SuccessorMark.synendInt - j + 1)*(4/tex1Dfetch(TRANSEBYTES,belonglts))-1,&localstate2, &maxtransevent, tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS,belonglts));
while(j < SuccessorMark.synendInt){
ifmatch = false;
if(m == 0 && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum]){
if(j == SuccessorMark.synendInt)
break;
CudaDecodeTransitions(1, j, tmpj, &SynStateInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], &SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == 0)
{
SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] = EMPTYVECT32;
break;
}
if(x != j){
tmpoutT = tex1Dfetch(SYNCOUTGOING, j);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
x = j;
}
CudaNewStateV(&localstateV, InvthreadgroupID, &j, &tmpj, tmpT, tex1Dfetch(TRANSEBYTES,InvthreadgroupID), CEventEncodeBits);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
nosync = 0;
lessthanall = 0;
m=0;
for(i=0; i<PLTSNum; i++){
if(i == InvthreadgroupID)
continue;
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] == EMPTYVECT32)
{
nosync++;
continue;
}
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] <= maxtransevent){ //if bigger than the maxtransevent of local, no need to compare as it's impossible to sync
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] > SynEventInteractive[i + vthreadgroupID * PLTSNum]){
m++;
}else if (SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
if(needsyndupdetect[vthreadgroupID] == false)
needsyndupdetect[vthreadgroupID] = true;
//GENERATE SYNC STATE V.......
SynTwoStates(&localstateV, SynStateInteractive[i + vthreadgroupID * PLTSNum], i);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
ifmatch = true;
}else
lessthanall++;
}
}
if(nosync == PLTSNum - 1){
break;
}
if(lessthanall == PLTSNum -1){
m = 0;
syncduplicate[InvthreadgroupID + vthreadgroupID*PLTSNum] = true;
continue;
}
if(syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum])
m = 0;
if(needsyndupdetect[vthreadgroupID] && InvthreadgroupID == 0){ //duplicate elimination after synchronization, so just one synchronized result will be copied to hashtable.
for(i = 0; i < PLTSNum; i++){
if(syncduplicate[i + vthreadgroupID * PLTSNum]){
for(k = 0; k < i; k++)
{
if(SynEventInteractive[k + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
break;
}
}
if(k == i){
syncduplicate[i + vthreadgroupID * PLTSNum] = false; //use the loop index i so the flag of group member i is cleared (this loop runs on thread 0 only)
}
}
}
}
if(ifmatch && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] == false){
//hash copy to table
insertRecord = localstateV;
if(!Ifcollisionhappens)
{
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
if(Ifcollisionhappens){
for(k = 511; k >= 0; k--){
if(kickoutRecord != EMPTYVECT32){
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, kickoutRecord)==EMPTYVECT32){
kickoutRecord = EMPTYVECT32;
break;
}
}else{
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, localstateV) == EMPTYVECT32){
break;
}
}
}
}
}
if(ifmatch && m == 0){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(j >= SuccessorMark.synendInt){
SynEventInteractive[InvthreadgroupID + vthreadgroupID*PLTSNum] = EMPTYVECT32;
}
localstateV = GroupStore[vthreadgroupID];
}
}
if(outgoingcount == 0 && !ifglobaldup)
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}else{
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}
if(InvthreadgroupID == 0 && !ifglobaldup){
for(i = 0; i < PLTSNum; i++){
if(!ifnooutgoing[i + vthreadgroupID * PLTSNum] && !Ifcollisionhappens)
break;
}
if(i == PLTSNum){
printf("vtg%d detect deadlock\n", vthreadgroupID);
IFDeadlockDetected = true;
}
}
}
CudaInterBlocksSyn(gridDim.x);
if(IFDeadlockDetected){
break;
}
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
if(InWarptid == 0&&!Ifcollisionhappens&&!ifglobaldup){
//copy visited state to global memory
CudaVisitedGlobalHashstore(GlobalVisitedHash, globalbuckethash, visitedstore, GroupStore[vthreadgroupID], PLTSNum);
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = EMPTYVECT32;
}
}
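//the block-local tables overflowed (or the parent flagged a collision): count the surviving entries per warp,
//reserve a region of GlobalOpenHash for this block, spill the pending states and record tables there, and stop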
if(Ifcollisionhappens || communicationcollision[ParentID]){
if(IFDeadlockDetected)
break;
//load new kernel, copy data back
unsigned int myareacount = 0;
globalbuckethash = Buckethash((unsigned int)(blockIdx.x)) + openvisitedborder;
if(blockIdx.x == 0){
communicationGstore[ParentID].beginindex = (unsigned int)blockIdx.x;
}
if(blockIdx.x == gridDim.x - 1){ //last block of the grid
communicationGstore[ParentID].endindex = (unsigned int)(blockIdx.x);
}
if(InWarptid == 0){
for(m = Warpid*32; m<(Warpid + 1)*32; m++){
for(k = 0; k < HashNum; k++){
if(RecordTable[k*blockDim.x + m] != EMPTYVECT32)
myareacount++;
}
}
k = 0;
for(m = 0; m < vthreadgroupnuminwarp; m++){
if(GroupStore[vthreadgroupnuminwarp * Warpid + m] != EMPTYVECT32){
myareacount++;
k++;
}
}
WarpCBindex[Warpid].beginindex = atomicAdd(&GlobalbucketIndex[globalbuckethash], myareacount);
WarpCBindex[Warpid].endindex = WarpCBindex[Warpid].beginindex + myareacount;
atomicAdd(&GlobalbucketCount[globalbuckethash], myareacount);
}
if(InWarptid == 0){
for(m = 0; m < k; m++){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + m] = GroupStore[m];
}
}
storeposition = WarpCBindex[Warpid].beginindex + InWarptid + k;
for(i=0; i<HashNum; i++){
if(RecordTable[i*blockDim.x + Warpid * 32 + InWarptid] != EMPTYVECT32){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = RecordTable[i*blockDim.x + Warpid * 32 + InWarptid];
RecordTable[i*blockDim.x + Warpid * 32 + InWarptid] = EMPTYVECT32;
storeposition+=32;
}
}
if(storeposition < WarpCBindex[Warpid].endindex)
{
for(i=0; i<HashNum; i++){
for(k = Warpid*32; k<(Warpid+1)*32; k++){
if(RecordTable[i*blockDim.x + k] != EMPTYVECT32){
kickoutRecord = RecordTable[i*blockDim.x + k];
if(atomicCAS(&(RecordTable[i*blockDim.x + k]), kickoutRecord, EMPTYVECT32) == kickoutRecord){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = kickoutRecord;
storeposition+=32;
}
}
}
}
}
//handling for more than 512 spilled elements is not implemented yet
break;
}
}
if(IFDeadlockDetected)
break;
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32){
//got new stateV
localstateV = EMPTYVECT32;
ifgetnewstatev = false;
for(j = 0; j < HashNum; j++){
for(i = vthreadgroupID * PLTSNum; i < (vthreadgroupID+1) * PLTSNum; i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
for(i = vthreadgroupnuminblock * PLTSNum; i<(int)(blockDim.x); i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
}
}
__syncthreads();
if(InvthreadgroupID == 0 && layer == maxlayer + 1 && ifgetnewstatev == false){
if(Inblocktid == 0){
for(nonewcount = 0; nonewcount < vthreadgroupnuminblock; nonewcount++){
if(GroupStore[nonewcount] != EMPTYVECT32){
break;
}
}
if(nonewcount == vthreadgroupnuminblock){
break;
}
}
}
__syncthreads();
}while(!IFDeadlockDetected);
CudaInterBlocksSyn(gridDim.x);
}
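//Top-level BFS verification kernel. Threads are partitioned into virtual groups of PLTSNum threads (one thread per LTS);
//each group repeatedly takes one packed state vector, expands its ordinary and synchronized successors into the
//block-local cuckoo hash tables, and raises IFDeadlockDetected when a non-duplicate state has no outgoing transition in
//any component. When the local tables overflow, states are spilled to GlobalOpenHash and (in the commented-out code
//paths) a child grid is meant to continue the search via dynamic parallelism.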
__global__ void CUDADeadlockBFSVerify(unsigned int * PG_AllLTS, unsigned int * PG_AllStates, unsigned int * PG_AllTransitions, unsigned int * PG_AllSynctransitions, unsigned int * PG_Startlist, unsigned int * PG_LTSStateEncodeBits, unsigned int PEventEncodeBits, unsigned int * OutgoingTEbytes, unsigned int PLTSNum, unsigned int * G_RESULT, unsigned int PGBucketNum, unsigned int PAllLTSStatesNum, unsigned int StartNum)
{
int i,j,m,k,x,y;
int Inblocktid = threadIdx.x;
int Ingridtid = threadIdx.x + blockIdx.x * blockDim.x;
int InWarptid = Inblocktid % 32;
int InvthreadgroupID;
int vthreadgroupID;
int Warpid = Inblocktid/32;
int WarpNum = blockDim.x/32;
unsigned int getindex; //the index to get the initial task from global memory.
unsigned int localstateV;
unsigned int localstate;
unsigned int localstate2;
unsigned int belonglts;
unsigned int transevent;
unsigned int maxtransevent;
int nosync,lessthanall;
unsigned int globalbuckethash;
unsigned int visitedstore;
unsigned int tmpoutT;
int outgoingcount;
unsigned int offsetborder; //used to mark the border of successors.
bool ifanyoutgoing, ifgetnewstatev, ifglobaldup; //ifglobaldup means if this state is duplicated
int vthreadgroupnuminblock;
int vthreadgroupnuminwarp;
unsigned char tmpT[4];
unsigned int localKey, localhash;
unsigned int kickoutRecord;
unsigned int insertRecord;
unsigned int visitedRecord;
unsigned int hkey;
unsigned int storeposition;
Nodemark SuccessorMark;
vthreadgroupnuminwarp = 32/PLTSNum;
vthreadgroupnuminblock = vthreadgroupnuminwarp * (blockDim.x/32);
if(InWarptid < vthreadgroupnuminwarp * PLTSNum){
vthreadgroupID = Warpid*vthreadgroupnuminwarp + InWarptid/PLTSNum;
InvthreadgroupID = InWarptid % PLTSNum;
}else{
vthreadgroupID = -1;
InvthreadgroupID = -1;
}
__shared__ bool Ifcollisionhappens;
__shared__ int collisiontimes; //how the collision count reflects table occupancy still needs to be explored experimentally.
__shared__ bool ifblocknostate;
__shared__ int nonewcount;
__shared__ bool haveChild;
__shared__ int launchtime;
i = vthreadgroupnuminblock * PLTSNum;
extern __shared__ bool C[];
bool * syncduplicate = C;
bool * needsyndupdetect = &syncduplicate[i];
bool * ifnooutgoing = &needsyndupdetect[vthreadgroupnuminblock];
unsigned int * SynEventInteractive = (unsigned int *)&ifnooutgoing[i];
unsigned int * SynStateInteractive = &SynEventInteractive[i];
unsigned int * RecordTable = &(SynStateInteractive[i]);
unsigned int * GroupStore = &RecordTable[HashNum * blockDim.x];
Bucket * WarpCBindex = (Bucket *)&GroupStore[vthreadgroupnuminblock];
if(Inblocktid == 0){
for(i = 0; i < vthreadgroupnuminblock * PLTSNum; i++){
ifnooutgoing[i] = false;
SynEventInteractive[i] = EMPTYVECT32;
}
for(i = 0; i < HashNum* blockDim.x; i++){
RecordTable[i] = EMPTYVECT32;
}
for(i = 0; i < vthreadgroupnuminblock; i++)
GroupStore[i] = EMPTYVECT32;
nonewcount = 0;
haveChild = false;
launchtime = 0;
ifblocknostate = false;
for(i=0; i<WarpNum; i++){
WarpCBindex[i].beginindex = 0;
WarpCBindex[i].endindex = 0;
}
Ifcollisionhappens = false;
}
__syncthreads();
if(Ingridtid == 0){
GlobalbucketCount = new unsigned int[PGBucketNum];
GlobalbucketIndex = new unsigned int[PGBucketNum];
GlobalBucketNum = PGBucketNum;
GlobalOpenHash = new unsigned int[blockDim.x * 3 * PLTSNum * 4 ];
GlobalBuckets = new Bucket[GlobalBucketNum];
GlobalVisitedHash = new unsigned int[blockDim.x * 3 * PLTSNum * 4]; //bucket/2
communicationGstore = new Bucket[100];
for(i = 0; i < blockDim.x * 3 * PLTSNum * 4; i++)
GlobalOpenHash[i] = EMPTYVECT32;
for(i = 0; i < blockDim.x * 3 * PLTSNum * 4; i++)
GlobalVisitedHash[i] = EMPTYVECT32;
for(i = 0; i < PLTSNum * 4; i++){
GlobalBuckets[i].beginindex = i * blockDim.x;
GlobalBuckets[i].endindex = (i+1)* 3 *blockDim.x - 1;
}
for(i = PLTSNum * 4; i < PLTSNum * 8; i++){
GlobalBuckets[i].beginindex = (i-PLTSNum*4)*blockDim.x;
GlobalBuckets[i].endindex = (i+1-PLTSNum*4)* 3 *blockDim.x - 1;
}
}
if(InvthreadgroupID != -1){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(InvthreadgroupID == 0 && vthreadgroupID < StartNum){
getindex = vthreadgroupnuminblock * blockIdx.x + vthreadgroupID;
GroupStore[vthreadgroupID] = PG_Startlist[getindex];
needsyndupdetect[vthreadgroupID] = false;
}
CudaInterBlocksSyn(gridDim.x);
//while(GroupStore[vthreadgroupID].statevector == EMPTYVECT32);
do{
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
localstate = CudaGetStateinVec(InvthreadgroupID, GroupStore[vthreadgroupID]);
printf("vtg%d, itgid%d, gets%d\n", vthreadgroupID, InvthreadgroupID, localstate);
belonglts = InvthreadgroupID;
ifanyoutgoing = CudaGetAllsuccessors(belonglts, localstate-1, &SuccessorMark);
ifglobaldup = false;
//The successor generation consists of two steps: 1. For trans in alltransitions, process them directly. 2.For trans in allsynctrans, parallel sync is needed.
if(ifanyoutgoing){
outgoingcount = 0;
i = SuccessorMark.beginInt;
//calculate global hash position for visited stateV
if(InvthreadgroupID == 0){
globalbuckethash = Buckethash(GroupStore[vthreadgroupID]);
hkey = GroupStore[vthreadgroupID];
ifglobaldup = CudaVisitedGlobalHashcal(GlobalVisitedHash, GlobalBuckets[globalbuckethash],hkey, &visitedstore);
}
localstateV = GroupStore[vthreadgroupID];
j = 0;
m = -1;
while(i < SuccessorMark.endInt && !ifglobaldup){
if(m != i){
tmpoutT = tex1Dfetch(OUTGOINGDETAIL, i);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
m = i;
}
if(!CudaNewStateV(&localstateV, InvthreadgroupID, &i, &j, tmpT,tex1Dfetch(TRANSEBYTES,InvthreadgroupID), PEventEncodeBits ))
continue;
if(!Ifcollisionhappens){
insertRecord = localstateV;
//hash store and duplicate elimination module.....
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
localstateV = GroupStore[vthreadgroupID];
if(Ifcollisionhappens){
break;
}
}
//synchronization part
j = SuccessorMark.synbeginInt;
if(!Ifcollisionhappens && SuccessorMark.synbeginInt != SuccessorMark.synendInt && !ifglobaldup){
bool ifmatch;
//int tmpcount=0;
int tmpj = 0;
m = 0;
x = -1;
CudaDecodeTransitions(0,SuccessorMark.synendInt-1, (SuccessorMark.synendInt - j)*(4/tex1Dfetch(TRANSEBYTES, belonglts))-1,&localstate2, &maxtransevent, tex1Dfetch(TRANSEBYTES, belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
while(j < SuccessorMark.synendInt){
ifmatch = false;
if(m == 0 && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum]){
if(j == SuccessorMark.synendInt)
break;
CudaDecodeTransitions(1, j, tmpj, &SynStateInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], &SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == 0)
{
SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] = EMPTYVECT32;
break;
}
if(x != j){
tmpoutT = tex1Dfetch(SYNCOUTGOING, j);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
x = j;
}
CudaNewStateV(&localstateV, InvthreadgroupID, &j, &tmpj, tmpT, tex1Dfetch(TRANSEBYTES, InvthreadgroupID), PEventEncodeBits);
//tmpcount++;
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
nosync = 0;
lessthanall = 0;
m=0;
for(i=0; i<PLTSNum; i++){
if(i == InvthreadgroupID)
continue;
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] == EMPTYVECT32)
{
nosync++;
continue;
}
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] <= maxtransevent){ //if bigger than the maxtransevent of local, no need to compare as it's impossible to sync
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] > SynEventInteractive[i + vthreadgroupID * PLTSNum]){
m++;
}else if (SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
if(needsyndupdetect[vthreadgroupID] == false)
needsyndupdetect[vthreadgroupID] = true;
//GENERATE SYNC STATE V.......
SynTwoStates(&localstateV, SynStateInteractive[i + vthreadgroupID * PLTSNum], i);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
ifmatch = true;
}else
lessthanall++;
}
}
if(nosync == PLTSNum - 1){
break;
}
if(lessthanall == PLTSNum -1){
m = 0;
syncduplicate[InvthreadgroupID + vthreadgroupID*PLTSNum] = true;
continue;
}
if(syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum])
m = 0;
if(needsyndupdetect[vthreadgroupID] && InvthreadgroupID == 0){ //duplicate elimination after synchronization, so just one synchronized result will be copied to hashtable.
for(i = 0; i < PLTSNum; i++){
if(syncduplicate[i + vthreadgroupID * PLTSNum]){
for(k = 0; k < i; k++)
{
if(SynEventInteractive[k + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
break;
}
}
if(k == i){
syncduplicate[i + vthreadgroupID * PLTSNum] = false; //use the loop index i so the flag of group member i is cleared (this loop runs on thread 0 only)
}
}
}
}
if(ifmatch && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] == false){
//hash copy to table
insertRecord = localstateV;
if(!Ifcollisionhappens)
{
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
if(Ifcollisionhappens){
for(k = 511; k >= 0; k--){
if(kickoutRecord != EMPTYVECT32){
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, kickoutRecord)==EMPTYVECT32){
kickoutRecord = EMPTYVECT32;
break;
}
}else{
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, localstateV) == EMPTYVECT32){
break;
}
}
}
}
}
if(!ifmatch && m == 0){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
localstateV = GroupStore[vthreadgroupID];
}
}
if(outgoingcount == 0 && !ifglobaldup)
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}else{
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}
if(InvthreadgroupID == 0&&!ifglobaldup &&!Ifcollisionhappens){
for(i = 0; i < PLTSNum; i++){
if(!ifnooutgoing[i + vthreadgroupID * PLTSNum])
break;
}
if(i == PLTSNum){
printf("tgid%d find deadlock\n", vthreadgroupID);
IFDeadlockDetected = true;
}
}
}
CudaInterBlocksSyn(gridDim.x);
if(IFDeadlockDetected)
break;
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
if(InvthreadgroupID == 0&&!Ifcollisionhappens&&!ifglobaldup){
//copy visited state to global memory
CudaVisitedGlobalHashstore(GlobalVisitedHash, globalbuckethash, visitedstore, GroupStore[vthreadgroupID], PLTSNum);
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = EMPTYVECT32;
}
}else if(Ifcollisionhappens){
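//the block-local tables overflowed: spill the record tables into this block's region of GlobalOpenHash and
//(in the commented-out launch below) hand the spilled frontier to a child grid via dynamic parallelism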
if(haveChild)
hipDeviceSynchronize();
//if(IFDeadlockDetected)
// break;
//load new kernel, copy data back
unsigned int myareacount = 0;
globalbuckethash = Buckethash((unsigned int)(blockIdx.x)) + openvisitedborder;
if(InWarptid == 0){
for(m = Warpid*32; m<(Warpid + 1)*32; m++){
for(k = 0; k < HashNum; k++){
if(RecordTable[k*blockDim.x + m] != EMPTYVECT32)
myareacount++;
}
}
WarpCBindex[Warpid].beginindex = atomicAdd(&GlobalbucketIndex[globalbuckethash], myareacount);
WarpCBindex[Warpid].endindex = WarpCBindex[Warpid].beginindex + myareacount;
atomicAdd(&GlobalbucketCount[globalbuckethash], myareacount);
}
storeposition = WarpCBindex[Warpid].beginindex + InWarptid;
for(m = 0; m < HashNum; m++){
if(RecordTable[m*blockDim.x + Warpid * 32 + InWarptid] != EMPTYVECT32){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = RecordTable[m*blockDim.x + Warpid * 32 + InWarptid];
RecordTable[m*blockDim.x + Warpid * 32 + InWarptid] = EMPTYVECT32;
storeposition+=32;
}
}
if(storeposition < WarpCBindex[Warpid].endindex)
{
for(m = 0; m < HashNum; m++){
for(k = Warpid*32; k<(Warpid+1)*32; k++){
if(RecordTable[m*blockDim.x + k] != EMPTYVECT32){
kickoutRecord = RecordTable[m*blockDim.x + k];
if(atomicCAS(&(RecordTable[m*blockDim.x + k]), RecordTable[m*blockDim.x + k], EMPTYVECT32) == kickoutRecord){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = kickoutRecord;
storeposition+=32;
}
}
}
}
}
//handling for more than 512 spilled elements is not implemented yet
//launch new kernel
if(Inblocktid == launchtime){
if(GlobalbucketCount[globalbuckethash]*PLTSNum % 512 == 0){
m = (GlobalbucketCount[globalbuckethash]*PLTSNum) / 512;
}else{
m = (GlobalbucketCount[globalbuckethash]*PLTSNum) / 512 + 1;
}
StartNum = GlobalbucketCount[globalbuckethash]*PLTSNum;
if(launchtime > 0){
i=0;
for(k = communicationGstore[blockIdx.x].beginindex; k < communicationGstore[blockIdx.x].endindex; k++){
i+=GlobalbucketCount[k];
}
StartNum += i;
if(i*PLTSNum % 512 == 0){
m += i*PLTSNum / 512;
}else{
m += (i*PLTSNum / 512 +1);
}
}
dim3 cgridstructure(m,1,1);
dim3 cblockstructure(512,1,1);
//CUDADeadlockBFSVerifyChild<<<cgridstructure, cblockstructure>>>(blockIdx.x, globalbuckethash, communicationGstore, PG_AllLTS, PG_AllStates, PG_AllTransitions, PG_AllSynctransitions, PG_LTSStateEncodeBits, PEventEncodeBits, OutgoingTEbytes, PGBucketNum, PLTSNum, StartNum );
launchtime++;
haveChild = true;
}
}
}
__syncthreads();
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32){
//got new stateV
localstateV = EMPTYVECT32;
ifgetnewstatev = false;
for(j = 0; j < HashNum; j++){
for(i = vthreadgroupID * PLTSNum; i < (vthreadgroupID+1) * PLTSNum; i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
for(i = vthreadgroupnuminblock * PLTSNum; i<(int)(blockDim.x); i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
}
}
__syncthreads();
if(Inblocktid == launchtime - 1 && ifgetnewstatev == false){
for(nonewcount = 0; nonewcount < vthreadgroupnuminblock; nonewcount++){
if(GroupStore[nonewcount] != EMPTYVECT32){
break;
}
}
if(nonewcount == vthreadgroupnuminblock){
hipDeviceSynchronize();
haveChild = false;
}
}
__syncthreads();
if(nonewcount == vthreadgroupnuminblock){
//get new state again, if no, block stop.
if(InvthreadgroupID == 0){
//got new stateV
if(vthreadgroupID < GlobalbucketCount[communicationGstore[blockIdx.x].beginindex])
{
globalbuckethash = communicationGstore[blockIdx.x].beginindex;
storeposition = vthreadgroupID;
}
//layer = communicationlayer[blockIdx.x];
GroupStore[vthreadgroupID] = GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition];
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32)
{
ifblocknostate = true;
}
}
__syncthreads();
if(ifblocknostate)
break;
if(communicationcollision[blockIdx.x] && Inblocktid == launchtime){
//need more blocks
k = 0;
for(m = communicationGstore[blockIdx.x].beginindex; m < communicationGstore[blockIdx.x].endindex; m++){
k += GlobalbucketCount[m];
}
k -= vthreadgroupnuminblock;
StartNum = k;
if(k*PLTSNum % 512 == 0)
m = (k*PLTSNum)/512;
else
m = (k*PLTSNum)/512 + 1;
dim3 gridstruc(m,1,1);
dim3 blockstruc(512,1,1);
//CUDADeadlockBFSVerifyChild<<<gridstruc, blockstruc>>>(blockIdx.x, globalbuckethash, communicationGstore, PG_AllLTS, PG_AllStates, PG_AllTransitions, PG_AllSynctransitions, PG_LTSStateEncodeBits, PEventEncodeBits, OutgoingTEbytes, PGBucketNum, PLTSNum, StartNum );
launchtime++;
haveChild=true;
}
}
}while(!IFDeadlockDetected);
CudaInterBlocksSyn(gridDim.x);
if(Ingridtid == 0){
//let a single thread write the result to avoid racing stores from every other thread in the grid
if(IFDeadlockDetected)
*G_RESULT = 0;
else
*G_RESULT = 1;
}
}
//void NewStateV(unsigned int * targetV, int tindex, int * index, int *count, unsigned char* OutgoingTs, unsigned int * bitwidth, unsigned int OutGTbyte, unsigned int EEncode)
//{
// unsigned int tmp = *targetV;
// unsigned int tostate = 0;
// int newsbeginbit = 0, endbit;
// unsigned int Secode = bitwidth[tindex];
//
// int i,j,replacebeginbyte, replaceendbyte;
//
// replacebeginbyte = *count * OutGTbyte;
// replaceendbyte = (*count + 1)*OutGTbyte;
//
// //if(EEncode < 8){
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode; //event
// //}else{
// // replacebeginbyte++;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode - 8;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode - 8;
// //}
//
// for(i = 0; i < tindex; i++){
// newsbeginbit += bitwidth[i];
// }
//
// endbit = newsbeginbit + bitwidth[tindex];
//
// if(Secode == 8){
// tostate = (int) OutgoingTs[replaceendbyte - 1];
// tostate = tostate << (31 - endbit);
//
// }else{
// tostate = 0;
//
// for( i = replaceendbyte - 1; i > replacebeginbyte; i--)
// tostate = tostate | (OutgoingTs[i] << 8 * (replaceendbyte - 1 - i));
//
// tostate = tostate << (31-Secode);
// tostate = tostate >> (31-Secode);
// tostate = tostate << (31-endbit);
//
// }
//
// i = tmp >> (endbit + Secode);
// i = i << (endbit + Secode);
// j = tmp << (newsbeginbit + Secode);
// j = j >> (newsbeginbit + Secode);
//
// * targetV = (int) (i | j | tostate);
//
// if((EEncode+Secode)*(*count + 1) > 32){
// * index += 1;
// *count = 0;
// }else
// (*count)++;
//}
//
//void DecodeTransitions(unsigned int * outgoingT, int beginindex, int count, unsigned int * Tostate, unsigned int * Tevent, unsigned int OutGTe, unsigned int Statebitwidth)
//{
// int i, j;
// unsigned int tmp;
// unsigned int startbyte, endbyte;
// startbyte = (count * OutGTe)%4;
// endbyte = ((count + 1)*OutGTe)%4;
//
// if(endbyte == 0)
// endbyte = 4;
//
// tmp = outgoingT[beginindex];
//
// tmp = tmp << (startbyte - 1);
// tmp = tmp >> (startbyte + 3 - endbyte);
//
// *Tostate = (tmp << 31 - Statebitwidth) >> (31- Statebitwidth);
// *Tevent = tmp >> Statebitwidth;
//}
//
//
//
//bool GetAllsuccessors(unsigned int * AllLTS, unsigned int * Allstates, unsigned int * Alltransitions, unsigned int ltsindex, unsigned int sindex, Nodemark * result)
//{
// unsigned int statesbegin, transbegin, transborder, syncbegin;
// statesbegin = AllLTS[ltsindex];
// transbegin = Allstates[statesbegin + sindex];
// transborder = Allstates[statesbegin + sindex + 1];
//
// if(transbegin == 0 && (ltsindex != 0 || sindex !=0))
// return false;
//
// result->beginInt = transbegin;
// result->endInt = transborder - 4;
//
// result->synbeginInt = Alltransitions[transborder - 1] | Alltransitions[transborder - 2] | Alltransitions[transborder - 3] | Alltransitions[transborder - 4];
//
// transborder = Allstates[statesbegin + sindex + 2];
//
// syncbegin = Alltransitions[transborder - 1] | Alltransitions[transborder - 2] | Alltransitions[transborder - 3] | Alltransitions[transborder - 4];
//
// result->synendInt = syncbegin - 1;
// return true;
//}
//
//unsigned int GetStateinVec(int index, unsigned int svec, unsigned int * stateencodebits)
//{
// int sbeginbit, sendbit;
// unsigned int ltsid;
//
// sbeginbit = 0;
// sendbit = 0;
//
// for(int i = 0; i < index; i++){
// sbeginbit += stateencodebits[i];
// }
// sendbit = sbeginbit + stateencodebits[index] - 1;
// svec = svec << sbeginbit;
// svec = svec >> (sbeginbit + 31 - sendbit);
// ltsid = svec;
// return ltsid;
//
//}
//
//int HostGenerateStateSpace(int LTSNum, unsigned int * H_AllLTS, unsigned int * H_AllStates, unsigned int * H_AllTransitions, unsigned int * H_AllSynctrans, unsigned int ** RecordList, unsigned int RequestNum, unsigned int H_InitialStateV, unsigned int * H_LTSStateEncodeBits, unsigned int * OutgoingTEbytes, unsigned int HEventEncodeBits)
//{
// int i,j,m,k;
// int SuccessorCount;
// queue<unsigned int> Taskqueue;
// set<unsigned int> Taskset;
// vector<unsigned int> Syncqueue;
// vector<unsigned int>::iterator Syncit;
//
// vector<unsigned int> Syncevents;
// vector<unsigned int>::iterator ITS;
// bool ifexist;
//
// queue<unsigned int> VisitedS;
//
// unsigned int newStateV;
// unsigned int * succStateV;
// unsigned int * tmpStateV;
// unsigned int newState;
// unsigned int belonglts;
// unsigned int transevent;
//
// unsigned int *tmp;
//
// unsigned int tmpcount;
// unsigned int tmpoutT;
// unsigned char tmpT[4];
//
// int x,y;
//
// bool ifoutgoing;
// int ifoutgoingcount;
// Nodemark allsucc;
//
// SuccessorCount = 1;
// Taskqueue.push(H_InitialStateV);
// while(SuccessorCount < RequestNum){
// newStateV = Taskqueue.front();
// ifoutgoingcount = 0;
// for(i = 0; i < LTSNum; i++){
// ifoutgoing = false;
// GetStateinVec(i, newStateV, &newState);
// ifoutgoing = GetAllsuccessors(H_AllLTS, H_AllStates, H_AllTransitions, belonglts, newState, &allsucc);
// if(!ifoutgoing){
// ifoutgoingcount++;
// continue;
// }
//
// m = allsucc.beginInt;
// x = -1;
// y = 0;
// while(m < allsucc.endInt){
// succStateV = new unsigned int[1];
//
// if(x != m){
// tmpoutT = H_AllTransitions[m];
// tmpT[0] = (char)(tmpoutT >> 24);
// tmpT[1] = (char)(tmpoutT >> 16);
// tmpT[2] = (char)(tmpoutT >> 8);
// tmpT[3] = (char)tmpoutT;
// x = m;
// }
// NewStateV(succStateV, i, &m, &y, tmpT, H_LTSStateEncodeBits, OutgoingTEbytes[i], HEventEncodeBits );
//
// if(Taskset.insert(*succStateV).second){
// Taskqueue.push(*succStateV);
// SuccessorCount++;
// }
// }
//
// k = allsucc.synbeginInt;
// tmpcount = 0;
// x = -1;
// y = 0;
// while(k < allsucc.synendInt){
// succStateV = new unsigned int[1];
//
// DecodeTransitions(H_AllSynctrans, k, tmpcount, &newState, &transevent, OutgoingTEbytes[belonglts], H_LTSStateEncodeBits[i]);
//
// if(x != k){
// tmpoutT = H_AllSynctrans[k];
// tmpT[0] = (char)(tmpoutT >> 24);
// tmpT[1] = (char)(tmpoutT >> 16);
// tmpT[2] = (char)(tmpoutT >> 8);
// tmpT[3] = (char)tmpoutT;
// x = k;
// }
// NewStateV(succStateV, i, &k, &y, tmpT, H_LTSStateEncodeBits, OutgoingTEbytes[i], HEventEncodeBits);
//
// tmpcount++;
// j = 0;
// for(ITS = Syncevents.begin(); ITS < Syncevents.end(); ++ITS){
// if(*ITS == transevent){
// ifexist = true;
// break;
// }else
// j++;
// }
// if(ifexist){
//
// tmpStateV = (unsigned int *)&(Syncqueue[j]);
// SynTwoStatesCPU(tmpStateV, *succStateV, i, newStateV, H_LTSStateEncodeBits);
//
// }else{
// Syncevents.push_back(transevent);
// Syncqueue.push_back(*succStateV);
// SuccessorCount++;
// }
// }
// for(Syncit = Syncqueue.begin(); Syncit != Syncqueue.end(); Syncit++) {
// Taskqueue.push(*Syncit);
// }
// Syncqueue.clear();
// }
// if(ifoutgoingcount == LTSNum){
// return -1;
// }
//
// }
//
// *RecordList = new unsigned int[SuccessorCount];
// for(i = 0; i < SuccessorCount; i++){
// (*RecordList)[i] = Taskqueue.front();
// Taskqueue.pop();
// }
//
// return SuccessorCount;
//}
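//Host-side driver: copies the encoded LTS data to the device, uploads the hash-function constants, binds the lookup
//textures, seeds the search with the initial state vector, launches CUDADeadlockBFSVerify, then reads back and prints
//the detection result.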
void CallCudaBFS(unsigned int * AllLTS, unsigned int * AllStates, unsigned int * AllTransitions, unsigned int* AllSyncTrans, unsigned int H_InitialSV, unsigned int * H_LTSStateEncodeBits, unsigned int LTSNum,unsigned int AllLTSStateNum, unsigned int AllTransLength, unsigned int AllSyncTransLength, unsigned int EventEncodeBits, unsigned int * OutgoingTEbytes)
{
int i,j;
unsigned int * G_AllLTS;
unsigned int * G_AllStates;
unsigned int * G_AllTransitions;
unsigned int * G_AllSyncTrans; //all trans with sync events.
//unsigned int * G_InitialStateV;
unsigned int * G_OutgoingTEbytes;
unsigned int * G_LTSStateEncodeBits;
unsigned int * G_DetectResult;
//Choose to generate some statevectors firstly in CPU---OPTIONAL
unsigned int * G_Startlist;
//Choose to generate some statevectors firstly in CPU---OPTIONAL
unsigned int * H_Startlist;
unsigned int * H_Result;
unsigned int H_startsize;
unsigned int * LTSStateNum = new unsigned int[LTSNum];
unsigned int Startblocknum;
unsigned int Startthreadnum1block;
unsigned int Startthreadgroupnum;
unsigned int H_GlobalbucketNum;
int * parameters = new int[2];
parameters[0]=4;
parameters[1]=12;
//unsigned int * G_GlobalbucketNum;
int rv[10];
srand(time(NULL));
for(i = 0; i < 10; i++){
rv[i] = rand();
}
hipSetDevice(0);
H_Result = new unsigned int[1];
Startthreadnum1block = 512;
Startblocknum = 1;
//Initialize Startlist
Startthreadgroupnum = (((Startthreadnum1block/32)/LTSNum)*(Startthreadnum1block/32))*Startblocknum; //initial value, not the final one?
//i = HostGenerateStateSpace(LTSNum, AllLTS,AllStates,AllTransitions, AllSyncTrans, &H_Startlist, 1, H_InitialSV,H_LTSStateEncodeBits, OutgoingTEbytes, EventEncodeBits);
i = 0; //the CPU pre-generation above is disabled; reset i so the leftover loop counter from the rand() loop does not trigger the branch below
if(i > 0){
j = i * LTSNum;
if(i > Startthreadgroupnum){
Startthreadgroupnum = i;
Startblocknum = Startthreadgroupnum/(Startthreadnum1block/LTSNum);
}
}else if(i == -1){
cout<<"deadlock being detected";
exit(0);
}
Startthreadgroupnum = 1;
H_Startlist = new unsigned int[1];
H_Startlist[0] = H_InitialSV;
H_GlobalbucketNum = LTSNum * 2;
hipMalloc((void **)&G_AllLTS, sizeof(unsigned int) * LTSNum);
hipMalloc((void **)&G_AllStates, sizeof(unsigned int) * (AllLTSStateNum+1));
hipMalloc((void **)&G_AllTransitions, sizeof(unsigned int) * AllTransLength);
hipMalloc((void **)&G_AllSyncTrans,sizeof(unsigned int) * AllSyncTransLength);
hipMalloc((void **)&G_OutgoingTEbytes, sizeof(unsigned int) * LTSNum);
hipMalloc((void **)&G_LTSStateEncodeBits, sizeof(unsigned int) * LTSNum);
hipMalloc((void **)&G_Startlist, sizeof(unsigned int) * Startthreadgroupnum);
//hipMalloc((unsigned int *)&G_InitialStateV, sizeof(int));
hipMalloc((void **)&G_DetectResult, sizeof(unsigned int));
hipMemcpy(G_AllLTS, AllLTS, sizeof(unsigned int) * LTSNum, hipMemcpyHostToDevice);
hipMemcpy(G_AllStates, AllStates, sizeof(unsigned int) * (AllLTSStateNum+1), hipMemcpyHostToDevice);
hipMemcpy(G_AllTransitions, AllTransitions, sizeof(unsigned int) * AllTransLength, hipMemcpyHostToDevice);
hipMemcpy(G_AllSyncTrans, AllSyncTrans, sizeof(unsigned int) * AllSyncTransLength, hipMemcpyHostToDevice);
//hipMemcpy(G_InitialStateV, &H_InitialSV, sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(G_LTSStateEncodeBits, H_LTSStateEncodeBits, sizeof(unsigned int) * LTSNum, hipMemcpyHostToDevice);
hipMemcpy(G_Startlist, H_Startlist, sizeof(unsigned int) * Startthreadgroupnum, hipMemcpyHostToDevice);
hipMemcpy(G_OutgoingTEbytes, OutgoingTEbytes, sizeof(unsigned int)*LTSNum, hipMemcpyHostToDevice);
hipMemcpyToSymbol(LA1, &rv[0], sizeof(int));
hipMemcpyToSymbol(LB1, &rv[1], sizeof(int));
hipMemcpyToSymbol(LA2, &rv[2], sizeof(int));
hipMemcpyToSymbol(LB2, &rv[3], sizeof(int));
hipMemcpyToSymbol(LA3, &rv[4], sizeof(int));
hipMemcpyToSymbol(LB3, &rv[5], sizeof(int));
hipMemcpyToSymbol(LA4, &rv[6], sizeof(int));
hipMemcpyToSymbol(LB4, &rv[7], sizeof(int));
hipMemcpyToSymbol(BUCA, &rv[8], sizeof(int));
hipMemcpyToSymbol(BUCB, &rv[9], sizeof(int));
for(i = 0; i < 8; i++){
rv[i] = rand();
}
i = 512;
hipMemcpyToSymbol(GA1, &rv[0], sizeof(int));
hipMemcpyToSymbol(GB1, &rv[1], sizeof(int)); //GB1, not GB2: otherwise GB1 stays uninitialized and GB2 is written twice
hipMemcpyToSymbol(GA2, &rv[2], sizeof(int));
hipMemcpyToSymbol(GB2, &rv[3], sizeof(int));
hipMemcpyToSymbol(GA3, &rv[4], sizeof(int));
hipMemcpyToSymbol(GB3, &rv[5], sizeof(int));
hipMemcpyToSymbol(GA4, &rv[6], sizeof(int));
hipMemcpyToSymbol(GB4, &rv[7], sizeof(int));
hipMemcpyToSymbol(TableSize, &i, sizeof(int));
hipMemcpyToSymbol(HashNum, &parameters[0], sizeof(int));
hipMemcpyToSymbol(IterationTime, &parameters[1], sizeof(int));
//bind data to texture
hipBindTexture(NULL, LTSOFFSET, G_AllLTS, sizeof(unsigned int)*LTSNum);
hipBindTexture(NULL, STATEOFFSET, G_AllStates, sizeof(unsigned int)*(AllLTSStateNum+1));
hipBindTexture(NULL, OUTGOINGDETAIL, G_AllTransitions, sizeof(unsigned int)*AllTransLength); //how texture memory can accelerate the access rate need to be explored
hipBindTexture(NULL, SYNCOUTGOING, G_AllSyncTrans, sizeof(unsigned int)*AllSyncTransLength);
hipBindTexture(NULL, LTSSTATEBITS, G_LTSStateEncodeBits, sizeof(unsigned int)* LTSNum);
hipBindTexture(NULL, TRANSEBYTES, G_OutgoingTEbytes, sizeof(unsigned int)*LTSNum);
dim3 g(1,1,1);
dim3 b(512,1,1);
H_startsize = 1;
hipLaunchKernelGGL(( CUDADeadlockBFSVerify), dim3(g), dim3(b), 5120*sizeof(unsigned int), 0, G_AllLTS, G_AllStates, G_AllTransitions, G_AllSyncTrans, G_Startlist, G_LTSStateEncodeBits, EventEncodeBits, G_OutgoingTEbytes, LTSNum, G_DetectResult, H_GlobalbucketNum, AllLTSStateNum, H_startsize);
hipMemcpy(H_Result, G_DetectResult, sizeof(unsigned int), hipMemcpyDeviceToHost);
cout<<"Result"<<*H_Result<<endl;
hipUnbindTexture(LTSOFFSET);
hipUnbindTexture(STATEOFFSET);
hipUnbindTexture(OUTGOINGDETAIL);
hipUnbindTexture(SYNCOUTGOING);
hipUnbindTexture(LTSSTATEBITS);
hipUnbindTexture(TRANSEBYTES);
hipFree(G_AllLTS);
hipFree(G_AllStates);
hipFree(G_AllTransitions);
hipFree(G_AllSyncTrans);
hipFree(G_LTSStateEncodeBits);
hipFree(G_OutgoingTEbytes);
hipFree(G_Startlist);
hipFree(G_DetectResult);
//hipFree(GlobalBuckets);
//hipFree(GlobalOpenHash);
//hipFree(GlobalVisitedHash);
delete [] AllLTS; //these host arrays were allocated with new[] in main, so release them with delete[], not free
delete [] AllStates;
delete [] AllTransitions;
delete [] AllSyncTrans;
}
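//Reads the encoded model (LTS offsets, state offsets, transitions, synchronized transitions and the encoding
//parameters) from the text files under ../test/encode/ and starts the GPU verification.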
int main()
{
//read data from file
int i;
unsigned int * AllLTS;
unsigned int * AllStates;
unsigned int * AllTransitions;
unsigned int * AllSyncTrans;
unsigned int InitialV;
unsigned int LTSNum;
unsigned int StatesNUM;
unsigned int AlltransNum;
unsigned int AllsynctransNum;
//unsigned int Synindexencodebyte;
//unsigned int LTSEncodebyte;
unsigned int EventEncodebits;
unsigned int * LTSStateEncodebits;
unsigned int * OutgoingTEbytes;
ifstream file1; //for all LTS
ifstream file2; //for All states
ifstream file3; //for all trans
ifstream file4; //for all sync trans;
ifstream file5; //for other parameters
file1.open("../test/encode/alllts.txt");
file2.open("../test/encode/allstates.txt");
file3.open("../test/encode/alltrans.txt");
file4.open("../test/encode/allsynctrans.txt");
file5.open("../test/encode/parameters.txt");
//parameters
file5>>InitialV;
file5>>LTSNum;
file5>>StatesNUM;
file5>>AlltransNum;
file5>>AllsynctransNum;
//file5>>Synindexencodebyte;
//file5>>LTSEncodebyte;
file5>>EventEncodebits;
LTSStateEncodebits = new unsigned int[LTSNum];
OutgoingTEbytes = new unsigned int[LTSNum];
for(i=0; i < LTSNum; i++){
file5>>LTSStateEncodebits[i];
}
for(i=0; i < LTSNum; i++){
file5>>OutgoingTEbytes[i];
}
AllLTS = new unsigned int[LTSNum];
AllStates = new unsigned int[StatesNUM + 1];
AllTransitions = new unsigned int[AlltransNum];
AllSyncTrans = new unsigned int[AllsynctransNum];
file5.close();
for(i=0; i <LTSNum; i++){
file1>>AllLTS[i];
}
file1.close();
for(i=0; i < StatesNUM+1; i++){
file2>>AllStates[i];
}
file2.close();
for(i=0; i < AlltransNum; i++){
file3>>AllTransitions[i];
}
file3.close();
for(i=0; i < AllsynctransNum; i++){
file4>>AllSyncTrans[i];
}
file4.close();
CallCudaBFS(AllLTS,AllStates,AllTransitions,AllSyncTrans,InitialV,LTSStateEncodebits, LTSNum, StatesNUM,AlltransNum,AllsynctransNum,EventEncodebits, OutgoingTEbytes);
}
| 82f2c0444cd3f749598bb287d2d59dd5762b42b8.cu |
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <sm_35_atomic_functions.h>
#include <iostream>
#include <string>
#include <time.h>
#include <queue>
#include <set>
#include <list>
#include <fstream>
#include <iomanip>
using namespace std;
texture<unsigned int, 1, cudaReadModeElementType> LTSOFFSET; //1 means 1-dimension
texture<unsigned int, 1, cudaReadModeElementType> STATEOFFSET;
texture<unsigned int, 1, cudaReadModeElementType> OUTGOINGDETAIL;
texture<unsigned int, 1, cudaReadModeElementType> SYNCOUTGOING;
texture<unsigned int, 1, cudaReadModeElementType> TRANSEBYTES;
texture<unsigned int, 1, cudaReadModeElementType> LTSSTATEBITS;
__constant__ int LA1;
__constant__ int LA2;
__constant__ int LA3;
__constant__ int LA4;
__constant__ int LB4;
__constant__ int GA1;
__constant__ int GA2;
__constant__ int GA3;
__constant__ int LB1;
__constant__ int LB2;
__constant__ int LB3;
__constant__ int GB1;
__constant__ int GB2;
__constant__ int GB3;
__constant__ int GA4;
__constant__ int GB4;
__constant__ int BUCA;
__constant__ int BUCB;
__constant__ int TableSize;
__constant__ unsigned int PrimeNum = 334214459;
__constant__ int IterationTime;
__constant__ int HashNum;
__constant__ int ProbeTimes;
static const unsigned int EMPTYVECT32 = 0x7FFFFFFF;
static const unsigned int P = 334214459;
static const unsigned int blocksize = 512;
//class LocalRecord{
//public:
// char localmark; //record the BFS layer in Shared Memory
// char toevent;
// unsigned int statevector;
//
// __device__ void operator= (LocalRecord t){
// localmark = t.localmark;
// toevent = t.toevent;
// statevector = t.statevector;
// }
//};
class Bucket{
public:
unsigned int beginindex;
unsigned int endindex;
};
class Nodemark{
public:
unsigned int beginInt;
unsigned int endInt;
unsigned int synbeginInt;
unsigned int synendInt;
};
__device__ unsigned int *GlobalOpenHash;
__device__ Bucket *GlobalBuckets;
__device__ unsigned int GlobalBucketNum;
__device__ unsigned int *GlobalbucketIndex; //just used for open
__device__ unsigned int *GlobalbucketCount;
__device__ unsigned int *GlobalVisitedHash; //here, for visited stateV, use hash to store back to global memory. While this hash doesn't kick the original one. For open stateV, use buckets hash.
//__device__ unsigned int GlobalVisitedHashoffset[3];
__device__ unsigned int communicationlayer[100];
__device__ bool communicationcollision[100];
__device__ Bucket *communicationGstore; //store the buckets that child blocks store their data
__device__ bool Ifreturn2parent[100];
//__device__ volatile unsigned int * GlobalBucketsCount;
__device__ unsigned int OpenSize;
__device__ unsigned int openvisitedborder;
__device__ bool IFDeadlockDetected;
__device__ bool IfDeadlockfree;
volatile __device__ int SynMutex = 0;
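//Software barrier across thread blocks: thread 0 of every block atomically increments SynMutex and spins until the
//counter reaches GoalValue. This only works when all blocks of the grid are resident on the device at the same time.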
__device__ void CudaInterBlocksSyn( int GoalValue)
{
__syncthreads();
int tid_in_block = threadIdx.x;
// only thread 0 is used for synchronization
//switch(tid_in_block)
//{
// case 0:
if(tid_in_block == 0)
{
atomicAdd((int*)&SynMutex, 1);
while(SynMutex < GoalValue);
}
//}
__syncthreads();
}
__device__ unsigned int Buckethash(unsigned int k)
{
unsigned int bucketindex;
bucketindex = k % GlobalBucketNum;
return bucketindex;
}
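//Globalhash/Localhash: small families of (a*k + b) mod P mod tablesize hash functions selected by index, used for the
//global open/visited tables and the block-local cuckoo tables respectively. Note that in Localhash '^' binds looser
//than '+', so an expression like LA1 ^ k + LB1 evaluates as LA1 ^ (k + LB1).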
__device__ unsigned int Globalhash(unsigned int k, int index)
{
if(index == 0)
return (GA1 * k + GB1) % PrimeNum % (3*TableSize);
if(index == 1)
return (GA2 * k + GB2) % PrimeNum % (3*TableSize);
if(index == 2)
return (GA3 * k + GB3) % PrimeNum % (3*TableSize);
	if(index == 3)
		return (GA4 * k + GB4) % PrimeNum % (3*TableSize);
	//fallback so every index value returns a result (CudaVisitedGlobalHashstore passes the raw eviction counter, which can exceed 3)
	return (GA4 * k + GB4) % PrimeNum % (3*TableSize);
}
__device__ unsigned int Localhash(unsigned int k, int index)
{
if(index == 0){
return (LA1 ^ k + LB1) % PrimeNum % TableSize;
}
if(index == 1){
return (LA2 ^ k + LB2) % PrimeNum % TableSize;
}
if(index == 2){
return (LA3 ^ k + LB3) % PrimeNum % TableSize;
}
	if(index == 3){
		return (LA4^k + LB4) % PrimeNum % TableSize;
	}
	//fallback so all control paths return a value
	return (LA4^k + LB4) % PrimeNum % TableSize;
}
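//Extracts the local state of LTS 'index' from the packed global state vector 'svec', using the per-LTS bit widths
//stored in the LTSSTATEBITS texture.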
__device__ unsigned int CudaGetStateinVec(int index, unsigned int svec)
{
int sbeginbit, sendbit;
unsigned int ltsid;
sbeginbit = 0;
sendbit = 0;
for(int i = 0; i < index; i++){
sbeginbit += tex1Dfetch(LTSSTATEBITS, i);
}
sendbit = sbeginbit + tex1Dfetch(LTSSTATEBITS, index) - 1;
svec = svec << sbeginbit;
svec = svec >> (sbeginbit + 31 - sendbit);
ltsid = svec;
return ltsid;
}
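//Looks up, via the LTSOFFSET/STATEOFFSET/OUTGOINGDETAIL textures, the integer ranges holding the ordinary outgoing
//transitions (beginInt..endInt) and the synchronized transitions (synbeginInt..synendInt) of state 'sindex' in LTS
//'ltsindex'. Returns false when the state has no outgoing transitions at all.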
__device__ bool CudaGetAllsuccessors(unsigned int ltsindex, unsigned int sindex, Nodemark * result)
{
unsigned int statesbegin, transbegin, transborder;
statesbegin = tex1Dfetch(LTSOFFSET, ltsindex);
transbegin = tex1Dfetch(STATEOFFSET, statesbegin + sindex);
if(transbegin == 0 && (ltsindex!=0 || sindex!=0))
return false;
transborder = tex1Dfetch(STATEOFFSET, statesbegin + sindex + 1);
result->beginInt = transbegin;
result->endInt = transborder - 1;
result->synendInt = tex1Dfetch(OUTGOINGDETAIL, transborder - 1);
transborder = tex1Dfetch(STATEOFFSET, statesbegin + sindex);
if(transborder == 0)
result->synbeginInt = 0;
else
result->synbeginInt = tex1Dfetch(OUTGOINGDETAIL, transborder - 1);
if(result->beginInt == result->endInt && result->synendInt == result->synbeginInt)
return false;
return true;
}
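//Decodes the next target state from the packed transition bytes in OutgoingTs and writes it into the slot of LTS
//'tindex' inside *targetV, advancing *index/*count to the following encoded transition. Returns false when the decoded
//target is 0, which the encoding appears to use as a padding/end marker.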
__device__ bool CudaNewStateV(unsigned int * targetV, int tindex, int * index, int *count, unsigned char* OutgoingTs, unsigned int OutGTbyte, unsigned int EEncode)
{
unsigned int tmp = *targetV;
unsigned int tostate = 0;
int newsbeginbit = 0, endbit;
unsigned int Secode = tex1Dfetch(LTSSTATEBITS, tindex);
int replacebeginbyte, replaceendbyte;
unsigned int i,j;
int k;
replacebeginbyte = *count * OutGTbyte;
replaceendbyte = (*count + 1)*OutGTbyte;
//if(EEncode < 8){
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode; //event
//}else{
// replacebeginbyte++;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode - 8;
// OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode - 8;
//}
for(i = 0; i < tindex; i++){
newsbeginbit += tex1Dfetch(LTSSTATEBITS, i);
}
endbit = newsbeginbit + Secode - 1;
if((Secode+EEncode) <= 8){
tostate = (int) OutgoingTs[replaceendbyte - 1];
}else{
tostate = 0;
for( k = replaceendbyte - 1; k > replacebeginbyte-1; k--)
tostate = tostate | (OutgoingTs[k] << 8 * (replaceendbyte - 1 - k));
}
if(tostate == 0){
(*index)++;
(*count)=0;
return false;
}
tostate = tostate << 32-Secode;
tostate = tostate >> 32-Secode;
tostate = tostate << (31-endbit);
i = tmp >> (31 - newsbeginbit + 1);
i = i << (31 - newsbeginbit + 1);
j = tmp << endbit+1;
j = j >> endbit+1;
* targetV = (unsigned int) (i | j | tostate);
if((OutGTbyte)*(*count + 2) > 4){
* index += 1;
*count = 0;
}else
(*count)++;
return true;
}
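//Decodes one packed transition record from word 'beginindex' of the SYNCOUTGOING texture (byte position selected by
//'count') into its target state (*Tostate) and event label (*Tevent); zero records are skipped backwards unless
//type == 1.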
__device__ void CudaDecodeTransitions(int type, int beginindex, int count, unsigned int * Tostate, unsigned int * Tevent, unsigned int OutGTe, unsigned int Statebitwidth)
{
unsigned int tmp = 0;
unsigned int startbyte, endbyte;
while(tmp==0 && count >= 0){
startbyte = (count * OutGTe)%4;
endbyte = ((count + 1)*OutGTe)%4;
if(endbyte == 0)
endbyte = 4;
tmp = tex1Dfetch(SYNCOUTGOING, beginindex);
tmp = tmp << (startbyte) * 8;
tmp = tmp >> (startbyte + 4 - endbyte)*8;
*Tostate = (unsigned int)(tmp << 32 - Statebitwidth) >> (32- Statebitwidth);
*Tevent = (unsigned int)tmp >> Statebitwidth;
if(tmp == 0 && type == 1)
break;
count--;
}
}
//__device__ unsigned int CudaGenerateKey(unsigned int KV, int snum)
//{
// return KV;
//
//}
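//Overwrites the bits belonging to LTS 'index' inside *s1 with the corresponding state bits taken from s2; used to
//merge the components of two synchronizing successors into one state vector.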
__device__ void SynTwoStates(unsigned int * s1, unsigned int s2, int index)
{
unsigned int localstate;
int beginbit = 0, endbit;
unsigned int i,j;
for(i = 0; i < index;i++){
beginbit += tex1Dfetch(LTSSTATEBITS, i);
}
endbit = beginbit + tex1Dfetch(LTSSTATEBITS,index);
s2 = s2 << 32-endbit;
i = ((*s1) << endbit)>>endbit;
//i = (*s1) >> endbit;
j = ((*s1) >> 32-beginbit)<<32-beginbit;
*s1 = i | j | s2;
}
//void SynTwoStatesCPU(unsigned int * tmpStateV, unsigned int succStateV, int i, unsigned int newStateV, unsigned int * bitwidth){
// int beginbit, endbit;
// int beginbyte, endbyte;
// int j,m;
//
// unsigned char tmp1[4];
// unsigned char tmp2[4];
//
// tmp1[0] = (char)(*tmpStateV);
// tmp1[1] = (char)(*tmpStateV >> 8);
// tmp1[2] = (char)(*tmpStateV >> 16);
// tmp1[3] = (char)(*tmpStateV >> 24);
//
// tmp2[0] = (char)(succStateV);
// tmp2[1] = (char)(succStateV >> 8);
// tmp2[2] = (char)(succStateV >> 16);
// tmp2[3] = (char)(succStateV >> 24);
//
// for(j = 0; j < i; j++){
// beginbit += bitwidth[j];
// }
// endbit = beginbit + bitwidth[i];
//
// beginbyte = beginbit / 8;
// endbyte = endbit / 8;
// beginbit = beginbit % 8;
// endbit = endbit % 8;
//
// for(m = beginbyte; m < endbyte; m++){
// tmp1[m] = tmp1[m] >> (8 - beginbit);
// tmp2[m] = tmp2[m] << beginbit;
// tmp2[m] = tmp2[m] >> beginbit;
// tmp1[m] = tmp1[m] | tmp2[m];
// }
//
// *tmpStateV = (unsigned int)(tmp1[0] | tmp1[1] << 8 | tmp1[2] << 16 | tmp1[3] << 24);
//
//
//}
//__device__ bool CudaHashStore2() //use cuckoo+probe
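//Inserts beginHV into the block-local hash tables (HashNum sub-tables of Tnum slots each) using cuckoo eviction plus
//forward/backward probing. Returns false once the key is stored (or found already present) and true when insertion
//still fails after IterationTime rounds, in which case the last evicted key is left in *RkickoutRecord.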
__device__ bool CudaHashStore(unsigned int beginHV, unsigned int PLTSNum, unsigned int * AllT, int Tnum, unsigned int * RkickoutRecord){
unsigned int localhash;
//LocalRecord kickoutRecord;
char tmp;
int i = 0, j = 0;
unsigned int KeyV = beginHV;
unsigned int kickKeyV;
*RkickoutRecord = EMPTYVECT32;
while(i < IterationTime){
localhash = Localhash(KeyV, i % HashNum);
if((atomicCAS(&(AllT[(i%HashNum) * Tnum + localhash]), EMPTYVECT32, KeyV))==EMPTYVECT32)
return false;
else{
if(AllT[(i%HashNum) * Tnum + localhash] == KeyV)
return false;
kickKeyV = atomicExch(&(AllT[(i%HashNum) * Tnum + localhash]), KeyV);
for(j = 0; j < Tnum; j++){
if(atomicCAS(&(AllT[(i%HashNum) * Tnum + (localhash + j)%Tnum]), EMPTYVECT32, kickKeyV)==EMPTYVECT32)
return false;
else if(AllT[(i%HashNum) * Tnum + (localhash+j)%Tnum] == KeyV)
return false;
if(atomicCAS(&(AllT[(i%HashNum) * Tnum + (localhash - j + Tnum)%Tnum]), EMPTYVECT32, kickKeyV)==EMPTYVECT32){
return false;
}
else if(AllT[(i%HashNum) * Tnum + (localhash-j + Tnum)%Tnum] == KeyV)
return false;
}
kickKeyV = atomicExch(&(AllT[(i%HashNum) * Tnum + (localhash - j + Tnum)%Tnum]), KeyV);
KeyV = kickKeyV;
i++;
}
}
*RkickoutRecord = kickKeyV;
return true;
}
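//Checks whether hkey is already stored in the bucket's region of the global visited hash. Returns true on a hit;
//otherwise returns false and writes a candidate slot (the first empty position found, or the index-0 hash as a
//fallback) to *hashresult.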
__device__ bool CudaVisitedGlobalHashcal(unsigned int * HT, Bucket belongBucket, unsigned int hkey, unsigned int * hashresult){
unsigned int hashposition;
unsigned int KeyV;
int i = 0;
KeyV = hkey;
while(i < HashNum){
hashposition = Globalhash(KeyV, i);
if(HT[belongBucket.beginindex + hashposition] == hkey)
return true;
i++;
}
KeyV = hkey;
i = 0;
while(i < HashNum){
hashposition = Globalhash(KeyV, i);
if(HT[belongBucket.beginindex + hashposition] == EMPTYVECT32){
*hashresult = hashposition;
return false;
}
i++;
}
*hashresult = Globalhash(KeyV, 0);
return false;
}
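//Inserts insertedrecord into the global visited hash starting at bucket 'hasbucket': tries cuckoo eviction inside the
//bucket, moves on to later buckets while any remain, and finally falls back to linear probing. Returns false only when
//every attempt fails.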
__device__ bool CudaVisitedGlobalHashstore(unsigned int * HT, unsigned int hasbucket, unsigned int hashv, unsigned int insertedrecord, unsigned int ltsnum){
Bucket buckethash;
unsigned int kickV;
int i = 0, j = 0;
bool ifstored;
unsigned int kickou;
while(true){
buckethash = GlobalBuckets[hasbucket];
if(atomicCAS(&(HT[buckethash.beginindex + hashv]),EMPTYVECT32,insertedrecord)==EMPTYVECT32){
return true;
}else{
i = 1;
kickV = insertedrecord;
while(i < IterationTime){
kickou = atomicExch(&(HT[buckethash.beginindex + hashv]), kickV);
hashv = Globalhash(kickou, i);
if(atomicCAS(&(HT[buckethash.beginindex + hashv]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
i++;
kickV = kickou;
}
hasbucket++;
}
if(hasbucket > openvisitedborder-1)
break;
}
i = 0;
while(i < HashNum){
hashv = Globalhash(kickV, i);
for(j = 0; j < ProbeTimes; j++){
if(atomicCAS(&(HT[buckethash.beginindex + (hashv + j) % TableSize]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
if(atomicCAS(&(HT[buckethash.beginindex + (hashv - j+ TableSize) % TableSize]),EMPTYVECT32,kickV)==EMPTYVECT32)
return true;
}
}
return false;
}
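//Child kernel intended to be launched from CUDADeadlockBFSVerify via dynamic parallelism (the launches in the parent
//appear to be commented out). It continues the expansion from the states the parent spilled into GlobalOpenHash when
//its block-local tables overflowed, using the same virtual-thread-group scheme.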
__global__ void CUDADeadlockBFSVerifyChild(unsigned int ParentID, unsigned int PBucket, Bucket * Cbucket, unsigned int * CG_AllLTS, unsigned int * CG_AllStates, unsigned int * CG_AllTransitions, unsigned int * CG_AllSynctransitions, unsigned int * CG_LTSStateEncodeBits, unsigned int CEventEncodeBits,unsigned int * OutgoingTEbytes, unsigned int CG_Bucketnum, unsigned int PLTSNum, unsigned int StartNum)
{
int i,j,m,k,x;
int Inblocktid = threadIdx.x;
int Ingridtid = threadIdx.x + blockIdx.x * blockDim.x;
int InWarptid = Inblocktid % 32; //lane index within the warp (warp size is 32; the original % 31 wrapped lane 31 onto lane 0)
int InvthreadgroupID;
int vthreadgroupID;
int Warpid = Inblocktid/32;
int WarpNum = blockDim.x/32;
unsigned int layer;
unsigned int localstateV;
unsigned int localstate;
unsigned int localstate2;
unsigned int belonglts;
unsigned int transevent;
unsigned int maxtransevent;
unsigned int globalbuckethash;
unsigned int visitedstore;
unsigned int offsetborder; //used to mark the border of successors.
bool ifanyoutgoing, ifgetnewstatev, ifglobaldup; //ifglobaldup means if this state is duplicated
int vthreadgroupnuminblock;
int vthreadgroupnuminwarp;
//char tmp;
//unsigned int localKey, localhash;
unsigned int kickoutRecord;
unsigned int insertRecord;
unsigned int visitedRecord;
unsigned int hkey;
unsigned int getindex; // the index to get tasks
unsigned int storeposition;
unsigned int tmpoutT;
unsigned char tmpT[4];
int outgoingcount;
Nodemark SuccessorMark;
vthreadgroupnuminwarp = 32/PLTSNum;
vthreadgroupnuminblock = vthreadgroupnuminwarp * (blockDim.x/32);
if(InWarptid < vthreadgroupnuminwarp * PLTSNum){
vthreadgroupID = Warpid*vthreadgroupnuminwarp + InWarptid/PLTSNum;
InvthreadgroupID = InWarptid % PLTSNum;
}else{
vthreadgroupID = -1;
InvthreadgroupID = -1;
}
__shared__ int nonewcount;
__shared__ bool Ifcollisionhappens;
__shared__ int maxlayer;
extern __shared__ bool C[];
bool * syncduplicate = C;
bool * needsyndupdetect = &syncduplicate[vthreadgroupnuminblock*PLTSNum];
bool * ifnooutgoing = &needsyndupdetect[vthreadgroupnuminblock];
unsigned int * SynEventInteractive = (unsigned int *)&ifnooutgoing[vthreadgroupnuminblock*PLTSNum];
unsigned int * SynStateInteractive = (unsigned int *)&(SynEventInteractive[vthreadgroupnuminblock*PLTSNum]);
unsigned int * RecordTable = &(SynStateInteractive[vthreadgroupnuminblock*PLTSNum]);
unsigned int * GroupStore = &RecordTable[blockDim.x * HashNum];
Bucket * WarpCBindex = (Bucket *)&GroupStore[vthreadgroupnuminblock];
if(Inblocktid == 0){
for(i=0; i<WarpNum; i++){
WarpCBindex[i].beginindex = 0;
WarpCBindex[i].endindex = 0;
}
for(i = 0; i < vthreadgroupnuminblock * PLTSNum; i++){
ifnooutgoing[i] = false;
SynEventInteractive[i] = EMPTYVECT32;
}
for(i = 0; i < vthreadgroupnuminblock; i++)
GroupStore[i] = EMPTYVECT32;
nonewcount = 0;
maxlayer = 0;
Ifcollisionhappens = false;
}
if(InvthreadgroupID != -1){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(InvthreadgroupID == 0){
getindex = vthreadgroupnuminblock * blockIdx.x + vthreadgroupID;
j=0;
if(getindex < GlobalbucketCount[PBucket]){
globalbuckethash = PBucket;
}else{
for(i = Cbucket->beginindex; i < Cbucket->endindex; i++){
j += GlobalbucketCount[i];
if(getindex < j){
globalbuckethash = i;
j -= GlobalbucketCount[i];
getindex = getindex - j;
break;
}
}
}
}
__syncthreads();
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = GlobalOpenHash[globalbuckethash];
GlobalOpenHash[globalbuckethash] = EMPTYVECT32;
}
do{
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
localstate = CudaGetStateinVec(InvthreadgroupID, GroupStore[vthreadgroupID]);
printf("vtg%d, itgid%d, gets%d\n", vthreadgroupID, InvthreadgroupID, localstate);
belonglts = InvthreadgroupID;
ifanyoutgoing = CudaGetAllsuccessors(belonglts, localstate-1, &SuccessorMark);
ifglobaldup = false;
//The successor generation consists of two steps: 1. For trans in alltransitions, process them directly. 2.For trans in allsynctrans, parallel sync is needed.
if(ifanyoutgoing){
outgoingcount=0;
i = SuccessorMark.beginInt;
//calculate global hash position for visited stateV
if(InvthreadgroupID == 0){
globalbuckethash = Buckethash(GroupStore[vthreadgroupID]);
hkey = GroupStore[vthreadgroupID];
ifglobaldup = CudaVisitedGlobalHashcal(GlobalVisitedHash, GlobalBuckets[globalbuckethash],hkey, &visitedstore);
}
localstateV = GroupStore[vthreadgroupID];
visitedRecord = GroupStore[vthreadgroupID];
j = 0;
m = -1;
while(i < SuccessorMark.endInt && !ifglobaldup){
if(m != i){
tmpoutT = tex1Dfetch(OUTGOINGDETAIL, i);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
m = i;
}
if(!CudaNewStateV(&localstateV, InvthreadgroupID, &i, &j, tmpT, tex1Dfetch(TRANSEBYTES, InvthreadgroupID), CEventEncodeBits ))
continue;
if(!Ifcollisionhappens){
insertRecord = localstateV;
//hash store and duplicate elimination module.....
Ifcollisionhappens = CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord);
outgoingcount++;
}
localstateV = GroupStore[vthreadgroupID];
if(Ifcollisionhappens){
break;
}
}
//synchronization part
j = SuccessorMark.synbeginInt;
if(!Ifcollisionhappens){
bool ifmatch;
//int tmpcount=0;
int tmpj = 0;
int nosync;
int lessthanall;
m = 0;
x = -1;
CudaDecodeTransitions(0,SuccessorMark.synendInt-1, (SuccessorMark.synendInt - j + 1)*(4/tex1Dfetch(TRANSEBYTES,belonglts))-1,&localstate2, &maxtransevent, tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS,belonglts));
while(j < SuccessorMark.synendInt){
ifmatch = false;
if(m == 0 && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum]){
if(j == SuccessorMark.synendInt)
break;
CudaDecodeTransitions(1, j, tmpj, &SynStateInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], &SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == 0)
{
SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] = EMPTYVECT32;
break;
}
if(x != j){
tmpoutT = tex1Dfetch(SYNCOUTGOING, j);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
x = j;
}
CudaNewStateV(&localstateV, InvthreadgroupID, &j, &tmpj, tmpT, tex1Dfetch(TRANSEBYTES,InvthreadgroupID), CEventEncodeBits);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
nosync = 0;
lessthanall = 0;
m=0;
for(i=0; i<PLTSNum; i++){
if(i == InvthreadgroupID)
continue;
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] == EMPTYVECT32)
{
nosync++;
continue;
}
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] <= maxtransevent){ //if bigger than the maxtransevent of local, no need to compare as it's impossible to sync
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] > SynEventInteractive[i + vthreadgroupID * PLTSNum]){
m++;
}else if (SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
if(needsyndupdetect[vthreadgroupID] == false)
needsyndupdetect[vthreadgroupID] = true;
//GENERATE SYNC STATE V.......
SynTwoStates(&localstateV, SynStateInteractive[i + vthreadgroupID * PLTSNum], i);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
ifmatch = true;
}else
lessthanall++;
}
}
if(nosync == PLTSNum - 1){
break;
}
if(lessthanall == PLTSNum -1){
m = 0;
syncduplicate[InvthreadgroupID + vthreadgroupID*PLTSNum] = true;
continue;
}
if(syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum])
m = 0;
if(needsyndupdetect[vthreadgroupID] && InvthreadgroupID == 0){ //duplicate elimination after synchronization, so just one synchronized result will be copied to hashtable.
for(i = 0; i < PLTSNum; i++){
if(syncduplicate[i + vthreadgroupID * PLTSNum]){
for(k = 0; k < i; k++)
{
if(SynEventInteractive[k + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
break;
}
}
if(k == i){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
}
}
}
if(ifmatch && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] == false){
//hash copy to table
insertRecord = localstateV;
if(!Ifcollisionhappens)
{
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
if(Ifcollisionhappens){
							for(k = 511; k >= 0; k--){
if(kickoutRecord != EMPTYVECT32){
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, kickoutRecord)==EMPTYVECT32){
kickoutRecord = EMPTYVECT32;
break;
}
}else{
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, localstateV) == EMPTYVECT32){
break;
}
}
}
}
}
if(ifmatch && m == 0){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(j >= SuccessorMark.synendInt){
SynEventInteractive[InvthreadgroupID + vthreadgroupID*PLTSNum] = EMPTYVECT32;
}
localstateV = GroupStore[vthreadgroupID];
}
}
if(outgoingcount == 0 && !ifglobaldup)
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}else{
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}
if(InvthreadgroupID == 0 && !ifglobaldup){
for(i = 0; i < PLTSNum; i++){
if(!ifnooutgoing[i + vthreadgroupID * PLTSNum] && !Ifcollisionhappens)
break;
}
if(i == PLTSNum){
printf("vtg%d detect deadlock\n", vthreadgroupID);
IFDeadlockDetected = true;
}
}
}
CudaInterBlocksSyn(gridDim.x);
if(IFDeadlockDetected){
break;
}
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
if(InWarptid == 0&&!Ifcollisionhappens&&!ifglobaldup){
//copy visited state to global memory
CudaVisitedGlobalHashstore(GlobalVisitedHash, globalbuckethash, visitedstore, GroupStore[vthreadgroupID], PLTSNum);
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = EMPTYVECT32;
}
}
if(Ifcollisionhappens || communicationcollision[ParentID]){
if(IFDeadlockDetected)
break;
//load new kernel, copy data back
unsigned int myareacount = 0;
globalbuckethash = Buckethash((unsigned int)(blockIdx.x)) + openvisitedborder;
if(blockIdx.x == 0){
communicationGstore[ParentID].beginindex = (unsigned int)blockIdx.x;
}
if(blockIdx.x == blockDim.x - 1){
communicationGstore[ParentID].endindex = (unsigned int)(blockIdx.x);
}
if(InWarptid == 0){
for(m = Warpid*32; m<(Warpid + 1)*32; m++){
for(k = 0; k < HashNum; k++){
if(RecordTable[k*blockDim.x + m] != EMPTYVECT32)
myareacount++;
}
}
k = 0;
for(m = 0; m < vthreadgroupnuminwarp; m++){
if(GroupStore[vthreadgroupnuminwarp * Warpid + m] != EMPTYVECT32){
myareacount++;
k++;
}
}
WarpCBindex[Warpid].beginindex = atomicAdd(&GlobalbucketIndex[globalbuckethash], myareacount);
WarpCBindex[Warpid].endindex = WarpCBindex[Warpid].beginindex + myareacount;
atomicAdd(&GlobalbucketCount[globalbuckethash], myareacount);
}
if(InWarptid == 0){
for(m = 0; m < k; m++){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + m] = GroupStore[m];
}
}
storeposition = WarpCBindex[Warpid].beginindex + InWarptid + k;
for(i=0; i<HashNum; i++){
if(RecordTable[i*blockDim.x + Warpid * 32 + InWarptid] != EMPTYVECT32){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = RecordTable[i*blockDim.x + Warpid * 32 + InWarptid];
RecordTable[i*blockDim.x + Warpid * 32 + InWarptid] = EMPTYVECT32;
storeposition+=32;
}
}
if(storeposition < WarpCBindex[Warpid].endindex)
{
for(i=0; i<HashNum; i++){
for(k = Warpid*32; k<(Warpid+1)*32; k++){
if(RecordTable[i*blockDim.x + k] != EMPTYVECT32){
kickoutRecord = RecordTable[i*blockDim.x + k];
if(atomicCAS(&(RecordTable[i*blockDim.x + k]), kickoutRecord, EMPTYVECT32) == kickoutRecord){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = kickoutRecord;
storeposition+=32;
}
}
}
}
}
//for the elements larger than 512, to be expanded........
break;
}
}
if(IFDeadlockDetected)
break;
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32){
//got new stateV
localstateV = EMPTYVECT32;
ifgetnewstatev = false;
for(j = 0; j < HashNum; j++){
for(i = vthreadgroupID * PLTSNum; i < (vthreadgroupID+1) * PLTSNum; i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
for(i = vthreadgroupnuminblock * PLTSNum; i<(int)(blockDim.x); i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
}
}
__syncthreads();
if(InvthreadgroupID == 0 && layer == maxlayer + 1 && ifgetnewstatev == false){
if(Inblocktid == 0){
for(nonewcount = 0; nonewcount < vthreadgroupnuminblock; nonewcount++){
if(GroupStore[nonewcount] != EMPTYVECT32){
break;
}
}
if(nonewcount == vthreadgroupnuminblock){
break;
}
}
}
__syncthreads();
}while(!IFDeadlockDetected);
CudaInterBlocksSyn(gridDim.x);
}
__global__ void CUDADeadlockBFSVerify(unsigned int * PG_AllLTS, unsigned int * PG_AllStates, unsigned int * PG_AllTransitions, unsigned int * PG_AllSynctransitions, unsigned int * PG_Startlist, unsigned int * PG_LTSStateEncodeBits, unsigned int PEventEncodeBits, unsigned int * OutgoingTEbytes, unsigned int PLTSNum, unsigned int * G_RESULT, unsigned int PGBucketNum, unsigned int PAllLTSStatesNum, unsigned int StartNum)
{
int i,j,m,k,x,y;
int Inblocktid = threadIdx.x;
int Ingridtid = threadIdx.x + blockIdx.x * blockDim.x;
int InWarptid = Inblocktid % 32;
int InvthreadgroupID;
int vthreadgroupID;
int Warpid = Inblocktid/32;
int WarpNum = blockDim.x/32;
unsigned int getindex; //the index to get the initial task from global memory.
unsigned int localstateV;
unsigned int localstate;
unsigned int localstate2;
unsigned int belonglts;
unsigned int transevent;
unsigned int maxtransevent;
int nosync,lessthanall;
unsigned int globalbuckethash;
unsigned int visitedstore;
unsigned int tmpoutT;
int outgoingcount;
unsigned int offsetborder; //used to mark the border of successors.
bool ifanyoutgoing, ifgetnewstatev, ifglobaldup; //ifglobaldup means if this state is duplicated
int vthreadgroupnuminblock;
int vthreadgroupnuminwarp;
unsigned char tmpT[4];
unsigned int localKey, localhash;
unsigned int kickoutRecord;
unsigned int insertRecord;
unsigned int visitedRecord;
unsigned int hkey;
unsigned int storeposition;
Nodemark SuccessorMark;
vthreadgroupnuminwarp = 32/PLTSNum;
vthreadgroupnuminblock = vthreadgroupnuminwarp * (blockDim.x/32);
if(InWarptid < vthreadgroupnuminwarp * PLTSNum){
vthreadgroupID = Warpid*vthreadgroupnuminwarp + InWarptid/PLTSNum;
InvthreadgroupID = InWarptid % PLTSNum;
}else{
vthreadgroupID = -1;
InvthreadgroupID = -1;
}
__shared__ bool Ifcollisionhappens;
	__shared__ int collisiontimes; //how the collision count reflects the table occupancy still needs to be explored experimentally.
__shared__ bool ifblocknostate;
__shared__ int nonewcount;
__shared__ bool haveChild;
__shared__ int launchtime;
i = vthreadgroupnuminblock * PLTSNum;
extern __shared__ bool C[];
bool * syncduplicate = C;
bool * needsyndupdetect = &syncduplicate[i];
bool * ifnooutgoing = &needsyndupdetect[vthreadgroupnuminblock];
unsigned int * SynEventInteractive = (unsigned int *)&ifnooutgoing[i];
unsigned int * SynStateInteractive = &SynEventInteractive[i];
unsigned int * RecordTable = &(SynStateInteractive[i]);
unsigned int * GroupStore = &RecordTable[HashNum * blockDim.x];
Bucket * WarpCBindex = (Bucket *)&GroupStore[vthreadgroupnuminblock];
if(Inblocktid == 0){
for(i = 0; i < vthreadgroupnuminblock * PLTSNum; i++){
ifnooutgoing[i] = false;
SynEventInteractive[i] = EMPTYVECT32;
}
for(i = 0; i < HashNum* blockDim.x; i++){
RecordTable[i] = EMPTYVECT32;
}
for(i = 0; i < vthreadgroupnuminblock; i++)
GroupStore[i] = EMPTYVECT32;
nonewcount = 0;
haveChild = false;
launchtime = 0;
ifblocknostate = false;
for(i=0; i<WarpNum; i++){
WarpCBindex[i].beginindex = 0;
WarpCBindex[i].endindex = 0;
}
Ifcollisionhappens = false;
}
__syncthreads();
if(Ingridtid == 0){
GlobalbucketCount = new unsigned int[PGBucketNum];
GlobalbucketIndex = new unsigned int[PGBucketNum];
GlobalBucketNum = PGBucketNum;
GlobalOpenHash = new unsigned int[blockDim.x * 3 * PLTSNum * 4 ];
GlobalBuckets = new Bucket[GlobalBucketNum];
GlobalVisitedHash = new unsigned int[blockDim.x * 3 * PLTSNum * 4]; //bucket/2
communicationGstore = new Bucket[100];
for(i = 0; i < blockDim.x * 3 * PLTSNum * 4; i++)
GlobalOpenHash[i] = EMPTYVECT32;
for(i = 0; i < blockDim.x * 3 * PLTSNum * 4; i++)
GlobalVisitedHash[i] = EMPTYVECT32;
for(i = 0; i < PLTSNum * 4; i++){
GlobalBuckets[i].beginindex = i * blockDim.x;
GlobalBuckets[i].endindex = (i+1)* 3 *blockDim.x - 1;
}
for(i = PLTSNum * 4; i < PLTSNum * 8; i++){
GlobalBuckets[i].beginindex = (i-PLTSNum*4)*blockDim.x;
GlobalBuckets[i].endindex = (i+1-PLTSNum*4)* 3 *blockDim.x - 1;
}
}
if(InvthreadgroupID != -1){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
if(InvthreadgroupID == 0 && vthreadgroupID < StartNum){
getindex = vthreadgroupnuminblock * blockIdx.x + vthreadgroupID;
GroupStore[vthreadgroupID] = PG_Startlist[getindex];
needsyndupdetect[vthreadgroupID] = false;
}
CudaInterBlocksSyn(gridDim.x);
//while(GroupStore[vthreadgroupID].statevector == EMPTYVECT32);
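	//main BFS loop of the parent kernel: expand the current state vector, hash its successors into the per-block record table, then fetch a new state once the current one has been consumed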
do{
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
localstate = CudaGetStateinVec(InvthreadgroupID, GroupStore[vthreadgroupID]);
printf("vtg%d, itgid%d, gets%d\n", vthreadgroupID, InvthreadgroupID, localstate);
belonglts = InvthreadgroupID;
ifanyoutgoing = CudaGetAllsuccessors(belonglts, localstate-1, &SuccessorMark);
ifglobaldup = false;
//The successor generation consists of two steps: 1. For trans in alltransitions, process them directly. 2.For trans in allsynctrans, parallel sync is needed.
if(ifanyoutgoing){
outgoingcount = 0;
i = SuccessorMark.beginInt;
//calculate global hash position for visited stateV
if(InvthreadgroupID == 0){
globalbuckethash = Buckethash(GroupStore[vthreadgroupID]);
hkey = GroupStore[vthreadgroupID];
ifglobaldup = CudaVisitedGlobalHashcal(GlobalVisitedHash, GlobalBuckets[globalbuckethash],hkey, &visitedstore);
}
localstateV = GroupStore[vthreadgroupID];
j = 0;
m = -1;
while(i < SuccessorMark.endInt && !ifglobaldup){
if(m != i){
tmpoutT = tex1Dfetch(OUTGOINGDETAIL, i);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
m = i;
}
if(!CudaNewStateV(&localstateV, InvthreadgroupID, &i, &j, tmpT,tex1Dfetch(TRANSEBYTES,InvthreadgroupID), PEventEncodeBits ))
continue;
if(!Ifcollisionhappens){
insertRecord = localstateV;
//hash store and duplicate elimination module.....
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
localstateV = GroupStore[vthreadgroupID];
if(Ifcollisionhappens){
break;
}
}
//synchronization part
j = SuccessorMark.synbeginInt;
if(!Ifcollisionhappens && SuccessorMark.synbeginInt != SuccessorMark.synendInt && !ifglobaldup){
bool ifmatch;
//int tmpcount=0;
int tmpj = 0;
m = 0;
x = -1;
CudaDecodeTransitions(0,SuccessorMark.synendInt-1, (SuccessorMark.synendInt - j)*(4/tex1Dfetch(TRANSEBYTES, belonglts))-1,&localstate2, &maxtransevent, tex1Dfetch(TRANSEBYTES, belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
while(j < SuccessorMark.synendInt){
ifmatch = false;
if(m == 0 && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum]){
if(j == SuccessorMark.synendInt)
break;
CudaDecodeTransitions(1, j, tmpj, &SynStateInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], &SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum], tex1Dfetch(TRANSEBYTES,belonglts), tex1Dfetch(LTSSTATEBITS, belonglts));
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == 0)
{
SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] = EMPTYVECT32;
break;
}
if(x != j){
tmpoutT = tex1Dfetch(SYNCOUTGOING, j);
tmpT[0] = (char)(tmpoutT >> 24);
tmpT[1] = (char)(tmpoutT >> 16);
tmpT[2] = (char)(tmpoutT >> 8);
tmpT[3] = (char)tmpoutT;
x = j;
}
CudaNewStateV(&localstateV, InvthreadgroupID, &j, &tmpj, tmpT, tex1Dfetch(TRANSEBYTES, InvthreadgroupID), PEventEncodeBits);
//tmpcount++;
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
nosync = 0;
lessthanall = 0;
m=0;
for(i=0; i<PLTSNum; i++){
if(i == InvthreadgroupID)
continue;
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] == EMPTYVECT32)
{
nosync++;
continue;
}
if(SynEventInteractive[i + vthreadgroupID * PLTSNum] <= maxtransevent){ //if bigger than the maxtransevent of local, no need to compare as it's impossible to sync
if(SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] > SynEventInteractive[i + vthreadgroupID * PLTSNum]){
m++;
}else if (SynEventInteractive[InvthreadgroupID + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
if(needsyndupdetect[vthreadgroupID] == false)
needsyndupdetect[vthreadgroupID] = true;
//GENERATE SYNC STATE V.......
SynTwoStates(&localstateV, SynStateInteractive[i + vthreadgroupID * PLTSNum], i);
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
ifmatch = true;
}else
lessthanall++;
}
}
if(nosync == PLTSNum - 1){
break;
}
if(lessthanall == PLTSNum -1){
m = 0;
syncduplicate[InvthreadgroupID + vthreadgroupID*PLTSNum] = true;
continue;
}
if(syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum])
m = 0;
if(needsyndupdetect[vthreadgroupID] && InvthreadgroupID == 0){ //duplicate elimination after synchronization, so just one synchronized result will be copied to hashtable.
for(i = 0; i < PLTSNum; i++){
if(syncduplicate[i + vthreadgroupID * PLTSNum]){
for(k = 0; k < i; k++)
{
if(SynEventInteractive[k + vthreadgroupID * PLTSNum] == SynEventInteractive[i + vthreadgroupID * PLTSNum]){
break;
}
}
if(k == i){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = false;
}
}
}
}
if(ifmatch && syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] == false){
//hash copy to table
insertRecord = localstateV;
if(!Ifcollisionhappens)
{
if(CudaHashStore(insertRecord, PLTSNum, RecordTable, blockDim.x, &kickoutRecord))
Ifcollisionhappens = true;
outgoingcount++;
}
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
if(Ifcollisionhappens){
for(k = 511; k >= 0; k--){
if(kickoutRecord != EMPTYVECT32){
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, kickoutRecord)==EMPTYVECT32){
kickoutRecord = EMPTYVECT32;
break;
}
}else{
if(atomicCAS(&(RecordTable[(HashNum-1)*blockDim.x + k]), EMPTYVECT32, localstateV) == EMPTYVECT32){
break;
}
}
}
}
}
if(!ifmatch && m == 0){
syncduplicate[InvthreadgroupID + vthreadgroupID * PLTSNum] = true;
}
localstateV = GroupStore[vthreadgroupID];
}
}
if(outgoingcount == 0 && !ifglobaldup)
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}else{
ifnooutgoing[vthreadgroupID*PLTSNum + InvthreadgroupID] = true;
}
if(InvthreadgroupID == 0&&!ifglobaldup &&!Ifcollisionhappens){
for(i = 0; i < PLTSNum; i++){
if(!ifnooutgoing[i + vthreadgroupID * PLTSNum])
break;
}
if(i == PLTSNum){
printf("tgid%d find deadlock\n", vthreadgroupID);
IFDeadlockDetected = true;
}
}
}
CudaInterBlocksSyn(gridDim.x);
if(IFDeadlockDetected)
break;
if(GroupStore[vthreadgroupID] != EMPTYVECT32){
if(InvthreadgroupID == 0&&!Ifcollisionhappens&&!ifglobaldup){
				//copy visited state to global memory
CudaVisitedGlobalHashstore(GlobalVisitedHash, globalbuckethash, visitedstore, GroupStore[vthreadgroupID], PLTSNum);
if(InvthreadgroupID == 0){
GroupStore[vthreadgroupID] = EMPTYVECT32;
}
}else if(Ifcollisionhappens){
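				//the per-block record table overflowed: spill its contents into the global open hash and hand the remaining work to a dynamically launched child grid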
if(haveChild)
cudaDeviceSynchronize();
//if(IFDeadlockDetected)
// break;
//load new kernel, copy data back
unsigned int myareacount = 0;
globalbuckethash = Buckethash((unsigned int)(blockIdx.x)) + openvisitedborder;
if(InWarptid == 0){
for(m = Warpid*32; m<(Warpid + 1)*32; m++){
for(k = 0; k < HashNum; k++){
if(RecordTable[k*blockDim.x + m] != EMPTYVECT32)
myareacount++;
}
}
WarpCBindex[Warpid].beginindex = atomicAdd(&GlobalbucketIndex[globalbuckethash], myareacount);
WarpCBindex[Warpid].endindex = WarpCBindex[Warpid].beginindex + myareacount;
atomicAdd(&GlobalbucketCount[globalbuckethash], myareacount);
}
storeposition = WarpCBindex[Warpid].beginindex + InWarptid;
for(m = 0; m < HashNum; m++){
if(RecordTable[m*blockDim.x + Warpid * 32 + InWarptid] != EMPTYVECT32){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = RecordTable[m*blockDim.x + Warpid * 32 + InWarptid];
RecordTable[m*blockDim.x + Warpid * 32 + InWarptid] = EMPTYVECT32;
storeposition+=32;
}
}
if(storeposition < WarpCBindex[Warpid].endindex)
{
for(m = 0; m < HashNum; m++){
for(k = Warpid*32; k<(Warpid+1)*32; k++){
if(RecordTable[m*blockDim.x + k] != EMPTYVECT32){
kickoutRecord = RecordTable[m*blockDim.x + k];
if(atomicCAS(&(RecordTable[m*blockDim.x + k]), RecordTable[m*blockDim.x + k], EMPTYVECT32) == kickoutRecord){
GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition] = kickoutRecord;
storeposition+=32;
}
}
}
}
}
//for the elements larger than 512, to be expanded........
//launch new kernel
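				//one designated thread derives the child grid size from the number of spilled states (512 threads per child block)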
if(Inblocktid == launchtime){
if(GlobalbucketCount[globalbuckethash]*PLTSNum % 512 == 0){
m = (GlobalbucketCount[globalbuckethash]*PLTSNum) / 512;
}else{
m = (GlobalbucketCount[globalbuckethash]*PLTSNum) / 512 + 1;
}
StartNum = GlobalbucketCount[globalbuckethash]*PLTSNum;
if(launchtime > 0){
i=0;
for(k = communicationGstore[blockIdx.x].beginindex; k < communicationGstore[blockIdx.x].endindex; k++){
i+=GlobalbucketCount[k];
}
StartNum += i;
if(i*PLTSNum % 512 == 0){
m += i*PLTSNum / 512;
}else{
m += (i*PLTSNum / 512 +1);
}
}
dim3 cgridstructure(m,1,1);
dim3 cblockstructure(512,1,1);
//CUDADeadlockBFSVerifyChild<<<cgridstructure, cblockstructure>>>(blockIdx.x, globalbuckethash, communicationGstore, PG_AllLTS, PG_AllStates, PG_AllTransitions, PG_AllSynctransitions, PG_LTSStateEncodeBits, PEventEncodeBits, OutgoingTEbytes, PGBucketNum, PLTSNum, StartNum );
launchtime++;
haveChild = true;
}
}
}
__syncthreads();
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32){
//got new stateV
localstateV = EMPTYVECT32;
ifgetnewstatev = false;
for(j = 0; j < HashNum; j++){
for(i = vthreadgroupID * PLTSNum; i < (vthreadgroupID+1) * PLTSNum; i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
for(i = vthreadgroupnuminblock * PLTSNum; i<(int)(blockDim.x); i++){
if((GroupStore[vthreadgroupID] = atomicExch(&(RecordTable[j*blockDim.x + i]), EMPTYVECT32)) != EMPTYVECT32)
{
ifgetnewstatev = true;
break;
}
}
if(ifgetnewstatev == true)
break;
}
}
__syncthreads();
if(Inblocktid == launchtime - 1 && ifgetnewstatev == false){
for(nonewcount = 0; nonewcount < vthreadgroupnuminblock; nonewcount++){
if(GroupStore[nonewcount] != EMPTYVECT32){
break;
}
}
if(nonewcount == vthreadgroupnuminblock){
cudaDeviceSynchronize();
haveChild = false;
}
}
__syncthreads();
if(nonewcount == vthreadgroupnuminblock){
//get new state again, if no, block stop.
if(InvthreadgroupID == 0){
//got new stateV
if(vthreadgroupID < GlobalbucketCount[communicationGstore[blockIdx.x].beginindex])
{
globalbuckethash = communicationGstore[blockIdx.x].beginindex;
storeposition = vthreadgroupID;
}
//layer = communicationlayer[blockIdx.x];
GroupStore[vthreadgroupID] = GlobalOpenHash[GlobalBuckets[globalbuckethash].beginindex + storeposition];
if(InvthreadgroupID == 0 && GroupStore[vthreadgroupID] == EMPTYVECT32)
{
ifblocknostate = true;
}
}
__syncthreads();
if(ifblocknostate)
break;
if(communicationcollision[blockIdx.x] && Inblocktid == launchtime){
//need more blocks
k = 0;
for(m = communicationGstore[blockIdx.x].beginindex; m < communicationGstore[blockIdx.x].endindex; m++){
k += GlobalbucketCount[m];
}
k -= vthreadgroupnuminblock;
StartNum = k;
if(k*PLTSNum % 512 == 0)
m = (k*PLTSNum)/512;
else
m = (k*PLTSNum)/512 + 1;
dim3 gridstruc(m,1,1);
dim3 blockstruc(512,1,1);
//CUDADeadlockBFSVerifyChild<<<gridstruc, blockstruc>>>(blockIdx.x, globalbuckethash, communicationGstore, PG_AllLTS, PG_AllStates, PG_AllTransitions, PG_AllSynctransitions, PG_LTSStateEncodeBits, PEventEncodeBits, OutgoingTEbytes, PGBucketNum, PLTSNum, StartNum );
launchtime++;
haveChild=true;
}
}
}while(!IFDeadlockDetected);
CudaInterBlocksSyn(gridDim.x);
if(!IFDeadlockDetected && Ingridtid == 0){
*G_RESULT = 1;
}else{
*G_RESULT = 0;
}
}
//void NewStateV(unsigned int * targetV, int tindex, int * index, int *count, unsigned char* OutgoingTs, unsigned int * bitwidth, unsigned int OutGTbyte, unsigned int EEncode)
//{
// unsigned int tmp = *targetV;
// unsigned int tostate = 0;
// int newsbeginbit = 0, endbit;
// unsigned int Secode = bitwidth[tindex];
//
// int i,j,replacebeginbyte, replaceendbyte;
//
// replacebeginbyte = *count * OutGTbyte;
// replaceendbyte = (*count + 1)*OutGTbyte;
//
// //if(EEncode < 8){
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode; //event
// //}else{
// // replacebeginbyte++;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] << EEncode - 8;
// // OutgoingTs[replacebeginbyte] = OutgoingTs[replacebeginbyte] >> EEncode - 8;
// //}
//
// for(i = 0; i < tindex; i++){
// newsbeginbit += bitwidth[i];
// }
//
// endbit = newsbeginbit + bitwidth[tindex];
//
// if(Secode == 8){
// tostate = (int) OutgoingTs[replaceendbyte - 1];
// tostate = tostate << (31 - endbit);
//
// }else{
// tostate = 0;
//
// for( i = replaceendbyte - 1; i > replacebeginbyte; i--)
// tostate = tostate | (OutgoingTs[i] << 8 * (replaceendbyte - 1 - i));
//
// tostate = tostate << (31-Secode);
// tostate = tostate >> (31-Secode);
// tostate = tostate << (31-endbit);
//
// }
//
// i = tmp >> (endbit + Secode);
// i = i << (endbit + Secode);
// j = tmp << (newsbeginbit + Secode);
// j = j >> (newsbeginbit + Secode);
//
// * targetV = (int) (i | j | tostate);
//
// if((EEncode+Secode)*(*count + 1) > 32){
// * index += 1;
// *count = 0;
// }else
// (*count)++;
//}
//
//void DecodeTransitions(unsigned int * outgoingT, int beginindex, int count, unsigned int * Tostate, unsigned int * Tevent, unsigned int OutGTe, unsigned int Statebitwidth)
//{
// int i, j;
// unsigned int tmp;
// unsigned int startbyte, endbyte;
// startbyte = (count * OutGTe)%4;
// endbyte = ((count + 1)*OutGTe)%4;
//
// if(endbyte == 0)
// endbyte = 4;
//
// tmp = outgoingT[beginindex];
//
// tmp = tmp << (startbyte - 1);
// tmp = tmp >> (startbyte + 3 - endbyte);
//
// *Tostate = (tmp << 31 - Statebitwidth) >> (31- Statebitwidth);
// *Tevent = tmp >> Statebitwidth;
//}
//
//
//
//bool GetAllsuccessors(unsigned int * AllLTS, unsigned int * Allstates, unsigned int * Alltransitions, unsigned int ltsindex, unsigned int sindex, Nodemark * result)
//{
// unsigned int statesbegin, transbegin, transborder, syncbegin;
// statesbegin = AllLTS[ltsindex];
// transbegin = Allstates[statesbegin + sindex];
// transborder = Allstates[statesbegin + sindex + 1];
//
// if(transbegin == 0 && (ltsindex != 0 || sindex !=0))
// return false;
//
// result->beginInt = transbegin;
// result->endInt = transborder - 4;
//
// result->synbeginInt = Alltransitions[transborder - 1] | Alltransitions[transborder - 2] | Alltransitions[transborder - 3] | Alltransitions[transborder - 4];
//
// transborder = Allstates[statesbegin + sindex + 2];
//
// syncbegin = Alltransitions[transborder - 1] | Alltransitions[transborder - 2] | Alltransitions[transborder - 3] | Alltransitions[transborder - 4];
//
// result->synendInt = syncbegin - 1;
// return true;
//}
//
//unsigned int GetStateinVec(int index, unsigned int svec, unsigned int * stateencodebits)
//{
// int sbeginbit, sendbit;
// unsigned int ltsid;
//
// sbeginbit = 0;
// sendbit = 0;
//
// for(int i = 0; i < index; i++){
// sbeginbit += stateencodebits[i];
// }
// sendbit = sbeginbit + stateencodebits[index] - 1;
// svec = svec << sbeginbit;
// svec = svec >> (sbeginbit + 31 - sendbit);
// ltsid = svec;
// return ltsid;
//
//}
//
//int HostGenerateStateSpace(int LTSNum, unsigned int * H_AllLTS, unsigned int * H_AllStates, unsigned int * H_AllTransitions, unsigned int * H_AllSynctrans, unsigned int ** RecordList, unsigned int RequestNum, unsigned int H_InitialStateV, unsigned int * H_LTSStateEncodeBits, unsigned int * OutgoingTEbytes, unsigned int HEventEncodeBits)
//{
// int i,j,m,k;
// int SuccessorCount;
// queue<unsigned int> Taskqueue;
// set<unsigned int> Taskset;
// vector<unsigned int> Syncqueue;
// vector<unsigned int>::iterator Syncit;
//
// vector<unsigned int> Syncevents;
// vector<unsigned int>::iterator ITS;
// bool ifexist;
//
// queue<unsigned int> VisitedS;
//
// unsigned int newStateV;
// unsigned int * succStateV;
// unsigned int * tmpStateV;
// unsigned int newState;
// unsigned int belonglts;
// unsigned int transevent;
//
// unsigned int *tmp;
//
// unsigned int tmpcount;
// unsigned int tmpoutT;
// unsigned char tmpT[4];
//
// int x,y;
//
// bool ifoutgoing;
// int ifoutgoingcount;
// Nodemark allsucc;
//
// SuccessorCount = 1;
// Taskqueue.push(H_InitialStateV);
// while(SuccessorCount < RequestNum){
// newStateV = Taskqueue.front();
// ifoutgoingcount = 0;
// for(i = 0; i < LTSNum; i++){
// ifoutgoing = false;
// GetStateinVec(i, newStateV, &newState);
// ifoutgoing = GetAllsuccessors(H_AllLTS, H_AllStates, H_AllTransitions, belonglts, newState, &allsucc);
// if(!ifoutgoing){
// ifoutgoingcount++;
// continue;
// }
//
// m = allsucc.beginInt;
// x = -1;
// y = 0;
// while(m < allsucc.endInt){
// succStateV = new unsigned int[1];
//
// if(x != m){
// tmpoutT = H_AllTransitions[m];
// tmpT[0] = (char)(tmpoutT >> 24);
// tmpT[1] = (char)(tmpoutT >> 16);
// tmpT[2] = (char)(tmpoutT >> 8);
// tmpT[3] = (char)tmpoutT;
// x = m;
// }
// NewStateV(succStateV, i, &m, &y, tmpT, H_LTSStateEncodeBits, OutgoingTEbytes[i], HEventEncodeBits );
//
// if(Taskset.insert(*succStateV).second){
// Taskqueue.push(*succStateV);
// SuccessorCount++;
// }
// }
//
// k = allsucc.synbeginInt;
// tmpcount = 0;
// x = -1;
// y = 0;
// while(k < allsucc.synendInt){
// succStateV = new unsigned int[1];
//
// DecodeTransitions(H_AllSynctrans, k, tmpcount, &newState, &transevent, OutgoingTEbytes[belonglts], H_LTSStateEncodeBits[i]);
//
// if(x != k){
// tmpoutT = H_AllSynctrans[k];
// tmpT[0] = (char)(tmpoutT >> 24);
// tmpT[1] = (char)(tmpoutT >> 16);
// tmpT[2] = (char)(tmpoutT >> 8);
// tmpT[3] = (char)tmpoutT;
// x = k;
// }
// NewStateV(succStateV, i, &k, &y, tmpT, H_LTSStateEncodeBits, OutgoingTEbytes[i], HEventEncodeBits);
//
// tmpcount++;
// j = 0;
// for(ITS = Syncevents.begin(); ITS < Syncevents.end(); ++ITS){
// if(*ITS == transevent){
// ifexist = true;
// break;
// }else
// j++;
// }
// if(ifexist){
//
// tmpStateV = (unsigned int *)&(Syncqueue[j]);
// SynTwoStatesCPU(tmpStateV, *succStateV, i, newStateV, H_LTSStateEncodeBits);
//
// }else{
// Syncevents.push_back(transevent);
// Syncqueue.push_back(*succStateV);
// SuccessorCount++;
// }
// }
// for(Syncit = Syncqueue.begin(); Syncit != Syncqueue.end(); Syncit++) {
// Taskqueue.push(*Syncit);
// }
// Syncqueue.clear();
// }
// if(ifoutgoingcount == LTSNum){
// return -1;
// }
//
// }
//
// *RecordList = new unsigned int[SuccessorCount];
// for(i = 0; i < SuccessorCount; i++){
// (*RecordList)[i] = Taskqueue.front();
// Taskqueue.pop();
// }
//
// return SuccessorCount;
//}
void CallCudaBFS(unsigned int * AllLTS, unsigned int * AllStates, unsigned int * AllTransitions, unsigned int* AllSyncTrans, unsigned int H_InitialSV, unsigned int * H_LTSStateEncodeBits, unsigned int LTSNum,unsigned int AllLTSStateNum, unsigned int AllTransLength, unsigned int AllSyncTransLength, unsigned int EventEncodeBits, unsigned int * OutgoingTEbytes)
{
int i,j;
unsigned int * G_AllLTS;
unsigned int * G_AllStates;
unsigned int * G_AllTransitions;
unsigned int * G_AllSyncTrans; //all trans with sync events.
//unsigned int * G_InitialStateV;
unsigned int * G_OutgoingTEbytes;
unsigned int * G_LTSStateEncodeBits;
unsigned int * G_DetectResult;
//Choose to generate some statevectors firstly in CPU---OPTIONAL
unsigned int * G_Startlist;
//Choose to generate some statevectors firstly in CPU---OPTIONAL
unsigned int * H_Startlist;
unsigned int * H_Result;
unsigned int H_startsize;
unsigned int * LTSStateNum = new unsigned int[LTSNum];
unsigned int Startblocknum;
unsigned int Startthreadnum1block;
unsigned int Startthreadgroupnum;
unsigned int H_GlobalbucketNum;
int * parameters = new int[2];
parameters[0]=4;
parameters[1]=12;
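	//parameters[0] feeds HashNum (number of hash functions per table) and parameters[1] feeds IterationTime; see the cudaMemcpyToSymbol calls below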
//unsigned int * G_GlobalbucketNum;
int rv[10];
srand(time(NULL));
for(i = 0; i < 10; i++){
rv[i] = rand();
}
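	//these ten random values seed the constant-memory hashing coefficients below: LA*/LB* appear to drive the in-block hash functions and BUCA/BUCB the bucket selection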
cudaSetDevice(0);
H_Result = new unsigned int[1];
Startthreadnum1block = 512;
Startblocknum = 1;
//Initialize Startlist
Startthreadgroupnum = (((Startthreadnum1block/32)/LTSNum)*(Startthreadnum1block/32))*Startblocknum; //initial value, not the final one?
//i = HostGenerateStateSpace(LTSNum, AllLTS,AllStates,AllTransitions, AllSyncTrans, &H_Startlist, 1, H_InitialSV,H_LTSStateEncodeBits, OutgoingTEbytes, EventEncodeBits);
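	//with HostGenerateStateSpace disabled, i still holds the value left by the rand() loop above, so the branch below does not reflect a real host-side pre-expansion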
if(i > 0){
j = i * LTSNum;
if(i > Startthreadgroupnum){
Startthreadgroupnum = i;
Startblocknum = Startthreadgroupnum/(Startthreadnum1block/LTSNum);
}
}else if(i == -1){
cout<<"deadlock being detected";
exit(0);
}
Startthreadgroupnum = 1;
H_Startlist = new unsigned int[1];
H_Startlist[0] = H_InitialSV;
H_GlobalbucketNum = LTSNum * 2;
cudaMalloc((void **)&G_AllLTS, sizeof(unsigned int) * LTSNum);
cudaMalloc((void **)&G_AllStates, sizeof(unsigned int) * (AllLTSStateNum+1));
cudaMalloc((void **)&G_AllTransitions, sizeof(unsigned int) * AllTransLength);
cudaMalloc((void **)&G_AllSyncTrans,sizeof(unsigned int) * AllSyncTransLength);
cudaMalloc((void **)&G_OutgoingTEbytes, sizeof(unsigned int) * LTSNum);
cudaMalloc((void **)&G_LTSStateEncodeBits, sizeof(unsigned int) * LTSNum);
cudaMalloc((void **)&G_Startlist, sizeof(unsigned int) * Startthreadgroupnum);
//cudaMalloc((unsigned int *)&G_InitialStateV, sizeof(int));
cudaMalloc((void **)&G_DetectResult, sizeof(unsigned int));
cudaMemcpy(G_AllLTS, AllLTS, sizeof(unsigned int) * LTSNum, cudaMemcpyHostToDevice);
cudaMemcpy(G_AllStates, AllStates, sizeof(unsigned int) * (AllLTSStateNum+1), cudaMemcpyHostToDevice);
cudaMemcpy(G_AllTransitions, AllTransitions, sizeof(unsigned int) * AllTransLength, cudaMemcpyHostToDevice);
cudaMemcpy(G_AllSyncTrans, AllSyncTrans, sizeof(unsigned int) * AllSyncTransLength, cudaMemcpyHostToDevice);
//cudaMemcpy(G_InitialStateV, &H_InitialSV, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(G_LTSStateEncodeBits, H_LTSStateEncodeBits, sizeof(unsigned int) * LTSNum, cudaMemcpyHostToDevice);
cudaMemcpy(G_Startlist, H_Startlist, sizeof(unsigned int) * Startthreadgroupnum, cudaMemcpyHostToDevice);
cudaMemcpy(G_OutgoingTEbytes, OutgoingTEbytes, sizeof(unsigned int)*LTSNum, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(LA1, &rv[0], sizeof(int));
cudaMemcpyToSymbol(LB1, &rv[1], sizeof(int));
cudaMemcpyToSymbol(LA2, &rv[2], sizeof(int));
cudaMemcpyToSymbol(LB2, &rv[3], sizeof(int));
cudaMemcpyToSymbol(LA3, &rv[4], sizeof(int));
cudaMemcpyToSymbol(LB3, &rv[5], sizeof(int));
cudaMemcpyToSymbol(LA4, &rv[6], sizeof(int));
cudaMemcpyToSymbol(LB4, &rv[7], sizeof(int));
cudaMemcpyToSymbol(BUCA, &rv[8], sizeof(int));
cudaMemcpyToSymbol(BUCB, &rv[9], sizeof(int));
for(i = 0; i < 8; i++){
rv[i] = rand();
}
i = 512;
cudaMemcpyToSymbol(GA1, &rv[0], sizeof(int));
	cudaMemcpyToSymbol(GB1, &rv[1], sizeof(int));
cudaMemcpyToSymbol(GA2, &rv[2], sizeof(int));
cudaMemcpyToSymbol(GB2, &rv[3], sizeof(int));
cudaMemcpyToSymbol(GA3, &rv[4], sizeof(int));
cudaMemcpyToSymbol(GB3, &rv[5], sizeof(int));
cudaMemcpyToSymbol(GA4, &rv[6], sizeof(int));
cudaMemcpyToSymbol(GB4, &rv[7], sizeof(int));
cudaMemcpyToSymbol(TableSize, &i, sizeof(int));
cudaMemcpyToSymbol(HashNum, ¶meters[0],sizeof(int));
cudaMemcpyToSymbol(IterationTime, ¶meters[1], sizeof(int));
//bind data to texture
cudaBindTexture(NULL, LTSOFFSET, G_AllLTS, sizeof(unsigned int)*LTSNum);
cudaBindTexture(NULL, STATEOFFSET, G_AllStates, sizeof(unsigned int)*(AllLTSStateNum+1));
	cudaBindTexture(NULL, OUTGOINGDETAIL, G_AllTransitions, sizeof(unsigned int)*AllTransLength); //how texture memory can accelerate the access rate still needs to be explored
cudaBindTexture(NULL, SYNCOUTGOING, G_AllSyncTrans, sizeof(unsigned int)*AllSyncTransLength);
cudaBindTexture(NULL, LTSSTATEBITS, G_LTSStateEncodeBits, sizeof(unsigned int)* LTSNum);
cudaBindTexture(NULL, TRANSEBYTES, G_OutgoingTEbytes, sizeof(unsigned int)*LTSNum);
dim3 g(1,1,1);
dim3 b(512,1,1);
H_startsize = 1;
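	//single block of 512 threads; the third launch argument reserves 5120 unsigned ints of dynamic shared memory for the extern __shared__ arrays carved out inside the kernel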
CUDADeadlockBFSVerify<<<g, b, 5120*sizeof(unsigned int)>>>( G_AllLTS, G_AllStates, G_AllTransitions, G_AllSyncTrans, G_Startlist, G_LTSStateEncodeBits, EventEncodeBits, G_OutgoingTEbytes, LTSNum, G_DetectResult, H_GlobalbucketNum, AllLTSStateNum, H_startsize);
cudaMemcpy(H_Result, G_DetectResult, sizeof(unsigned int), cudaMemcpyDeviceToHost);
cout<<"Result"<<*H_Result<<endl;
cudaUnbindTexture(LTSOFFSET);
cudaUnbindTexture(STATEOFFSET);
cudaUnbindTexture(OUTGOINGDETAIL);
cudaUnbindTexture(SYNCOUTGOING);
cudaUnbindTexture(LTSSTATEBITS);
cudaUnbindTexture(TRANSEBYTES);
cudaFree(G_AllLTS);
cudaFree(G_AllStates);
cudaFree(G_AllTransitions);
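	//G_AllSyncTrans, G_Startlist, G_LTSStateEncodeBits, G_OutgoingTEbytes and G_DetectResult are not freed here; they are reclaimed when the CUDA context is destroyed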
	//cudaFree(GlobalBuckets);
//cudaFree(GlobalOpenHash);
//cudaFree(GlobalVisitedHash);
free(AllLTS);
free(AllStates);
free(AllTransitions);
}
int main()
{
//read data from file
int i;
unsigned int * AllLTS;
unsigned int * AllStates;
unsigned int * AllTransitions;
unsigned int * AllSyncTrans;
unsigned int InitialV;
unsigned int LTSNum;
unsigned int StatesNUM;
unsigned int AlltransNum;
unsigned int AllsynctransNum;
//unsigned int Synindexencodebyte;
//unsigned int LTSEncodebyte;
unsigned int EventEncodebits;
unsigned int * LTSStateEncodebits;
unsigned int * OutgoingTEbytes;
ifstream file1; //for all LTS
ifstream file2; //for All states
ifstream file3; //for all trans
ifstream file4; //for all sync trans;
ifstream file5; //for other parameters
file1.open("../test/encode/alllts.txt");
file2.open("../test/encode/allstates.txt");
file3.open("../test/encode/alltrans.txt");
file4.open("../test/encode/allsynctrans.txt");
file5.open("../test/encode/parameters.txt");
//parameters
file5>>InitialV;
file5>>LTSNum;
file5>>StatesNUM;
file5>>AlltransNum;
file5>>AllsynctransNum;
//file5>>Synindexencodebyte;
//file5>>LTSEncodebyte;
file5>>EventEncodebits;
LTSStateEncodebits = new unsigned int[LTSNum];
OutgoingTEbytes = new unsigned int[LTSNum];
for(i=0; i < LTSNum; i++){
file5>>LTSStateEncodebits[i];
}
for(i=0; i < LTSNum; i++){
file5>>OutgoingTEbytes[i];
}
AllLTS = new unsigned int[LTSNum];
AllStates = new unsigned int[StatesNUM + 1];
AllTransitions = new unsigned int[AlltransNum];
AllSyncTrans = new unsigned int[AllsynctransNum];
file5.close();
for(i=0; i <LTSNum; i++){
file1>>AllLTS[i];
}
file1.close();
for(i=0; i < StatesNUM+1; i++){
file2>>AllStates[i];
}
file2.close();
for(i=0; i < AlltransNum; i++){
file3>>AllTransitions[i];
}
file3.close();
for(i=0; i < AllsynctransNum; i++){
file4>>AllSyncTrans[i];
}
file4.close();
CallCudaBFS(AllLTS,AllStates,AllTransitions,AllSyncTrans,InitialV,LTSStateEncodebits, LTSNum, StatesNUM,AlltransNum,AllsynctransNum,EventEncodebits, OutgoingTEbytes);
}
|
e8a5e13e7a6479dce2a2bd04e34690be7d164971.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#define LAMBERT_SHADING 1
#define TILE_BASED_RASTERIZATION 1
#define SSAA_LEVEL 2
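// render internally at SSAA_LEVEL x the output resolution in each dimension; sendImageToPBO box-filters the samples back down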
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
		glm::vec3 eyeNor;	// eye space normal used for shading, because normals are not preserved by the perspective transformation
glm::vec2 texcoord0;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
TextureData* diffuse_tex = nullptr;
int diffuse_tex_width, diffuse_tex_height;
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* diffuse_tex;
int diffuse_tex_width;
int diffuse_tex_height;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int output_width = 0;
static int output_height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
hipEvent_t t_start, t_stop;
#if TILE_BASED_RASTERIZATION
// FIXME: tile size is hard to manage
static int tile_w_count = 0;
static int tile_h_count = 0;
const int tile_width = 16;
const int tile_height = 16;
const int max_tile_prim_count = 128;
static Primitive * dev_tile_primitives = nullptr;
static int * dev_tile_prim_counts = nullptr;
#endif
static int * dev_frag_mutex = nullptr;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, int render_w, int render_h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color (0.f);
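		// average the SSAA_LEVEL x SSAA_LEVEL block of render-resolution samples that covers this output pixel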
for (int render_x = SSAA_LEVEL * x; render_x < SSAA_LEVEL * x + SSAA_LEVEL; render_x++)
{
for (int render_y = SSAA_LEVEL * y; render_y < SSAA_LEVEL * y + SSAA_LEVEL; render_y++)
{
auto fbuffer_index = render_x + render_y * w * SSAA_LEVEL;
color.x = color.x + glm::clamp(image[fbuffer_index].x, 0.0f, 1.0f) * 255.0;
color.y = color.y + glm::clamp(image[fbuffer_index].y, 0.0f, 1.0f) * 255.0;
color.z = color.z + glm::clamp(image[fbuffer_index].z, 0.0f, 1.0f) * 255.0;
}
}
color /= (SSAA_LEVEL * SSAA_LEVEL);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, const Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
// DONE: add your fragment shader code here
auto frag = fragmentBuffer[index]; // copy to local mem
glm::vec3 color;
// Base Color
if (!frag.diffuse_tex)
{
color = frag.color;
}
else
{
int tx = static_cast<int>(frag.texcoord0.x * frag.diffuse_tex_width) % frag.diffuse_tex_width;
if (tx < 0) { tx += frag.diffuse_tex_width; }
int ty = static_cast<int>(frag.texcoord0.y * frag.diffuse_tex_height) % frag.diffuse_tex_height;
if (ty < 0) { ty += frag.diffuse_tex_height; }
int pixel_index = 3 * (tx + ty * frag.diffuse_tex_width);
color = glm::vec3(
frag.diffuse_tex[pixel_index] / 255.f,
frag.diffuse_tex[pixel_index + 1] / 255.f,
frag.diffuse_tex[pixel_index + 2] / 255.f
);
}
// Lighting
#if LAMBERT_SHADING
const auto light_dir = glm::vec3(1.f, 1.f, 1.f);
color *= fmaxf(0.f, glm::dot(light_dir, frag.eyeNor));
#endif
// output
framebuffer[index] = color;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
output_width = w;
output_height = h;
width = w * SSAA_LEVEL;
height = h * SSAA_LEVEL;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
#if TILE_BASED_RASTERIZATION
tile_w_count = (width - 1) / tile_width + 1;
tile_h_count = (height - 1) / tile_height + 1;
if (dev_tile_primitives) { hipFree(dev_tile_primitives); }
hipMalloc(&dev_tile_primitives
, tile_w_count * tile_h_count * max_tile_prim_count * sizeof(dev_tile_primitives[0]));
if (dev_tile_prim_counts) { hipFree(dev_tile_prim_counts); }
hipMalloc(&dev_tile_prim_counts
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
hipMemset(dev_tile_prim_counts, 0
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
#endif
if (dev_frag_mutex) { hipFree(dev_frag_mutex); }
hipMalloc(&dev_frag_mutex, width * height * sizeof(dev_frag_mutex[0]));
hipMemset(dev_frag_mutex, 0, width * height * sizeof(dev_frag_mutex[0]));
checkCUDAError("rasterizeInit");
hipEventCreate(&t_start);
hipEventCreate(&t_stop);
}
__global__
void initDepthAndMutex(int w, int h, int * depth, int* mutex)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
mutex[index] = 0;
}
}
/**
 * kernel function with stride support, used in place of hipMemcpy when the source buffer is strided
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
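			// note: glTF stores rotation as (x, y, z, w); the w component (n.rotation[3]) is not copied here, so q.w keeps its default-constructed value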
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, materail, and each attributes
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
					// We assume the glTF model attribute type is 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materails
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid >= numVertices) { return; }
// DONE: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
auto pos = MVP * glm::vec4(primitive.dev_position[vid], 1.f);
// Then divide the pos by its w element to transform into NDC space
pos /= pos.w;
// Finally transform x and y to viewport space
pos.x = (- pos.x * 0.5f + 0.5f) * width;
pos.y = (- pos.y * 0.5f + 0.5f) * height;
// DONE: Apply vertex assembly here
// Assemble all attribute arraies into the primitive array
auto eye_pos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.f));
auto eye_normal = glm::normalize(MV_normal * primitive.dev_normal[vid]);
VertexAttributeTexcoord tex_coord(0.f);
if (primitive.dev_texcoord0)
{
tex_coord = primitive.dev_texcoord0[vid];
}
auto tex_diffuse = primitive.dev_diffuseTex;
auto& v_out = primitive.dev_verticesOut[vid];
v_out.pos = pos;
v_out.eyePos = eye_pos;
v_out.eyeNor = eye_normal;
v_out.texcoord0 = tex_coord;
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// DONE: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
auto& out_primitive = dev_primitives[pid + curPrimitiveBeginId];
auto v_index = iid % (int)primitive.primitiveType;
out_primitive.v[v_index] = primitive.dev_verticesOut[primitive.dev_indices[iid]];
out_primitive.primitiveType = primitive.primitiveType;
if (v_index == 0)
{
out_primitive.diffuse_tex = primitive.dev_diffuseTex;
out_primitive.diffuse_tex_width = primitive.diffuseTexWidth;
out_primitive.diffuse_tex_height = primitive.diffuseTexHeight;
}
}
// TODO: other primitive types (point, line)
}
}
__device__ void rasterizeTriangleFrag(
const Primitive& primitive
, const glm::vec3 tri[]
, int x
, int y
, int width
, int height
, Fragment * frag_buffer
, int * depth_buffer
, int * mutex_buffer
)
{
if (!(x >= 0 && x < width && y >= 0 && y < height))
{
return;
}
auto bary_coord = calculateBarycentricCoordinate(tri, glm::vec2(x, y));
if (!isBarycentricCoordInBounds(bary_coord))
{
return;
}
auto frag_index = x + y * width;
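// Quantize the interpolated depth to an int key (smaller key = closer fragment).
// The tiled path below can use a plain compare/store because exactly one thread
// writes each pixel of its tile; the non-tiled path has many primitive threads
// racing per pixel, so it resolves visibility with atomicMin and only keeps the
// fragment if it still owns the minimum afterwards.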
int depth = -getZAtCoordinate(bary_coord, tri) * INT_MAX;
//// lock mutex
//while (true)
//{
// if (atomicCAS(mutex_buffer + frag_index, 0, 1) == 0)
// {
// // mutex locked
// if (depth < depth_buffer[frag_index])
// {
// depth_buffer[frag_index] = depth;
// frag_buffer[frag_index].color = glm::vec3(1.f);
// frag_buffer[frag_index].diffuse_tex = primitive.diffuse_tex;
// frag_buffer[frag_index].diffuse_tex_height = primitive.diffuse_tex_height;
// frag_buffer[frag_index].diffuse_tex_width = primitive.diffuse_tex_width;
// frag_buffer[frag_index].eyePos = baryInterpolate(bary_coord, primitive.v[0].eyePos, primitive.v[1].eyePos, primitive.v[2].eyePos);
// frag_buffer[frag_index].eyeNor = baryInterpolate(bary_coord, primitive.v[0].eyeNor, primitive.v[1].eyeNor, primitive.v[2].eyeNor);
// frag_buffer[frag_index].texcoord0 = baryInterpolate(bary_coord, primitive.v[0].texcoord0, primitive.v[1].texcoord0, primitive.v[2].texcoord0);
// }
// // unlock mutex
// atomicExch(mutex_buffer + frag_index, 0);
// break;
// }
//}
//atomicExch(mutex_buffer + frag_index, 0);
#if TILE_BASED_RASTERIZATION
if (depth > depth_buffer[frag_index]) { return; }
depth_buffer[frag_index] = depth;
#else
atomicMin(&depth_buffer[frag_index], depth);
if (depth != depth_buffer[frag_index])
{
return;
}
#endif
frag_buffer[frag_index].color = glm::vec3(0.5f);
frag_buffer[frag_index].diffuse_tex = primitive.diffuse_tex;
frag_buffer[frag_index].diffuse_tex_height = primitive.diffuse_tex_height;
frag_buffer[frag_index].diffuse_tex_width = primitive.diffuse_tex_width;
//interpolate
frag_buffer[frag_index].eyePos = baryInterpolate(bary_coord, primitive.v[0].eyePos, primitive.v[1].eyePos, primitive.v[2].eyePos);
frag_buffer[frag_index].eyeNor = baryInterpolate(bary_coord, primitive.v[0].eyeNor, primitive.v[1].eyeNor, primitive.v[2].eyeNor);
frag_buffer[frag_index].texcoord0 = baryInterpolate(bary_coord, primitive.v[0].texcoord0, primitive.v[1].texcoord0, primitive.v[2].texcoord0);
}
#if TILE_BASED_RASTERIZATION
//static int tile_w_count = 0;
//static int tile_h_count = 0;
//
//const int tile_width = 64;
//const int tile_height = 64;
//
//const int max_tile_prim_count = 32;
//static Primitive * dev_tile_primitives = nullptr;
//static int * dev_tile_prim_counts = nullptr;
__global__ void addPrimitivesToTiles(
int num_primitives
, const Primitive* primitives
, int width
, int height
, int tile_width
, int tile_height
, int tile_prim_count_limit
, Primitive* tile_primitives
, int * tile_prim_counts
)
{
// index id
auto pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= num_primitives) { return; }
// copy primitive data to local memory
auto primitive = primitives[pid];
if (primitive.primitiveType == PrimitiveType::Triangle)
{
glm::vec2 aabb_min = {
fmaxf(fminf(fminf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , 0)
, fmaxf(fminf(fminf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , 0)
};
glm::vec2 aabb_max = {
fminf(fmaxf(fmaxf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , width - 1)
, fminf(fmaxf(fmaxf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , height - 1)
};
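// Map the clamped screen-space AABB to the inclusive range of tiles it overlaps;
// each covered tile reserves a slot in its primitive list with atomicAdd. When a
// tile is already full (prim_slot >= tile_prim_count_limit) the primitive is
// dropped for that tile, which can show up as missing geometry in dense regions
// (see the TODO below).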
auto min_x_tile = static_cast<int>(aabb_min.x) / tile_width;
auto min_y_tile = static_cast<int>(aabb_min.y) / tile_height;
auto max_x_tile = static_cast<int>(aabb_max.x) / tile_width;
auto max_y_tile = static_cast<int>(aabb_max.y) / tile_height;
auto tile_x_count = (width - 1) / tile_width + 1;
for (int tx = min_x_tile; tx <= max_x_tile; tx++)
{
for (int ty = min_y_tile; ty <= max_y_tile; ty++)
{
auto tile_id = tx + ty * tile_x_count;
auto prim_slot = atomicAdd(tile_prim_counts + tile_id, 1);
if (prim_slot >= tile_prim_count_limit)
{
continue;
// TODO: make tile able to contain more primitives somehow
}
tile_primitives[tile_id * tile_prim_count_limit + prim_slot] = primitive;
}
}
}
}
__global__ void kernRasterizeTiles(
int tile_x_count
, int tile_y_count
, int tile_width
, int tile_height
, int width
, int height
, Primitive* tile_primitives
, int * tile_prim_counts
, int tile_prim_count_limit
, Fragment * frag_buffer
, int * depth_buffer
)
{
int tx = (blockIdx.x * blockDim.x) + threadIdx.x;
int ty = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(tx >= 0 && tx < tile_x_count && ty >= 0 && ty < tile_y_count))
{
return;
}
int index = tx + (ty * tile_x_count);
int x_begin = tx * tile_width;
int x_end = glm::min(x_begin + tile_width, width);
int y_begin = ty * tile_height;
int y_end = glm::min(y_begin + tile_height, height);
auto prim_count = glm::min(tile_prim_counts[index], tile_prim_count_limit);
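// Each thread owns one whole tile: it walks every covered pixel and tests it
// against every primitive binned to this tile, so no other thread ever writes
// these pixels and the depth test needs no atomics.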
for (int y = y_begin; y < y_end; y++)
{
for (int x = x_begin; x < x_end; x++)
{
for (int i = 0; i < prim_count; i++)
{
auto& prim = tile_primitives[index * tile_prim_count_limit + i];
glm::vec3 tri_pos[3] = { glm::vec3(prim.v[0].pos)
, glm::vec3(prim.v[1].pos)
, glm::vec3(prim.v[2].pos)
};
rasterizeTriangleFrag(prim, tri_pos, x, y, width, height, frag_buffer, depth_buffer, nullptr);
}
}
}
}
#else
__global__ void kernRasterizePrimitives(
int num_primitives
, const Primitive* primitives
, int width
, int height
, Fragment * frag_buffer
, int * depth_buffer
, int * mutex_buffer
)
{
// index id
auto pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= num_primitives) { return; }
// copy primitive data to local memory
auto primitive = primitives[pid];
if (primitive.primitiveType == PrimitiveType::Triangle)
{
glm::vec2 aabb_min = {
fmaxf(fminf(fminf( primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , 0)
, fmaxf(fminf(fminf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , 0)
};
glm::vec2 aabb_max = {
fminf(fmaxf(fmaxf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , width - 1)
, fminf(fmaxf(fmaxf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , height - 1)
};
// TODO: CUDA Dynamic Parallelism?
glm::vec3 tri_pos[3] = { glm::vec3(primitive.v[0].pos)
, glm::vec3(primitive.v[1].pos)
, glm::vec3(primitive.v[2].pos)
};
for (int x = aabb_min.x; x <= static_cast<int>(aabb_max.x); x++)
{
for (int y = aabb_min.y; y <= static_cast<int>(aabb_max.y); y++)
{
rasterizeTriangleFrag(primitive, tri_pos, x, y, width, height, frag_buffer, depth_buffer, mutex_buffer);
}
}
}
}
#endif
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal, float* deltatime) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
hipEventRecord(t_start);
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepthAndMutex << <blockCount2d, blockSize2d >> >(width, height, dev_depth, dev_frag_mutex);
// TODO: rasterize
{
#if TILE_BASED_RASTERIZATION
hipMemset(dev_tile_prim_counts, 0
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
addPrimitivesToTiles << <numBlocksForPrimitives, numThreadsPerBlock >> > (
totalNumPrimitives
, dev_primitives
, width
, height
, tile_width
, tile_height
, max_tile_prim_count
, dev_tile_primitives
, dev_tile_prim_counts
);
checkCUDAError("addPrimitivesToTiles");
dim3 tile_blockSize2d(8, 8);
dim3 tile_blockCount2d((tile_w_count - 1) / tile_blockSize2d.x + 1,
(tile_h_count - 1) / tile_blockSize2d.y + 1);
kernRasterizeTiles << <tile_blockCount2d, tile_blockSize2d >> >(
tile_w_count
, tile_h_count
, tile_width
, tile_height
, width
, height
, dev_tile_primitives
, dev_tile_prim_counts
, max_tile_prim_count
, dev_fragmentBuffer
, dev_depth
);
checkCUDAError("kernRasterizeTiles");
#else
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
kernRasterizePrimitives << <numBlocksForPrimitives, numThreadsPerBlock >> >(totalNumPrimitives, dev_primitives, width, height, dev_fragmentBuffer, dev_depth, dev_frag_mutex);
checkCUDAError("Rasterization");
#endif
}
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, output_width, output_height, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
hipEventRecord(t_stop);
hipEventSynchronize(t_stop);
hipEventElapsedTime(deltatime, t_start, t_stop);
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
#if TILE_BASED_RASTERIZATION
hipFree(dev_tile_primitives);
dev_tile_primitives = nullptr;
hipFree(dev_tile_prim_counts);
dev_tile_prim_counts = nullptr;
#endif
hipFree(dev_frag_mutex);
dev_frag_mutex = nullptr;
checkCUDAError("rasterize Free");
hipEventDestroy(t_start);
hipEventDestroy(t_stop);
}
| e8a5e13e7a6479dce2a2bd04e34690be7d164971.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
#define LAMBERT_SHADING 1
#define TILE_BASED_RASTERIZATION 1
#define SSAA_LEVEL 2
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, because the normal would be wrong after the perspective transformation
glm::vec2 texcoord0;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
TextureData* diffuse_tex = nullptr;
int diffuse_tex_width, diffuse_tex_height;
};
struct Fragment {
glm::vec3 color;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
VertexAttributeTexcoord texcoord0;
TextureData* diffuse_tex;
int diffuse_tex_width;
int diffuse_tex_height;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int output_width = 0;
static int output_height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
cudaEvent_t t_start, t_stop;
#if TILE_BASED_RASTERIZATION
// FIXME: tile size is hard to manage
static int tile_w_count = 0;
static int tile_h_count = 0;
const int tile_width = 16;
const int tile_height = 16;
const int max_tile_prim_count = 128;
static Primitive * dev_tile_primitives = nullptr;
static int * dev_tile_prim_counts = nullptr;
#endif
static int * dev_frag_mutex = nullptr;
static int * dev_depth = NULL; // you might need this buffer when doing depth test
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, int render_w, int render_h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color (0.f);
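// SSAA resolve: average the SSAA_LEVEL x SSAA_LEVEL supersampled fragments that
// map to this output pixel (a simple box filter) and convert to 8-bit colour.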
for (int render_x = SSAA_LEVEL * x; render_x < SSAA_LEVEL * x + SSAA_LEVEL; render_x++)
{
for (int render_y = SSAA_LEVEL * y; render_y < SSAA_LEVEL * y + SSAA_LEVEL; render_y++)
{
auto fbuffer_index = render_x + render_y * w * SSAA_LEVEL;
color.x = color.x + glm::clamp(image[fbuffer_index].x, 0.0f, 1.0f) * 255.0;
color.y = color.y + glm::clamp(image[fbuffer_index].y, 0.0f, 1.0f) * 255.0;
color.z = color.z + glm::clamp(image[fbuffer_index].z, 0.0f, 1.0f) * 255.0;
}
}
color /= (SSAA_LEVEL * SSAA_LEVEL);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, const Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h)
{
// DONE: add your fragment shader code here
auto frag = fragmentBuffer[index]; // copy to local mem
glm::vec3 color;
// Base Color
if (!frag.diffuse_tex)
{
color = frag.color;
}
else
{
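// Nearest-neighbour texture fetch with repeat (wrap) addressing: bring the
// texcoord into [0, width) x [0, height), handling negative remainders, and
// index tightly packed 8-bit RGB texels.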
int tx = static_cast<int>(frag.texcoord0.x * frag.diffuse_tex_width) % frag.diffuse_tex_width;
if (tx < 0) { tx += frag.diffuse_tex_width; }
int ty = static_cast<int>(frag.texcoord0.y * frag.diffuse_tex_height) % frag.diffuse_tex_height;
if (ty < 0) { ty += frag.diffuse_tex_height; }
int pixel_index = 3 * (tx + ty * frag.diffuse_tex_width);
color = glm::vec3(
frag.diffuse_tex[pixel_index] / 255.f,
frag.diffuse_tex[pixel_index + 1] / 255.f,
frag.diffuse_tex[pixel_index + 2] / 255.f
);
}
// Lighting
#if LAMBERT_SHADING
const auto light_dir = glm::vec3(1.f, 1.f, 1.f);
color *= fmaxf(0.f, glm::dot(light_dir, frag.eyeNor));
#endif
// output
framebuffer[index] = color;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
output_width = w;
output_height = h;
width = w * SSAA_LEVEL;
height = h * SSAA_LEVEL;
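// All intermediate buffers (fragments, framebuffer, depth, mutex, tiles) are
// allocated at this supersampled resolution; only the PBO written in
// sendImageToPBO stays at the requested output resolution.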
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
#if TILE_BASED_RASTERIZATION
tile_w_count = (width - 1) / tile_width + 1;
tile_h_count = (height - 1) / tile_height + 1;
if (dev_tile_primitives) { cudaFree(dev_tile_primitives); }
cudaMalloc(&dev_tile_primitives
, tile_w_count * tile_h_count * max_tile_prim_count * sizeof(dev_tile_primitives[0]));
if (dev_tile_prim_counts) { cudaFree(dev_tile_prim_counts); }
cudaMalloc(&dev_tile_prim_counts
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
cudaMemset(dev_tile_prim_counts, 0
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
#endif
if (dev_frag_mutex) { cudaFree(dev_frag_mutex); }
cudaMalloc(&dev_frag_mutex, width * height * sizeof(dev_frag_mutex[0]));
cudaMemset(dev_frag_mutex, 0, width * height * sizeof(dev_frag_mutex[0]));
checkCUDAError("rasterizeInit");
cudaEventCreate(&t_start);
cudaEventCreate(&t_stop);
}
__global__
void initDepthAndMutex(int w, int h, int * depth, int* mutex)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
mutex[index] = 0;
}
}
/**
* kern function with support for stride to sometimes replace cudaMemcpy
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, materail, and each attributes
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid >= numVertices) { return; }
// DONE: Apply vertex transformation here
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
auto pos = MVP * glm::vec4(primitive.dev_position[vid], 1.f);
// Then divide the pos by its w element to transform into NDC space
pos /= pos.w;
// Finally transform x and y to viewport space
pos.x = (- pos.x * 0.5f + 0.5f) * width;
pos.y = (- pos.y * 0.5f + 0.5f) * height;
// DONE: Apply vertex assembly here
// Assemble all attribute arrays into the primitive array
auto eye_pos = glm::vec3(MV * glm::vec4(primitive.dev_position[vid], 1.f));
auto eye_normal = glm::normalize(MV_normal * primitive.dev_normal[vid]);
VertexAttributeTexcoord tex_coord(0.f);
if (primitive.dev_texcoord0)
{
tex_coord = primitive.dev_texcoord0[vid];
}
auto tex_diffuse = primitive.dev_diffuseTex;
auto& v_out = primitive.dev_verticesOut[vid];
v_out.pos = pos;
v_out.eyePos = eye_pos;
v_out.eyeNor = eye_normal;
v_out.texcoord0 = tex_coord;
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
// DONE: uncomment the following code for a start
// This is primitive assembly for triangles
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES)
{
pid = iid / (int)primitive.primitiveType;
auto& out_primitive = dev_primitives[pid + curPrimitiveBeginId];
auto v_index = iid % (int)primitive.primitiveType;
out_primitive.v[v_index] = primitive.dev_verticesOut[primitive.dev_indices[iid]];
out_primitive.primitiveType = primitive.primitiveType;
if (v_index == 0)
{
out_primitive.diffuse_tex = primitive.dev_diffuseTex;
out_primitive.diffuse_tex_width = primitive.diffuseTexWidth;
out_primitive.diffuse_tex_height = primitive.diffuseTexHeight;
}
}
// TODO: other primitive types (point, line)
}
}
__device__ void rasterizeTriangleFrag(
const Primitive& primitive
, const glm::vec3 tri[]
, int x
, int y
, int width
, int height
, Fragment * frag_buffer
, int * depth_buffer
, int * mutex_buffer
)
{
if (!(x >= 0 && x < width && y >= 0 && y < height))
{
return;
}
auto bary_coord = calculateBarycentricCoordinate(tri, glm::vec2(x, y));
if (!isBarycentricCoordInBounds(bary_coord))
{
return;
}
auto frag_index = x + y * width;
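// Quantize the interpolated depth to an int key (smaller key = closer fragment).
// The tiled path below can use a plain compare/store because exactly one thread
// writes each pixel of its tile; the non-tiled path has many primitive threads
// racing per pixel, so it resolves visibility with atomicMin and only keeps the
// fragment if it still owns the minimum afterwards.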
int depth = -getZAtCoordinate(bary_coord, tri) * INT_MAX;
//// lock mutex
//while (true)
//{
// if (atomicCAS(mutex_buffer + frag_index, 0, 1) == 0)
// {
// // mutex locked
// if (depth < depth_buffer[frag_index])
// {
// depth_buffer[frag_index] = depth;
// frag_buffer[frag_index].color = glm::vec3(1.f);
// frag_buffer[frag_index].diffuse_tex = primitive.diffuse_tex;
// frag_buffer[frag_index].diffuse_tex_height = primitive.diffuse_tex_height;
// frag_buffer[frag_index].diffuse_tex_width = primitive.diffuse_tex_width;
// frag_buffer[frag_index].eyePos = baryInterpolate(bary_coord, primitive.v[0].eyePos, primitive.v[1].eyePos, primitive.v[2].eyePos);
// frag_buffer[frag_index].eyeNor = baryInterpolate(bary_coord, primitive.v[0].eyeNor, primitive.v[1].eyeNor, primitive.v[2].eyeNor);
// frag_buffer[frag_index].texcoord0 = baryInterpolate(bary_coord, primitive.v[0].texcoord0, primitive.v[1].texcoord0, primitive.v[2].texcoord0);
// }
// // unlock mutex
// atomicExch(mutex_buffer + frag_index, 0);
// break;
// }
//}
//atomicExch(mutex_buffer + frag_index, 0);
#if TILE_BASED_RASTERIZATION
if (depth > depth_buffer[frag_index]) { return; }
depth_buffer[frag_index] = depth;
#else
atomicMin(&depth_buffer[frag_index], depth);
if (depth != depth_buffer[frag_index])
{
return;
}
#endif
frag_buffer[frag_index].color = glm::vec3(0.5f);
frag_buffer[frag_index].diffuse_tex = primitive.diffuse_tex;
frag_buffer[frag_index].diffuse_tex_height = primitive.diffuse_tex_height;
frag_buffer[frag_index].diffuse_tex_width = primitive.diffuse_tex_width;
//interpolate
frag_buffer[frag_index].eyePos = baryInterpolate(bary_coord, primitive.v[0].eyePos, primitive.v[1].eyePos, primitive.v[2].eyePos);
frag_buffer[frag_index].eyeNor = baryInterpolate(bary_coord, primitive.v[0].eyeNor, primitive.v[1].eyeNor, primitive.v[2].eyeNor);
frag_buffer[frag_index].texcoord0 = baryInterpolate(bary_coord, primitive.v[0].texcoord0, primitive.v[1].texcoord0, primitive.v[2].texcoord0);
}
#if TILE_BASED_RASTERIZATION
//static int tile_w_count = 0;
//static int tile_h_count = 0;
//
//const int tile_width = 64;
//const int tile_height = 64;
//
//const int max_tile_prim_count = 32;
//static Primitive * dev_tile_primitives = nullptr;
//static int * dev_tile_prim_counts = nullptr;
__global__ void addPrimitivesToTiles(
int num_primitives
, const Primitive* primitives
, int width
, int height
, int tile_width
, int tile_height
, int tile_prim_count_limit
, Primitive* tile_primitives
, int * tile_prim_counts
)
{
// index id
auto pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= num_primitives) { return; }
// copy primitive data to local memory
auto primitive = primitives[pid];
if (primitive.primitiveType == PrimitiveType::Triangle)
{
glm::vec2 aabb_min = {
fmaxf(fminf(fminf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , 0)
, fmaxf(fminf(fminf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , 0)
};
glm::vec2 aabb_max = {
fminf(fmaxf(fmaxf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , width - 1)
, fminf(fmaxf(fmaxf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , height - 1)
};
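// Map the clamped screen-space AABB to the inclusive range of tiles it overlaps;
// each covered tile reserves a slot in its primitive list with atomicAdd. When a
// tile is already full (prim_slot >= tile_prim_count_limit) the primitive is
// dropped for that tile, which can show up as missing geometry in dense regions
// (see the TODO below).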
auto min_x_tile = static_cast<int>(aabb_min.x) / tile_width;
auto min_y_tile = static_cast<int>(aabb_min.y) / tile_height;
auto max_x_tile = static_cast<int>(aabb_max.x) / tile_width;
auto max_y_tile = static_cast<int>(aabb_max.y) / tile_height;
auto tile_x_count = (width - 1) / tile_width + 1;
for (int tx = min_x_tile; tx <= max_x_tile; tx++)
{
for (int ty = min_y_tile; ty <= max_y_tile; ty++)
{
auto tile_id = tx + ty * tile_x_count;
auto prim_slot = atomicAdd(tile_prim_counts + tile_id, 1);
if (prim_slot >= tile_prim_count_limit)
{
continue;
// TODO: make tile able to contain more primitives somehow
}
tile_primitives[tile_id * tile_prim_count_limit + prim_slot] = primitive;
}
}
}
}
__global__ void kernRasterizeTiles(
int tile_x_count
, int tile_y_count
, int tile_width
, int tile_height
, int width
, int height
, Primitive* tile_primitives
, int * tile_prim_counts
, int tile_prim_count_limit
, Fragment * frag_buffer
, int * depth_buffer
)
{
int tx = (blockIdx.x * blockDim.x) + threadIdx.x;
int ty = (blockIdx.y * blockDim.y) + threadIdx.y;
if (!(tx >= 0 && tx < tile_x_count && ty >= 0 && ty < tile_y_count))
{
return;
}
int index = tx + (ty * tile_x_count);
int x_begin = tx * tile_width;
int x_end = glm::min(x_begin + tile_width, width);
int y_begin = ty * tile_height;
int y_end = glm::min(y_begin + tile_height, height);
auto prim_count = glm::min(tile_prim_counts[index], tile_prim_count_limit);
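// Each thread owns one whole tile: it walks every covered pixel and tests it
// against every primitive binned to this tile, so no other thread ever writes
// these pixels and the depth test needs no atomics.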
for (int y = y_begin; y < y_end; y++)
{
for (int x = x_begin; x < x_end; x++)
{
for (int i = 0; i < prim_count; i++)
{
auto& prim = tile_primitives[index * tile_prim_count_limit + i];
glm::vec3 tri_pos[3] = { glm::vec3(prim.v[0].pos)
, glm::vec3(prim.v[1].pos)
, glm::vec3(prim.v[2].pos)
};
rasterizeTriangleFrag(prim, tri_pos, x, y, width, height, frag_buffer, depth_buffer, nullptr);
}
}
}
}
#else
__global__ void kernRasterizePrimitives(
int num_primitives
, const Primitive* primitives
, int width
, int height
, Fragment * frag_buffer
, int * depth_buffer
, int * mutex_buffer
)
{
// index id
auto pid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pid >= num_primitives) { return; }
// copy primitive data to local memory
auto primitive = primitives[pid];
if (primitive.primitiveType == PrimitiveType::Triangle)
{
glm::vec2 aabb_min = {
fmaxf(fminf(fminf( primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , 0)
, fmaxf(fminf(fminf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , 0)
};
glm::vec2 aabb_max = {
fminf(fmaxf(fmaxf(primitive.v[0].pos[0],primitive.v[1].pos[0]) , primitive.v[2].pos[0]) , width - 1)
, fminf(fmaxf(fmaxf(primitive.v[0].pos[1],primitive.v[1].pos[1]) , primitive.v[2].pos[1]) , height - 1)
};
// TODO: CUDA Dynamic Parallelism?
glm::vec3 tri_pos[3] = { glm::vec3(primitive.v[0].pos)
, glm::vec3(primitive.v[1].pos)
, glm::vec3(primitive.v[2].pos)
};
for (int x = aabb_min.x; x <= static_cast<int>(aabb_max.x); x++)
{
for (int y = aabb_min.y; y <= static_cast<int>(aabb_max.y); y++)
{
rasterizeTriangleFrag(primitive, tri_pos, x, y, width, height, frag_buffer, depth_buffer, mutex_buffer);
}
}
}
}
#endif
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal, float* deltatime) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Execute your rasterization pipeline here
// (See README for rasterization pipeline outline.)
cudaEventRecord(t_start);
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepthAndMutex << <blockCount2d, blockSize2d >> >(width, height, dev_depth, dev_frag_mutex);
// TODO: rasterize
{
#if TILE_BASED_RASTERIZATION
cudaMemset(dev_tile_prim_counts, 0
, tile_w_count * tile_h_count * sizeof(dev_tile_prim_counts[0]));
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
addPrimitivesToTiles << <numBlocksForPrimitives, numThreadsPerBlock >> > (
totalNumPrimitives
, dev_primitives
, width
, height
, tile_width
, tile_height
, max_tile_prim_count
, dev_tile_primitives
, dev_tile_prim_counts
);
checkCUDAError("addPrimitivesToTiles");
dim3 tile_blockSize2d(8, 8);
dim3 tile_blockCount2d((tile_w_count - 1) / tile_blockSize2d.x + 1,
(tile_h_count - 1) / tile_blockSize2d.y + 1);
kernRasterizeTiles << <tile_blockCount2d, tile_blockSize2d >> >(
tile_w_count
, tile_h_count
, tile_width
, tile_height
, width
, height
, dev_tile_primitives
, dev_tile_prim_counts
, max_tile_prim_count
, dev_fragmentBuffer
, dev_depth
);
checkCUDAError("kernRasterizeTiles");
#else
dim3 numThreadsPerBlock(128);
dim3 numBlocksForPrimitives((totalNumPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
kernRasterizePrimitives << <numBlocksForPrimitives, numThreadsPerBlock >> >(totalNumPrimitives, dev_primitives, width, height, dev_fragmentBuffer, dev_depth, dev_frag_mutex);
checkCUDAError("Rasterization");
#endif
}
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, output_width, output_height, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
cudaEventRecord(t_stop);
cudaEventSynchronize(t_stop);
cudaEventElapsedTime(deltatime, t_start, t_stop);
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
#if TILE_BASED_RASTERIZATION
cudaFree(dev_tile_primitives);
dev_tile_primitives = nullptr;
cudaFree(dev_tile_prim_counts);
dev_tile_prim_counts = nullptr;
#endif
cudaFree(dev_frag_mutex);
dev_frag_mutex = nullptr;
checkCUDAError("rasterize Free");
cudaEventDestroy(t_start);
cudaEventDestroy(t_stop);
}
|
58401fcdbd8a4111356224c5aeec819af166b292.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include"device_launch_parameters.h"
#include<stdio.h>
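// Device-side allocation demo: with this 6x6 grid of 3x3 blocks, the thread with
// i==10 && j==10 (thread (1,1) of block (3,3)) allocates the buffer from device
// code, fills it, and publishes the pointer through *a; after __syncthreads() the
// thread with i==11 && j==11 (thread (2,2) of the same block) reads and prints
// one element.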
__global__ void malloc_test(double **a)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if(i==10&&j==10)
{
int N = 10000;
hipMalloc((void**)a, sizeof(double)*N);
for(int i=0;i<N;i++)
{
(*a)[i] = i;
}
}
__syncthreads();
if(i==11&&j==11)
{
printf("%f\n",(*a)[500]);
}
}
int main()
{
double **a;
hipMalloc((void**)&a, sizeof(double *)); // allocate device memory for the pointer a
dim3 blockdim(3,3);
dim3 griddim(6,6);
hipLaunchKernelGGL(( malloc_test), dim3(griddim), dim3(blockdim), 0, 0, a);
hipDeviceSynchronize();
return 0;
} | 58401fcdbd8a4111356224c5aeec819af166b292.cu | #include<cuda_runtime.h>
#include"device_launch_parameters.h"
#include<stdio.h>
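// Device-side allocation demo: with this 6x6 grid of 3x3 blocks, the thread with
// i==10 && j==10 (thread (1,1) of block (3,3)) allocates the buffer from device
// code, fills it, and publishes the pointer through *a; after __syncthreads() the
// thread with i==11 && j==11 (thread (2,2) of the same block) reads and prints
// one element.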
__global__ void malloc_test(double **a)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if(i==10&&j==10)
{
int N = 10000;
cudaMalloc((void**)a, sizeof(double)*N);
for(int i=0;i<N;i++)
{
(*a)[i] = i;
}
}
__syncthreads();
if(i==11&&j==11)
{
printf("%f\n",(*a)[500]);
}
}
int main()
{
double **a;
cudaMalloc((void**)&a, sizeof(double *)); // allocate device memory for the pointer a
dim3 blockdim(3,3);
dim3 griddim(6,6);
malloc_test<<<griddim, blockdim>>>(a);
cudaDeviceSynchronize();
return 0;
} |
c7e41100b4852bdd8e88b86a58cde0837ddbdcda.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaHelpers.h"
#include "GatherShuffle.h"
#include "ThrustInclude.h"
#include "shuffle/DartThrowing.h"
#include "shuffle/LCGBijectiveShuffle.h"
#include "shuffle/MergeShuffle.h"
#include "shuffle/PhiloxShuffle.h"
#include "shuffle/RaoSandeliusShuffle.h"
#include "shuffle/SortShuffle.h"
#include "shuffle/StdShuffle.h"
#include <benchmark/benchmark.h>
#include <cmath>
#include <sstream>
#include <vector>
// #define HOST_BENCH 1
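// When HOST_BENCH is defined, the device synchronisation calls below are compiled
// out and the device-vector gather benchmark at the bottom is not registered, so
// only host-side shuffles are measured.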
using DataType = uint64_t;
template <class ShuffleFunction>
static void benchmarkScatterGather( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + ( 1ull << (uint64_t)state.range( 0 ) );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
PhiloxBijectiveScanShuffle<ContainerType> temp_shuffler;
thrust::sequence( out_container.begin(), out_container.end() );
int seed = 0;
for( auto _ : state )
{
state.PauseTiming();
if( ( seed % 100 ) == 0 )
temp_shuffler( out_container, in_container, seed );
#ifndef HOST_BENCH
checkCudaError( hipDeviceSynchronize() );
#endif
state.ResumeTiming();
// Benchmarks raw gather speed of a random permutation
shuffler( in_container, out_container, seed );
#ifndef HOST_BENCH
checkCudaError( hipDeviceSynchronize() );
#endif
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
template <class ShuffleFunction>
static void benchmarkFunction( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + ( 1ull << (uint64_t)state.range( 0 ) );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
int seed = 0;
for( auto _ : state )
{
shuffler( in_container, out_container, seed );
#ifndef HOST_BENCH
checkCudaError( hipDeviceSynchronize() );
#endif
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << state.range( 0 );
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
static void argsGenerator( benchmark::internal::Benchmark* b )
{
// Go up by 3 so we get both odd and even numbers of bits
std::vector<int> logs = { 8, 11, 14, 17, 20, 23, 26, 29 };
for( int log : logs )
{
b->Args( { log, 0 } );
b->Args( { log, 1 } );
}
}
BENCHMARK_TEMPLATE( benchmarkFunction, PhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, BasicPhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, TwoPassPhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, PhiloxBijectiveScanShuffle<thrust::tbb::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::tbb::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, DartThrowing<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, HostDartThrowing<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, MergeShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, RaoSandeliusShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, StdShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::host_vector<DataType>> )->Apply( argsGenerator );
#ifndef HOST_BENCH
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
#endif
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::host_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_MAIN(); | c7e41100b4852bdd8e88b86a58cde0837ddbdcda.cu | #include "CudaHelpers.h"
#include "GatherShuffle.h"
#include "ThrustInclude.h"
#include "shuffle/DartThrowing.h"
#include "shuffle/LCGBijectiveShuffle.h"
#include "shuffle/MergeShuffle.h"
#include "shuffle/PhiloxShuffle.h"
#include "shuffle/RaoSandeliusShuffle.h"
#include "shuffle/SortShuffle.h"
#include "shuffle/StdShuffle.h"
#include <benchmark/benchmark.h>
#include <cmath>
#include <sstream>
#include <vector>
// #define HOST_BENCH 1
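// When HOST_BENCH is defined, the device synchronisation calls below are compiled
// out and the device-vector gather benchmark at the bottom is not registered, so
// only host-side shuffles are measured.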
using DataType = uint64_t;
template <class ShuffleFunction>
static void benchmarkScatterGather( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + ( 1ull << (uint64_t)state.range( 0 ) );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
PhiloxBijectiveScanShuffle<ContainerType> temp_shuffler;
thrust::sequence( out_container.begin(), out_container.end() );
int seed = 0;
for( auto _ : state )
{
state.PauseTiming();
if( ( seed % 100 ) == 0 )
temp_shuffler( out_container, in_container, seed );
#ifndef HOST_BENCH
checkCudaError( cudaDeviceSynchronize() );
#endif
state.ResumeTiming();
// Benchmarks raw gather speed of a random permutation
shuffler( in_container, out_container, seed );
#ifndef HOST_BENCH
checkCudaError( cudaDeviceSynchronize() );
#endif
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
template <class ShuffleFunction>
static void benchmarkFunction( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + ( 1ull << (uint64_t)state.range( 0 ) );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
int seed = 0;
for( auto _ : state )
{
shuffler( in_container, out_container, seed );
#ifndef HOST_BENCH
checkCudaError( cudaDeviceSynchronize() );
#endif
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << state.range( 0 );
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
static void argsGenerator( benchmark::internal::Benchmark* b )
{
// Go up by 3 so we get both odd and even numbers of bits
std::vector<int> logs = { 8, 11, 14, 17, 20, 23, 26, 29 };
for( int log : logs )
{
b->Args( { log, 0 } );
b->Args( { log, 1 } );
}
}
BENCHMARK_TEMPLATE( benchmarkFunction, PhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, BasicPhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, TwoPassPhiloxBijectiveScanShuffle<> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, PhiloxBijectiveScanShuffle<thrust::tbb::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::tbb::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, DartThrowing<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, HostDartThrowing<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, MergeShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, RaoSandeliusShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, StdShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::host_vector<DataType>> )->Apply( argsGenerator );
#ifndef HOST_BENCH
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
#endif
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::host_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_MAIN(); |
b1c962f7f05b8aa66d00d76a5f385dda2ad13740.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define BLOCKS 12
#define BLOCKSIZE 544
#define BSize 12
#define QSize 17
#define SSize 128
struct kernel_para{
volatile int *A, *B;
volatile int *C;
volatile int size;
volatile int block;
volatile int thread;
volatile int warp;
volatile int req;
volatile int funcId;
volatile int taskId;
volatile int doneHost;
int doneGPU;
};
struct kernel_para_GPU{
int warpId;
int baseId;
int taskId;
};
__device__ void init_queue(struct kernel_para_GPU *warpPool){
int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32;
if((threadIdx.x) != 0){
warpPool[warpIdxx+threadIdx.x].warpId = 0;
}else{
warpPool[warpIdxx+threadIdx.x].warpId = 1;
}
}
__device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){
#if 1
int row = baseTid + (threadIdx.x & 0x1f);
for (int j = 0; j < Size; j++){
int sum = 0;
for (int k = 0; k < Size; k++){
int a = A[row * Size + k];
int b = B[k * Size + j];
sum += a * b;
}
C[row * Size + j] = sum;
}
#endif
}
__device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid)
{
int i = baseTid + (threadIdx.x & 0x1f);
//printf("In vec add with tid %d from block %d\n",i, blockIdx.x);
// for(int j=0; j<200000; j++)
if (i < size)
C[i] = A[i] + B[i];
}
__global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec){
int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32;
__shared__ int warp;
__shared__ int warpQPointer;
__shared__ int base;
int threadDone;
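// Persistent-kernel scheduler: the first QSize threads of each block act as the
// master warp. They poll this block's slots in taskBuffer and, for each incoming
// request, hand out 32-thread work units to free worker warps through warpPool
// (warpId doubles as the busy flag, baseId selects which 32 rows the warp
// computes). Worker warps spin on their warpPool entry and run MatMul_kernel on
// their slice until *exec is raised.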
// Init warp queue contents and pointers
#if 1
if(threadIdx.x < QSize){
init_queue(warpPool);
warp = 0;
warpQPointer = 0;
base = 0;
}
__syncthreads();
#endif
// scheduling in master warps
if(threadIdx.x < QSize){
while(!(*done)){
// if(*done) return;
if(taskBuffer[warpQPointer*BSize+blockIdx.x].req == 1 && !(*done)){
warp = taskBuffer[warpQPointer*BSize+blockIdx.x].warp;
// search free warps
while(1){
threadDone = 0;
if(warpPool[warpIdxx+threadIdx.x].warpId == 0){
if(atomicSub(&warp, 1) > 0){
warpPool[warpIdxx+threadIdx.x].taskId = taskBuffer[warpQPointer*BSize+blockIdx.x].taskId;
warpPool[warpIdxx+threadIdx.x].baseId = atomicAdd(&base, 1)*32;
warpPool[warpIdxx+threadIdx.x].warpId = 1;
__threadfence_block();
}// End if(warp > 0)
}// End if (warpQ->contents)
if(warp <= 0){
threadDone = 1;
}
if(__all(threadDone == 1) != 0){
if(threadIdx.x == 0){
taskBuffer[warpQPointer*BSize+blockIdx.x].req = 0;
base = 0;
// atomicAdd((int*)&totalExecTasks[0],1);
}
break;
}// End warp vote
}//End while(1)
} // End taskBuffer if
if(threadIdx.x == 0) {
warpQPointer++;
if(warpQPointer == SSize){
warpQPointer = 0;
}
}
}// End while(!(*done))
}//End if(threadIdx.x < 32)
#if 1
else{
#if 1
while(!(*exec)){
// if(*exec) return;
if(warpPool[warpIdxx].warpId == 1 && !(*exec)){
MatMul_kernel((int*)taskArgs[warpPool[warpIdxx].taskId].A, (int*)taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId);
if((threadIdx.x & 0x1f) == 0){
if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){
taskArgs[warpPool[warpIdxx].taskId].doneHost = 0;
atomicAdd((int*)&totalExecTasks[0],1);
}
warpPool[warpIdxx].warpId = 0;
__threadfence_block();
}
}
}
#endif
}// End else
#endif
}
| b1c962f7f05b8aa66d00d76a5f385dda2ad13740.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define BLOCKS 12
#define BLOCKSIZE 544
#define BSize 12
#define QSize 17
#define SSize 128
struct kernel_para{
volatile int *A, *B;
volatile int *C;
volatile int size;
volatile int block;
volatile int thread;
volatile int warp;
volatile int req;
volatile int funcId;
volatile int taskId;
volatile int doneHost;
int doneGPU;
};
struct kernel_para_GPU{
int warpId;
int baseId;
int taskId;
};
__device__ void init_queue(struct kernel_para_GPU *warpPool){
int warpIdxx = (blockIdx.x*blockDim.x+threadIdx.x)/32;
if((threadIdx.x) != 0){
warpPool[warpIdxx+threadIdx.x].warpId = 0;
}else{
warpPool[warpIdxx+threadIdx.x].warpId = 1;
}
}
__device__ void MatMul_kernel(int *A, int *B, int *C, int Size, int baseTid){
#if 1
int row = baseTid + (threadIdx.x & 0x1f);
for (int j = 0; j < Size; j++){
int sum = 0;
for (int k = 0; k < Size; k++){
int a = A[row * Size + k];
int b = B[k * Size + j];
sum += a * b;
}
C[row * Size + j] = sum;
}
#endif
}
__device__ void VecAdd_kernel(int *A, int *B, int *C, int size, int baseTid)
{
int i = baseTid + (threadIdx.x & 0x1f);
//printf("In vec add with tid %d from block %d\n",i, blockIdx.x);
// for(int j=0; j<200000; j++)
if (i < size)
C[i] = A[i] + B[i];
}
__global__ void deviceRT(volatile int *done, volatile int *totalExecTasks, struct kernel_para_GPU *warpPool, volatile struct kernel_para *taskBuffer, struct kernel_para *taskArgs, volatile int *exec){
int warpIdxx = (blockIdx.x*blockDim.x + threadIdx.x)/32;
__shared__ int warp;
__shared__ int warpQPointer;
__shared__ int base;
int threadDone;
// Init warp queue contents and pointers
#if 1
if(threadIdx.x < QSize){
init_queue(warpPool);
warp = 0;
warpQPointer = 0;
base = 0;
}
__syncthreads();
#endif
// scheduling in master warps
if(threadIdx.x < QSize){
while(!(*done)){
// if(*done) return;
if(taskBuffer[warpQPointer*BSize+blockIdx.x].req == 1 && !(*done)){
warp = taskBuffer[warpQPointer*BSize+blockIdx.x].warp;
// search free warps
while(1){
threadDone = 0;
if(warpPool[warpIdxx+threadIdx.x].warpId == 0){
if(atomicSub(&warp, 1) > 0){
warpPool[warpIdxx+threadIdx.x].taskId = taskBuffer[warpQPointer*BSize+blockIdx.x].taskId;
warpPool[warpIdxx+threadIdx.x].baseId = atomicAdd(&base, 1)*32;
warpPool[warpIdxx+threadIdx.x].warpId = 1;
__threadfence_block();
}// End if(warp > 0)
}// End if (warpQ->contents)
if(warp <= 0){
threadDone = 1;
}
if(__all(threadDone == 1) != 0){
if(threadIdx.x == 0){
taskBuffer[warpQPointer*BSize+blockIdx.x].req = 0;
base = 0;
// atomicAdd((int*)&totalExecTasks[0],1);
}
break;
}// End warp vote
}//End while(1)
} // End taskBuffer if
if(threadIdx.x == 0) {
warpQPointer++;
if(warpQPointer == SSize){
warpQPointer = 0;
}
}
}// End while(!(*done))
}//End if(threadIdx.x < 32)
#if 1
else{
#if 1
while(!(*exec)){
// if(*exec) return;
if(warpPool[warpIdxx].warpId == 1 && !(*exec)){
MatMul_kernel((int*)taskArgs[warpPool[warpIdxx].taskId].A, (int*)taskArgs[warpPool[warpIdxx].taskId].B, (int*)taskArgs[warpPool[warpIdxx].taskId].C, taskArgs[warpPool[warpIdxx].taskId].size, warpPool[warpIdxx].baseId);
if((threadIdx.x & 0x1f) == 0){
if((atomicSub((int*)&taskArgs[warpPool[warpIdxx].taskId].doneGPU,1)) ==1){
taskArgs[warpPool[warpIdxx].taskId].doneHost = 0;
atomicAdd((int*)&totalExecTasks[0],1);
}
warpPool[warpIdxx].warpId = 0;
__threadfence_block();
}
}
}
#endif
}// End else
#endif
}
|
6b0ead62f5009e5de6b8a1872ea725ed396e6748.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2017, Maria Glukhova. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "histogram_gpu.h"
#include "histogram_common.h"
#include <helper_cuda.h> // CUDA device initialization helper functions
__device__ __forceinline__ void DecodePixel(
uchar4 pixel,
unsigned int (&bins)[ACTIVE_CHANNELS])
{
/**
* Decode uchar4 pixel into bins.
* @param pixel - uchar4 pixel value.
* @param bins (output) - Array of ACTIVE_CHANNELS uints representing binned
* channel value.
*/
unsigned char* samples = reinterpret_cast<unsigned char*>(&pixel);
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) {
bins[CHANNEL] = (unsigned int) (samples[CHANNEL]) / K_BIN;
}
}
__global__ void histogram_gmem_atomics(
const PixelType *in,
int width,
int height,
unsigned int *out)
{
/**
* First-pass histogram kernel (binning into privatized counters)
* @param in - input image, uchar4 array of continuously placed pixel values.
* @param width - int, image width in pixels.
* @param height - int, image height in pixels.
* @param out (output) - partial histograms.
*/
// Global position and size.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
// thread index in workgroup, linear in 0..nt-1
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y; // total threads in workgroup
// Group index in 0..ngroups-1.
int g = blockIdx.x + blockIdx.y * gridDim.x;
// Initialize global memory.
unsigned int *gmem = out + g * NUM_PARTS;
for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt){
gmem[i] = 0;
}
__syncthreads();
// Process pixels (updates our group's partial histogram in gmem).
for (int col = x; col < width; col += nx)
{
for (int row = y; row < height; row += ny)
{
PixelType pixel = in[row * width + col];
unsigned int bins[ACTIVE_CHANNELS];
DecodePixel(pixel, bins);
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) {
atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1);
}
}
}
}
__global__ void histogram_gmem_accum(
const unsigned int *in,
int n,
unsigned int *out)
{
/**
* Accumulate partial histograms into global one.
* @param in - input partial histograms.
* @param n - total number of blocks.
* @param out (output) - global histogram.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ACTIVE_CHANNELS * NUM_BINS) {
return; // out of range
}
unsigned int total = 0;
for (int j = 0; j < n; j++) {
total += in[i + NUM_PARTS * j];
}
out[i] = total;
}
void run_gmem_atomics(
PixelType *d_image,
int width,
int height,
unsigned int *d_hist)
{
/**
* Wrapper for GPU histogram computing.
* @param in - input image, uchar4 array of continuously placed pixel values.
* @param width - int, image width in pixels.
* @param height - int, image height in pixels.
* @param out (output) - resulting histogram (ACTIVE_CHANNELS * NUM_BINS bins).
*/
int device_count = 0;
hipError_t error_id = hipGetDeviceCount(&device_count);
dim3 block(32, 4);
dim3 grid(16, 16);
int total_blocks = grid.x * grid.y;
// Allocate partial histogram.
unsigned int *d_part_hist;
hipMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));
dim3 block2(128);
dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);
hipLaunchKernelGGL(( histogram_gmem_atomics), dim3(grid), dim3(block), 0, 0,
d_image,
width,
height,
d_part_hist);
hipLaunchKernelGGL(( histogram_gmem_accum), dim3(grid2), dim3(block2), 0, 0,
d_part_hist,
total_blocks,
d_hist);
hipFree(d_part_hist);
}
__global__ void histogram_gmem_atomics1(
const PixelType *in,
int width,
int height,
unsigned int *out,
int dev_id,
int dev_count)
{
// Global position and size.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
// thread index in workgroup, linear in 0..nt-1
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y; // total threads in workgroup
// Group index in 0..ngroups-1.
int g = blockIdx.x + blockIdx.y * gridDim.x;
// Initialize global memory.
unsigned int *gmem = out + g * NUM_PARTS;
for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt)
gmem[i] = 0;
__syncthreads();
// Process pixels (updates our group's partial histogram in gmem).
for (int col = x; col < width; col += nx)
{
for (int row = y; row < height; row += ny)
{
PixelType pixel = in[row * width + col];
unsigned int bins[ACTIVE_CHANNELS];
DecodePixel(pixel, bins);
// Every device processes its own channel(s).
#pragma unroll
for (int CHANNEL = dev_id; CHANNEL < ACTIVE_CHANNELS; CHANNEL += dev_count) {
atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1);
}
}
}
}
void run_multigpu(
PixelType *d_image,
int width,
int height,
unsigned int *d_hist,
int device_id,
int device_count)
{
dim3 block(32, 4);
dim3 grid(16, 16);
int total_blocks = grid.x * grid.y;
// Allocate partial histogram.
// Actually, we need less memory (only the channels assigned to the device),
// but for the sake of simplicity, let us have the "full" histogram for
// every device (still counting only relevant bits).
// TODO: Memory-efficient way.
unsigned int *d_part_hist;
hipMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));
dim3 block2(128);
dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);
hipLaunchKernelGGL(( histogram_gmem_atomics1), dim3(grid), dim3(block), 0, 0,
d_image,
width,
height,
d_part_hist,
device_id,
device_count
);
hipLaunchKernelGGL(( histogram_gmem_accum), dim3(grid2), dim3(block2), 0, 0,
d_part_hist,
total_blocks,
d_hist);
hipFree(d_part_hist);
}
| 6b0ead62f5009e5de6b8a1872ea725ed396e6748.cu | /******************************************************************************
* Copyright (c) 2017, Maria Glukhova. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "histogram_gpu.h"
#include "histogram_common.h"
#include <helper_cuda.h> // CUDA device initialization helper functions
__device__ __forceinline__ void DecodePixel(
uchar4 pixel,
unsigned int (&bins)[ACTIVE_CHANNELS])
{
/**
* Decode uchar4 pixel into bins.
* @param pixel - uchar4 pixel value.
* @param bins (output) - Array of ACTIVE_CHANNELS uints representing binned
* channel value.
*/
unsigned char* samples = reinterpret_cast<unsigned char*>(&pixel);
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) {
bins[CHANNEL] = (unsigned int) (samples[CHANNEL]) / K_BIN;
}
}
__global__ void histogram_gmem_atomics(
const PixelType *in,
int width,
int height,
unsigned int *out)
{
/**
* First-pass histogram kernel (binning into privatized counters)
* @param in - input image, uchar4 array of continuously placed pixel values.
* @param width - int, image width in pixels.
* @param height - int, image height in pixels.
* @param out (output) - partial histograms.
*/
// Global position and size.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
// thread index in workgroup, linear in 0..nt-1
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y; // total threads in workgroup
// Group index in 0..ngroups-1.
int g = blockIdx.x + blockIdx.y * gridDim.x;
// Initialize global memory.
unsigned int *gmem = out + g * NUM_PARTS;
for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt){
gmem[i] = 0;
}
__syncthreads();
// Process pixels (updates our group's partial histogram in gmem).
for (int col = x; col < width; col += nx)
{
for (int row = y; row < height; row += ny)
{
PixelType pixel = in[row * width + col];
unsigned int bins[ACTIVE_CHANNELS];
DecodePixel(pixel, bins);
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) {
atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1);
}
}
}
}
__global__ void histogram_gmem_accum(
const unsigned int *in,
int n,
unsigned int *out)
{
/**
* Accumulate partial histograms into global one.
* @param in - input partial histograms.
* @param n - total number of blocks.
* @param out (output) - global histogram.
*/
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ACTIVE_CHANNELS * NUM_BINS) {
return; // out of range
}
unsigned int total = 0;
for (int j = 0; j < n; j++) {
total += in[i + NUM_PARTS * j];
}
out[i] = total;
}
void run_gmem_atomics(
PixelType *d_image,
int width,
int height,
unsigned int *d_hist)
{
/**
* Wrapper for GPU histogram computing.
* @param in - input image, uchar4 array of continuously placed pixel values.
* @param width - int, image width in pixels.
* @param height - int, image height in pixels.
* @param out (output) - resulting histogram (ACTIVE_CHANNELS * NUM_BINS bins).
*/
int device_count = 0;
cudaError_t error_id = cudaGetDeviceCount(&device_count);
dim3 block(32, 4);
dim3 grid(16, 16);
int total_blocks = grid.x * grid.y;
// Allocate partial histogram.
unsigned int *d_part_hist;
cudaMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));
dim3 block2(128);
dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);
histogram_gmem_atomics<<<grid, block>>>(
d_image,
width,
height,
d_part_hist);
histogram_gmem_accum<<<grid2, block2>>>(
d_part_hist,
total_blocks,
d_hist);
cudaFree(d_part_hist);
}
__global__ void histogram_gmem_atomics1(
const PixelType *in,
int width,
int height,
unsigned int *out,
int dev_id,
int dev_count)
{
// Global position and size.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int nx = blockDim.x * gridDim.x;
int ny = blockDim.y * gridDim.y;
// thread index in workgroup, linear in 0..nt-1
int t = threadIdx.x + threadIdx.y * blockDim.x;
int nt = blockDim.x * blockDim.y; // total threads in workgroup
// Group index in 0..ngroups-1.
int g = blockIdx.x + blockIdx.y * gridDim.x;
// Initialize global memory.
unsigned int *gmem = out + g * NUM_PARTS;
for (int i = t; i < ACTIVE_CHANNELS * NUM_BINS; i += nt)
gmem[i] = 0;
__syncthreads();
// Process pixels (updates our group's partial histogram in gmem).
for (int col = x; col < width; col += nx)
{
for (int row = y; row < height; row += ny)
{
PixelType pixel = in[row * width + col];
unsigned int bins[ACTIVE_CHANNELS];
DecodePixel(pixel, bins);
// Every device processes its own channel(s).
#pragma unroll
for (int CHANNEL = dev_id; CHANNEL < ACTIVE_CHANNELS; CHANNEL += dev_count) {
atomicAdd(&gmem[(NUM_BINS * CHANNEL) + bins[CHANNEL]], 1);
}
}
}
}
void run_multigpu(
PixelType *d_image,
int width,
int height,
unsigned int *d_hist,
int device_id,
int device_count)
{
dim3 block(32, 4);
dim3 grid(16, 16);
int total_blocks = grid.x * grid.y;
// Allocate partial histogram.
// Actually, we need less memory (only the channels assigned to the device),
// but for the sake of simplicity, let us have the "full" histogram for
// every device (still counting only relevant bits).
// TODO: Memory-efficient way.
unsigned int *d_part_hist;
cudaMalloc(&d_part_hist, total_blocks * NUM_PARTS * sizeof(unsigned int));
dim3 block2(128);
dim3 grid2((ACTIVE_CHANNELS * NUM_BINS + block2.x - 1) / block2.x);
histogram_gmem_atomics1<<<grid, block>>>(
d_image,
width,
height,
d_part_hist,
device_id,
device_count
);
histogram_gmem_accum<<<grid2, block2>>>(
d_part_hist,
total_blocks,
d_hist);
cudaFree(d_part_hist);
}
|
d4abd7ba3be223719385332d0da2c8668c46c10e.hip | // !!! This is a file automatically generated by hipify!!!
/* declare a 1d array and find the maximum of each chunk using reduce method. No shared memory is used
*
*chunksize must be a power of 2
how to compile: nvcc para
when n is 600,000 or more, the results are not correct, probably because there are not enough threads.
The 1d array used for testing is a sequence from 0 to n-1.
How to deal with the incomplete chunk:
if (tid < s && myId < n) { //skip myId >= n (happens when the incomplete chunk has fewer than blockDim.x/2 elements)
float right_counterpart = (myId+s) >= n? 0:darr[myId+s]; //if the right_counterpart is missing, use 0
*/
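/* Worked example of the guard quoted above (illustration only -- the numbers
   are assumed, not taken from this program, which uses chunkSize = 512):
   with chunkSize = blockDim.x = 8 and n = 12, the second chunk owns elements
   8..11 only. At the first reduction step s = blockDim.x/2 = 4, the thread
   with tid = 2 has myId = 10, and myId + s = 14 >= n, so darr[14] would be
   out of bounds; the guard substitutes 0 for the missing right counterpart.
   Filling with 0 is safe here only because the test data is non-negative;
   a general max reduction would use -FLT_MAX (or the leftmost element) instead.
*/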
#include <stdio.h>
#include <hip/hip_runtime.h>
float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n);
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n);
int main(int argc, char **argv) {
//generate a 1d array
int n = atoi(argv[1]);
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i =0; i < n; i++) {
arr[i] = (float)i/2.0f;
}
const int chunkSize = 512;
int numChunk = (n + chunkSize -1)/chunkSize;
float *maxarr = (float *)malloc(numChunk * sizeof(float));
// declare GPU memory pointers
float *darr, * dmaxarr;
hipMalloc((void **)&darr, n*sizeof(float));
hipMalloc((void **)&dmaxarr, numChunk*sizeof(float));
hipMemcpy(darr, arr, n*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(numChunk,1);
dim3 dimBlock(chunkSize,1,1);
hipLaunchKernelGGL((parallel_max_each_chunk), dim3(dimGrid),dim3(dimBlock), 0, 0, dmaxarr, darr, chunkSize,n);
hipDeviceSynchronize();
hipMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), hipMemcpyDeviceToHost);
for (i=0; i < numChunk; i++) {
printf("%d maximum: %f\n",i,maxarr[i]);
}
float * smaxarr = (float *) malloc(numChunk * sizeof(float));
printf("\nserial solution\n");
serial_max_each_chunk(smaxarr, arr, chunkSize, n);
bool judge = true;
for (i=0; i < numChunk; i++) {
printf("%d maximum: %f\n",i,smaxarr[i]);
judge = judge && (smaxarr[i] == maxarr[i]);
}
printf("\n--------correct or wrong---------\n");
printf(judge ? "right\n": "wrong\n");
// check the exit state of CUDA code
hipError_t error = hipGetLastError();
if (error !=hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
return 0;
}
float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n) {
int numChunk = (n + chunkSize - 1)/chunkSize;
int i,j;
for (i = 0; i < numChunk; i++){
maxarr[i] = -3.0;
for (j = i * chunkSize; j < (i+1)*chunkSize; j++) {
if (j >= n) { break;
} else {
if (maxarr[i] < arr[j]) { maxarr[i] = arr[j];}
}
}
}
return maxarr;
}
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n) {
int myId = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s && myId < n) { //skip myId >= n (happens when the incomplete chunk has fewer than blockDim.x/2 elements)
float right_counterpart = (myId+s) >= n? 0:darr[myId+s]; //if the right_counterpart is missing, use 0
darr[myId]= right_counterpart > darr[myId]? right_counterpart : darr[myId];
}
__syncthreads();
}
if(tid == 0) {
dmaxarr[blockIdx.x] = darr[myId];
}
}
| d4abd7ba3be223719385332d0da2c8668c46c10e.cu |
/* declare a 1d array and find the maximum of each chunk using reduce method. No shared memory is used
*
*chunksize must be a power of 2
how to compile: nvcc para
when n is 600,000 or more, the results are not correct, probably because there are not enough threads.
The 1d array used for testing is a sequence from 0 to n-1.
How to deal with the incomplete chunk:
if (tid < s && myId < n) { //skip myId >= n (happens when the incomplete chunk has fewer than blockDim.x/2 elements)
float right_counterpart = (myId+s) >= n? 0:darr[myId+s]; //if the right_counterpart is missing, use 0
*/
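/* Worked example of the guard quoted above (illustration only -- the numbers
   are assumed, not taken from this program, which uses chunkSize = 512):
   with chunkSize = blockDim.x = 8 and n = 12, the second chunk owns elements
   8..11 only. At the first reduction step s = blockDim.x/2 = 4, the thread
   with tid = 2 has myId = 10, and myId + s = 14 >= n, so darr[14] would be
   out of bounds; the guard substitutes 0 for the missing right counterpart.
   Filling with 0 is safe here only because the test data is non-negative;
   a general max reduction would use -FLT_MAX (or the leftmost element) instead.
*/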
#include <stdio.h>
#include <cuda.h>
float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n);
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n);
int main(int argc, char **argv) {
//generate a 1d array
int n = atoi(argv[1]);
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i =0; i < n; i++) {
arr[i] = (float)i/2.0f;
}
const int chunkSize = 512;
int numChunk = (n + chunkSize -1)/chunkSize;
float *maxarr = (float *)malloc(numChunk * sizeof(float));
// declare GPU memory pointers
float *darr, * dmaxarr;
cudaMalloc((void **)&darr, n*sizeof(float));
cudaMalloc((void **)&dmaxarr, numChunk*sizeof(float));
cudaMemcpy(darr, arr, n*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(numChunk,1);
dim3 dimBlock(chunkSize,1,1);
parallel_max_each_chunk<<<dimGrid,dimBlock>>>(dmaxarr, darr, chunkSize,n);
cudaThreadSynchronize();
cudaMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), cudaMemcpyDeviceToHost);
for (i=0; i < numChunk; i++) {
printf("%d maximum: %f\n",i,maxarr[i]);
}
float * smaxarr = (float *) malloc(numChunk * sizeof(float));
printf("\nserial solution\n");
serial_max_each_chunk(smaxarr, arr, chunkSize, n);
bool judge = true;
for (i=0; i < numChunk; i++) {
printf("%d maximum: %f\n",i,smaxarr[i]);
judge = judge && (smaxarr[i] == maxarr[i]);
}
printf("\n--------correct or wrong---------\n");
printf(judge ? "right\n": "wrong\n");
// check the exit state of CUDA code
cudaError_t error = cudaGetLastError();
if (error !=cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
return 0;
}
float * serial_max_each_chunk(float maxarr[], float arr[], int chunkSize, int n) {
int numChunk = (n + chunkSize - 1)/chunkSize;
int i,j;
for (i = 0; i < numChunk; i++){
maxarr[i] = -3.0;
for (j = i * chunkSize; j < (i+1)*chunkSize; j++) {
if (j >= n) { break;
} else {
if (maxarr[i] < arr[j]) { maxarr[i] = arr[j];}
}
}
}
return maxarr;
}
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int chunkSize, int n) {
int myId = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s && myId < n) { //skip myId >= n (happens when the incomplete chunk has fewer than blockDim.x/2 elements)
float right_counterpart = (myId+s) >= n? 0:darr[myId+s]; //if the right_counterpart is missing, use 0
darr[myId]= right_counterpart > darr[myId]? right_counterpart : darr[myId];
}
__syncthreads();
}
if(tid == 0) {
dmaxarr[blockIdx.x] = darr[myId];
}
}
|
6e2e268e9c04e10f167f36b7022f9b7668ac418a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <vector>
#include <iostream>
#include "streamCompaction.h"
using namespace std;
#include <thrust/copy.h>
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
}
}
__global__ void sum(int* in, int* out, int n, int d1){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<n){
int ink = in[k];
if (k>=d1){
out[k] = in[k-d1] + ink;
}
else{
out[k] = ink;
}
}
}
__global__ void shift(int* in, int* out, int n){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
out[0] = 0;
if (k<n && k>0){
out[k] = in[k-1];
}
}
__global__ void naiveSumGlobal(int* in, int* out, int n){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int logn = ceil(log(float(n))/log(2.0f));
for (int d=1; d<=logn; d++){
int offset = powf(2.0f, d-1);
if (index >= offset){
out[index] = in[index-offset] + in[index];
}
else{
out[index] = in[index];
}
__syncthreads();
int* temp = in;
in = out;
out = temp;
}
}
__global__ void naiveSumSharedSingleBlock(int* in, int* out, int n){
int index = threadIdx.x;
if (index >= n) return;
extern __shared__ int shared[];
int *tempIn = &shared[0];
int *tempOut = &shared[n];
tempOut[index] = (index > 0) ? in[index-1] : 0;
__syncthreads();
for (int offset = 1; offset <= n; offset *= 2){
int* temp = tempIn;
tempIn = tempOut;
tempOut = temp;
if (index >= offset){
tempOut[index] = tempIn[index-offset] + tempIn[index];
}
else{
tempOut[index] = tempIn[index];
}
__syncthreads();
}
out[index] = tempOut[index];
}
__global__ void naiveSumSharedArbitrary(int* in, int* out, int n, int* sums=0){
int localIndex = threadIdx.x;
int globalIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int shared[];
int *tempIn = &shared[0];
int *tempOut = &shared[n];
tempOut[localIndex] = in[globalIndex];
__syncthreads();
for (int offset = 1; offset < n; offset *= 2){
int* temp = tempIn;
tempIn = tempOut;
tempOut = temp;
if (localIndex >= offset){
tempOut[localIndex] = tempIn[localIndex-offset] + tempIn[localIndex];
}
else{
tempOut[localIndex] = tempIn[localIndex];
}
__syncthreads();
}
if (sums) sums[blockIdx.x] = tempOut[n-1];
out[globalIndex] = tempOut[localIndex];
}
__global__ void workEfficientSumSingleBlock(int* in, int* out, int n){
extern __shared__ float temp[];
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int offset = 1;
if (2*index+1<=n){
temp[2*index] = in[2*index];
temp[2*index+1] = in[2*index+1];
for (int d = n>>1; d>0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0) temp[n - 1] = 0;
for (int d = 1; d<n; d*=2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
if (ai < n && bi < n){
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
}
__syncthreads();
out[2*index] = temp[2*index];
out[2*index+1] = temp[2*index+1];
}
}
__global__ void workEfficientArbitrary(int* in, int* out, int n, int* sums=0){
extern __shared__ float temp[];
int offset = 1;
int index = threadIdx.x;
int indexA = index;
int indexB = index + (n/2);
int bankOffsetA = CONFLICT_FREE_OFFSET(indexA);
int bankOffsetB = CONFLICT_FREE_OFFSET(indexB);
temp[indexA + bankOffsetA] = in[indexA];
temp[indexB + bankOffsetB] = in[indexB];
for (int d = n>>1; d>0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0){
if (sums) sums[blockIdx.x] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d<n; d*=2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
if (ai < n && bi < n){
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
}
__syncthreads();
out[indexA] = temp[indexA + bankOffsetA];
out[indexB] = temp[indexB + bankOffsetB];
}
__global__ void addIncs(int* cudaAuxIncs, int* cudaIndicesB, int n){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n){
// cudaIndicesB[index] = blockIdx.x; //cudaAuxIncs[blockIdx.x];
cudaIndicesB[index] += cudaAuxIncs[blockIdx.x];
// }
}
__global__ void streamCompaction(dataPacket* inRays, int* indices, dataPacket* outRays, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
dataPacket inRay = inRays[k];
if (inRay.alive){
outRays[indices[k]] = inRay;
}
}
}
struct isAlive
{
__host__ __device__
bool operator()(const dataPacket& dp)
{
return dp.alive;
}
};
struct isEven
{
__host__ __device__
bool operator()(const int x)
{
return (x%2 == 0);
}
};
struct isOne
{
__host__ __device__
bool operator()(const int x)
{
return (x == 1);
}
};
__global__ void killStream(int index, dataPacket* inRays, int* indices, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
if (k == index){
inRays[k].alive = false;
indices[k] = 0;
}
}
}
__global__ void resetStreams(dataPacket* inRays, int* indices, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
inRays[k].alive = true;
indices[k] = 1;
}
}
void testStreamCompaction(){
//Testing stream compaction
int numElements = 10;
dataPacket* arrayOfElements = new dataPacket[numElements];
for (int i=0; i<numElements; i+=1){
dataPacket rb(i);
arrayOfElements[i] = rb;
}
arrayOfElements[1].alive=false;
arrayOfElements[4].alive=false;
arrayOfElements[5].alive=false;
arrayOfElements[7].alive=false;
arrayOfElements[8].alive=false;
dataPacket* cudaArrayA;
dataPacket* cudaArrayB;
hipMalloc((void**)&cudaArrayA, numElements*sizeof(dataPacket));
hipMalloc((void**)&cudaArrayB, numElements*sizeof(dataPacket));
int* testin;
int* testout;
int* cputest = new int[numElements];
for (int i=0; i<numElements; i++){
if (arrayOfElements[i].alive){
cputest[i]=1;
}
else{
cputest[i]=0;
}
}
hipMalloc((void**)&testin, numElements*sizeof(int));
hipMalloc((void**)&testout, numElements*sizeof(int));
hipMemcpy(cudaArrayA, arrayOfElements, numElements*sizeof(dataPacket), hipMemcpyHostToDevice);
hipMemcpy(cudaArrayB, arrayOfElements, numElements*sizeof(dataPacket), hipMemcpyHostToDevice);
hipMemcpy(testin, cputest, numElements*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(testout, cputest, numElements*sizeof(int), hipMemcpyHostToDevice);
for (int i=0; i<numElements; i++){
std::cout<<arrayOfElements[i].index<<", "<<cputest[i]<<std::endl;
}
dim3 threadsPerBlock(64);
dim3 fullBlocksPerGrid(int(ceil(float(numElements)/64.0f)));
//scan
for (int d=1; d<=ceil(log(numElements)/log(2))+1; d++){
hipLaunchKernelGGL(( sum), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, testin, testout, numElements, int(pow(2.0f,d-1)));
hipDeviceSynchronize();
hipMemcpy(cputest, testout, numElements*sizeof(int), hipMemcpyDeviceToHost);
int* temp = testin;
testin=testout;
testout=temp;
}
//Compact
hipLaunchKernelGGL(( streamCompaction), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudaArrayA, testin, cudaArrayB, numElements);
cudaArrayA = cudaArrayB;
hipDeviceSynchronize();
hipMemcpy(&numElements, &testin[numElements-1], 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout<<"number of rays left: "<<numElements<<std::endl;
// for (int i=0; i<numElements; i++){
// std::cout<<cputest[i]<<std::endl;
// }
hipMemcpy(cputest, testin, numElements*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(arrayOfElements, cudaArrayA, numElements*sizeof(dataPacket), hipMemcpyDeviceToHost);
for (int i=0; i<numElements; i++){
std::cout<<arrayOfElements[i].index<<std::endl;
}
std::cout<<"___________________________________"<<std::endl;
delete [] cputest;
hipFree(testin);
hipFree(testout);
delete [] arrayOfElements;
hipFree(cudaArrayA);
hipFree(cudaArrayB);
}
DataStream::DataStream(int numElements, dataPacket * data){
m_data = data;
if (numElements % (THREADS_PER_BLOCK*2)) numElements+=1;
m_numElementsAlive = numElements;
if (numElements % (THREADS_PER_BLOCK*2) != 0){
int counter = 1;
while (THREADS_PER_BLOCK*2*counter < numElements){
counter += 1;
}
numElements = THREADS_PER_BLOCK*2*counter;
}
m_numElements = numElements;
m_indices = new int[numElements];
for (int i=0; i<numElements; i+=1){
if (i < m_numElementsAlive){
m_indices[i] = 1;
}
else{
m_indices[i] = 0;
}
}
m_auxSums = new int[numElements/(THREADS_PER_BLOCK*2)];
for (int i=0; i<numElements/(THREADS_PER_BLOCK*2); i+=1){
m_auxSums[i] = 0;
}
//cudaInit (cudaDataA, cudaDataB, cudaIndicesA, cudaIndicesB);
hipMalloc ((void**)&cudaDataA, numElements*sizeof (dataPacket));
hipMalloc ((void**)&cudaDataB, numElements*sizeof (dataPacket));
hipMalloc ((void**)&cudaIndicesA, numElements*sizeof (int));
hipMalloc ((void**)&cudaIndicesB, numElements*sizeof (int));
hipMalloc ((void**)&cudaAuxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof (int));
hipMalloc ((void**)&cudaAuxIncs, numElements/(THREADS_PER_BLOCK*2)*sizeof (int));
hipMemcpy(cudaDataA, m_data, numElements*sizeof(dataPacket), hipMemcpyHostToDevice);
hipMemcpy(cudaDataB, m_data, numElements*sizeof(dataPacket), hipMemcpyHostToDevice);
hipMemcpy(cudaIndicesA, m_indices, numElements*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cudaIndicesB, m_indices, numElements*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cudaAuxSums, m_auxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cudaAuxIncs, m_auxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof(int), hipMemcpyHostToDevice);
}
DataStream::~DataStream(){
hipFree (cudaDataA);
hipFree (cudaDataB);
hipFree (cudaIndicesA);
hipFree (cudaIndicesB);
hipFree (cudaAuxSums);
hipFree (cudaAuxIncs);
delete [] m_data;
delete [] m_indices;
delete [] m_auxSums;
}
void DataStream::serialScan(){
clock_t t = clock ();
m_indices[0] = 0;
for (int i=1; i<m_numElementsAlive; i+=1){
m_indices[i] = m_indices[i] + m_indices[i-1];
}
t = clock() - t;
cout<<(float)t/CLOCKS_PER_SEC<<endl;
}
void DataStream::globalSum(int* in, int* out, int n){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(m_numElements/threadsPerBlock);
for (int d=1; d<=ceil(log(m_numElementsAlive)/log(2)); d++){
hipLaunchKernelGGL(( sum), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, in, out, m_numElementsAlive, powf(2.0f, d-1));
hipDeviceSynchronize();
int* temp = in;
in = out;
out = temp;
}
hipLaunchKernelGGL(( shift), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, in, out, m_numElementsAlive);
}
void DataStream::thrustStreamCompact(){
clock_t t = clock ();
thrust::copy_if (m_data, m_data+m_numElements, m_indices, m_data, isOne());
t = clock() - t;
cout<<(float)t/CLOCKS_PER_SEC<<endl;
}
void DataStream::compactWorkEfficientArbitrary(){
int numElements = m_numElements;
int threadsPerBlock = THREADS_PER_BLOCK; // 8
int procsPefBlock = threadsPerBlock*2; // 16
dim3 initialScanThreadsPerBlock(procsPefBlock/2); //8
dim3 initialScanBlocksPerGrid(numElements/procsPefBlock);//
int sumSize = numElements/(THREADS_PER_BLOCK*2);
if (sumSize<2) sumSize+=2;
dim3 initialScanThreadsPerBlock2(sumSize/2); //16
dim3 initialScanBlocksPerGrid2(sumSize/(sumSize/2)+1);//1024/16
dim3 initialScanThreadsPerBlock3(procsPefBlock); //8
dim3 initialScanBlocksPerGrid3(numElements/procsPefBlock);//3
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/float(threadsPerBlock))));
hipLaunchKernelGGL(( workEfficientArbitrary), dim3(initialScanBlocksPerGrid), dim3(initialScanThreadsPerBlock), procsPefBlock*sizeof(int), 0, cudaIndicesA, cudaIndicesB, procsPefBlock, cudaAuxSums);
for (int d=1; d<=ceil(log(sumSize)/log(2)); d++){
hipLaunchKernelGGL(( sum), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaAuxSums, cudaAuxIncs, sumSize, powf(2.0f, d-1));
hipDeviceSynchronize();
int* temp = cudaAuxSums;
cudaAuxSums = cudaAuxIncs;
cudaAuxIncs = temp;
}
hipLaunchKernelGGL(( shift), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaAuxSums, cudaAuxIncs, m_numElements/(THREADS_PER_BLOCK*2));
hipLaunchKernelGGL(( addIncs), dim3(initialScanBlocksPerGrid3), dim3(initialScanThreadsPerBlock3), 0, 0, cudaAuxIncs, cudaIndicesB, m_numElements);
//Stream compaction from A into B, then save back into A
hipLaunchKernelGGL(( streamCompaction), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * temp = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = temp;
// update numrays
hipMemcpy(&m_numElementsAlive, &cudaIndicesB[m_numElementsAlive], sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( resetStreams), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaDataA, cudaIndicesA, m_numElementsAlive);
}
void DataStream::compactNaiveSumGlobal(){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(m_numElements/threadsPerBlock);
clock_t t = clock();
for (int d=1; d<=ceil(log(m_numElementsAlive)/log(2)); d++){
hipLaunchKernelGGL(( sum), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaIndicesA, cudaIndicesB, m_numElementsAlive, powf(2.0f, d-1));
checkCUDAError("kernel failed 1 !");
hipDeviceSynchronize();
int* temp = cudaIndicesA;
cudaIndicesA = cudaIndicesB;
cudaIndicesB = temp;
}
hipLaunchKernelGGL(( shift), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaIndicesA, cudaIndicesB, m_numElementsAlive);
checkCUDAError("kernel failed 1 !");
t = clock() - t;
hipMemcpy(m_indices, cudaIndicesB, m_numElementsAlive*sizeof(int), hipMemcpyDeviceToHost);
cout<<m_indices[m_numElementsAlive-1]<<": ";
cout<<(float) t / CLOCKS_PER_SEC<<endl;
// for (int i=0; i<numAlive(); i+=1){
// cout<<m_indices[i];
// if (i<numAlive()-1) cout<<",";
// }
// cout<<endl;
// Stream compaction from A into B, then save back into A
hipLaunchKernelGGL(( streamCompaction), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * temp = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = temp;
// update numrays
hipMemcpy(&m_numElementsAlive, &cudaIndicesA[m_numElementsAlive-1], sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( resetStreams), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaDataA, cudaIndicesA, m_numElementsAlive);
}
void DataStream::compactNaiveSumSharedSingleBlock(){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/float(threadsPerBlock))));
hipLaunchKernelGGL(( naiveSumSharedSingleBlock), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 2*m_numElements*sizeof(int), 0, cudaIndicesA, cudaIndicesB, m_numElements);
checkCUDAError("kernel failed!");
hipMemcpy(m_indices, cudaIndicesB, m_numElements*sizeof(int), hipMemcpyDeviceToHost);
}
void DataStream::compactNaiveSumSharedArbitrary(){
////////////////////////////////////////////////////////////////////////////////////////
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock*2);
dim3 fullBlocksPerGridL(m_numElements/(threadsPerBlock*2));
hipLaunchKernelGGL(( naiveSumSharedArbitrary), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 2*m_numElements/(m_numElements/(threadsPerBlock*2))*sizeof(int), 0, cudaIndicesA, cudaIndicesB, threadsPerBlock*2, cudaAuxSums);
checkCUDAError("kernel failed 1 !");
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
int sumSize = m_numElements/(THREADS_PER_BLOCK*2);
dim3 initialScanThreadsPerBlock2(threadsPerBlock);
dim3 initialScanBlocksPerGrid2(sumSize/threadsPerBlock+1);
dim3 threadsPerBlockOld(threadsPerBlock);
dim3 fullBlocksPerGridOld(int(ceil(float(sumSize)/float(threadsPerBlock))));
// hipMemcpy(cudaAuxIncs, cudaAuxSums, m_numElements/(THREADS_PER_BLOCK*2)*sizeof(int), hipMemcpyDeviceToDevice);
for (int d=1; d<=ceil(log(sumSize)/log(2)); d++){
hipLaunchKernelGGL(( sum), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaAuxSums, cudaAuxIncs, sumSize, powf(2.0f, d-1));
hipDeviceSynchronize();
int* temp = cudaAuxSums;
cudaAuxSums = cudaAuxIncs;
cudaAuxIncs = temp;
}
hipLaunchKernelGGL(( shift), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaAuxSums, cudaAuxIncs, m_numElements/(THREADS_PER_BLOCK*2));
hipLaunchKernelGGL(( addIncs), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaAuxIncs, cudaIndicesB, m_numElements);
hipLaunchKernelGGL(( shift), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaIndicesB, cudaIndicesA, m_numElementsAlive);
int * temp = cudaIndicesA;
cudaIndicesA = cudaIndicesB;
cudaIndicesB = temp;
dim3 threadsPerBlockLL(threadsPerBlock);
dim3 fullBlocksPerGridLL(m_numElements/threadsPerBlock);
clock_t t = clock();
//Stream compaction from A into B, then save back into A
hipLaunchKernelGGL(( streamCompaction), dim3(fullBlocksPerGridLL), dim3(threadsPerBlockLL), 0, 0, cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * tempDP = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = tempDP;
t = clock() - t;
cout<<(float)t / CLOCKS_PER_SEC<<endl;
// // update numrays
hipMemcpy(&m_numElementsAlive, &cudaIndicesA[m_numElementsAlive-1], sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( resetStreams), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, cudaDataA, cudaIndicesA, m_numElementsAlive);
}
bool DataStream::getData(int index, dataPacket& data){
if (index > m_numElements) return false;
data = m_data[index];
return true;
}
int DataStream::numAlive(){
return m_numElementsAlive;
}
void DataStream::fetchDataFromGPU(){
hipMemcpy(m_data, cudaDataA, m_numElementsAlive*sizeof(dataPacket), hipMemcpyDeviceToHost);
}
void DataStream::kill(int index){
if (index > m_numElementsAlive) return;
dim3 threadsPerBlockL(64);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/64.0f)));
hipLaunchKernelGGL(( killStream), dim3(fullBlocksPerGridL), dim3(threadsPerBlockL), 0, 0, index, cudaDataA, cudaIndicesA, m_numElementsAlive);
hipMemcpy(m_indices, cudaIndicesA, m_numElementsAlive*sizeof(int), hipMemcpyDeviceToHost);
} | 6e2e268e9c04e10f167f36b7022f9b7668ac418a.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <vector>
#include <iostream>
#include "streamCompaction.h"
using namespace std;
#include <thrust/copy.h>
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
}
}
__global__ void sum(int* in, int* out, int n, int d1){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<n){
int ink = in[k];
if (k>=d1){
out[k] = in[k-d1] + ink;
}
else{
out[k] = ink;
}
}
}
__global__ void shift(int* in, int* out, int n){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
out[0] = 0;
if (k<n && k>0){
out[k] = in[k-1];
}
}
__global__ void naiveSumGlobal(int* in, int* out, int n){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int logn = ceil(log(float(n))/log(2.0f));
for (int d=1; d<=logn; d++){
int offset = powf(2.0f, d-1);
if (index >= offset){
out[index] = in[index-offset] + in[index];
}
else{
out[index] = in[index];
}
__syncthreads();
int* temp = in;
in = out;
out = temp;
}
}
__global__ void naiveSumSharedSingleBlock(int* in, int* out, int n){
int index = threadIdx.x;
if (index >= n) return;
extern __shared__ int shared[];
int *tempIn = &shared[0];
int *tempOut = &shared[n];
tempOut[index] = (index > 0) ? in[index-1] : 0;
__syncthreads();
for (int offset = 1; offset <= n; offset *= 2){
int* temp = tempIn;
tempIn = tempOut;
tempOut = temp;
if (index >= offset){
tempOut[index] = tempIn[index-offset] + tempIn[index];
}
else{
tempOut[index] = tempIn[index];
}
__syncthreads();
}
out[index] = tempOut[index];
}
__global__ void naiveSumSharedArbitrary(int* in, int* out, int n, int* sums=0){
int localIndex = threadIdx.x;
int globalIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int shared[];
int *tempIn = &shared[0];
int *tempOut = &shared[n];
tempOut[localIndex] = in[globalIndex];
__syncthreads();
for (int offset = 1; offset < n; offset *= 2){
int* temp = tempIn;
tempIn = tempOut;
tempOut = temp;
if (localIndex >= offset){
tempOut[localIndex] = tempIn[localIndex-offset] + tempIn[localIndex];
}
else{
tempOut[localIndex] = tempIn[localIndex];
}
__syncthreads();
}
if (sums) sums[blockIdx.x] = tempOut[n-1];
out[globalIndex] = tempOut[localIndex];
}
__global__ void workEfficientSumSingleBlock(int* in, int* out, int n){
extern __shared__ float temp[];
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int offset = 1;
if (2*index+1<=n){
temp[2*index] = in[2*index];
temp[2*index+1] = in[2*index+1];
for (int d = n>>1; d>0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0) temp[n - 1] = 0;
for (int d = 1; d<n; d*=2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
if (ai < n && bi < n){
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
}
__syncthreads();
out[2*index] = temp[2*index];
out[2*index+1] = temp[2*index+1];
}
}
__global__ void workEfficientArbitrary(int* in, int* out, int n, int* sums=0){
extern __shared__ float temp[];
int offset = 1;
int index = threadIdx.x;
int indexA = index;
int indexB = index + (n/2);
int bankOffsetA = CONFLICT_FREE_OFFSET(indexA);
int bankOffsetB = CONFLICT_FREE_OFFSET(indexB);
temp[indexA + bankOffsetA] = in[indexA];
temp[indexB + bankOffsetB] = in[indexB];
for (int d = n>>1; d>0; d >>= 1){
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (index == 0){
if (sums) sums[blockIdx.x] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d<n; d*=2){
offset >>= 1;
__syncthreads();
if (index < d){
int ai = offset * (2*index+1) - 1;
int bi = offset * (2*index+2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
if (ai < n && bi < n){
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
}
__syncthreads();
out[indexA] = temp[indexA + bankOffsetA];
out[indexB] = temp[indexB + bankOffsetB];
}
__global__ void addIncs(int* cudaAuxIncs, int* cudaIndicesB, int n){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
// if (index < n){
// cudaIndicesB[index] = blockIdx.x; //cudaAuxIncs[blockIdx.x];
cudaIndicesB[index] += cudaAuxIncs[blockIdx.x];
// }
}
__global__ void streamCompaction(dataPacket* inRays, int* indices, dataPacket* outRays, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
dataPacket inRay = inRays[k];
if (inRay.alive){
outRays[indices[k]] = inRay;
}
}
}
struct isAlive
{
__host__ __device__
bool operator()(const dataPacket& dp)
{
return dp.alive;
}
};
struct isEven
{
__host__ __device__
bool operator()(const int x)
{
return (x%2 == 0);
}
};
struct isOne
{
__host__ __device__
bool operator()(const int x)
{
return (x == 1);
}
};
__global__ void killStream(int index, dataPacket* inRays, int* indices, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
if (k == index){
inRays[k].alive = false;
indices[k] = 0;
}
}
}
__global__ void resetStreams(dataPacket* inRays, int* indices, int numElements){
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k<numElements){
inRays[k].alive = true;
indices[k] = 1;
}
}
void testStreamCompaction(){
//Testing stream compaction
int numElements = 10;
dataPacket* arrayOfElements = new dataPacket[numElements];
for (int i=0; i<numElements; i+=1){
dataPacket rb(i);
arrayOfElements[i] = rb;
}
arrayOfElements[1].alive=false;
arrayOfElements[4].alive=false;
arrayOfElements[5].alive=false;
arrayOfElements[7].alive=false;
arrayOfElements[8].alive=false;
dataPacket* cudaArrayA;
dataPacket* cudaArrayB;
cudaMalloc((void**)&cudaArrayA, numElements*sizeof(dataPacket));
cudaMalloc((void**)&cudaArrayB, numElements*sizeof(dataPacket));
int* testin;
int* testout;
int* cputest = new int[numElements];
for (int i=0; i<numElements; i++){
if (arrayOfElements[i].alive){
cputest[i]=1;
}
else{
cputest[i]=0;
}
}
cudaMalloc((void**)&testin, numElements*sizeof(int));
cudaMalloc((void**)&testout, numElements*sizeof(int));
cudaMemcpy(cudaArrayA, arrayOfElements, numElements*sizeof(dataPacket), cudaMemcpyHostToDevice);
cudaMemcpy(cudaArrayB, arrayOfElements, numElements*sizeof(dataPacket), cudaMemcpyHostToDevice);
cudaMemcpy(testin, cputest, numElements*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(testout, cputest, numElements*sizeof(int), cudaMemcpyHostToDevice);
for (int i=0; i<numElements; i++){
std::cout<<arrayOfElements[i].index<<", "<<cputest[i]<<std::endl;
}
dim3 threadsPerBlock(64);
dim3 fullBlocksPerGrid(int(ceil(float(numElements)/64.0f)));
//scan
for (int d=1; d<=ceil(log(numElements)/log(2))+1; d++){
sum<<<fullBlocksPerGrid, threadsPerBlock>>>(testin, testout, numElements, int(pow(2.0f,d-1)));
cudaThreadSynchronize();
cudaMemcpy(cputest, testout, numElements*sizeof(int), cudaMemcpyDeviceToHost);
int* temp = testin;
testin=testout;
testout=temp;
}
//Compact
streamCompaction<<<fullBlocksPerGrid, threadsPerBlock>>>(cudaArrayA, testin, cudaArrayB, numElements);
cudaArrayA = cudaArrayB;
cudaThreadSynchronize();
cudaMemcpy(&numElements, &testin[numElements-1], 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout<<"number of rays left: "<<numElements<<std::endl;
// for (int i=0; i<numElements; i++){
// std::cout<<cputest[i]<<std::endl;
// }
cudaMemcpy(cputest, testin, numElements*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(arrayOfElements, cudaArrayA, numElements*sizeof(dataPacket), cudaMemcpyDeviceToHost);
for (int i=0; i<numElements; i++){
std::cout<<arrayOfElements[i].index<<std::endl;
}
std::cout<<"___________________________________"<<std::endl;
delete [] cputest;
cudaFree(testin);
cudaFree(testout);
delete [] arrayOfElements;
cudaFree(cudaArrayA);
cudaFree(cudaArrayB);
}
DataStream::DataStream(int numElements, dataPacket * data){
m_data = data;
if (numElements % (THREADS_PER_BLOCK*2)) numElements+=1;
m_numElementsAlive = numElements;
if (numElements % (THREADS_PER_BLOCK*2) != 0){
int counter = 1;
while (THREADS_PER_BLOCK*2*counter < numElements){
counter += 1;
}
numElements = THREADS_PER_BLOCK*2*counter;
}
m_numElements = numElements;
m_indices = new int[numElements];
for (int i=0; i<numElements; i+=1){
if (i < m_numElementsAlive){
m_indices[i] = 1;
}
else{
m_indices[i] = 0;
}
}
m_auxSums = new int[numElements/(THREADS_PER_BLOCK*2)];
for (int i=0; i<numElements/(THREADS_PER_BLOCK*2); i+=1){
m_auxSums[i] = 0;
}
//cudaInit (cudaDataA, cudaDataB, cudaIndicesA, cudaIndicesB);
cudaMalloc ((void**)&cudaDataA, numElements*sizeof (dataPacket));
cudaMalloc ((void**)&cudaDataB, numElements*sizeof (dataPacket));
cudaMalloc ((void**)&cudaIndicesA, numElements*sizeof (int));
cudaMalloc ((void**)&cudaIndicesB, numElements*sizeof (int));
cudaMalloc ((void**)&cudaAuxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof (int));
cudaMalloc ((void**)&cudaAuxIncs, numElements/(THREADS_PER_BLOCK*2)*sizeof (int));
cudaMemcpy(cudaDataA, m_data, numElements*sizeof(dataPacket), cudaMemcpyHostToDevice);
cudaMemcpy(cudaDataB, m_data, numElements*sizeof(dataPacket), cudaMemcpyHostToDevice);
cudaMemcpy(cudaIndicesA, m_indices, numElements*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cudaIndicesB, m_indices, numElements*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cudaAuxSums, m_auxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cudaAuxIncs, m_auxSums, numElements/(THREADS_PER_BLOCK*2)*sizeof(int), cudaMemcpyHostToDevice);
}
DataStream::~DataStream(){
cudaFree (cudaDataA);
cudaFree (cudaDataB);
cudaFree (cudaIndicesA);
cudaFree (cudaIndicesB);
cudaFree (cudaAuxSums);
cudaFree (cudaAuxIncs);
delete [] m_data;
delete [] m_indices;
delete [] m_auxSums;
}
void DataStream::serialScan(){
clock_t t = clock ();
m_indices[0] = 0;
for (int i=1; i<m_numElementsAlive; i+=1){
m_indices[i] = m_indices[i] + m_indices[i-1];
}
t = clock() - t;
cout<<(float)t/CLOCKS_PER_SEC<<endl;
}
void DataStream::globalSum(int* in, int* out, int n){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(m_numElements/threadsPerBlock);
for (int d=1; d<=ceil(log(m_numElementsAlive)/log(2)); d++){
sum<<<fullBlocksPerGridL, threadsPerBlockL>>>(in, out, m_numElementsAlive, powf(2.0f, d-1));
cudaThreadSynchronize();
int* temp = in;
in = out;
out = temp;
}
shift<<<fullBlocksPerGridL, threadsPerBlockL>>>(in, out, m_numElementsAlive);
}
void DataStream::thrustStreamCompact(){
clock_t t = clock ();
thrust::copy_if (m_data, m_data+m_numElements, m_indices, m_data, isOne());
t = clock() - t;
cout<<(float)t/CLOCKS_PER_SEC<<endl;
}
void DataStream::compactWorkEfficientArbitrary(){
int numElements = m_numElements;
int threadsPerBlock = THREADS_PER_BLOCK; // 8
int procsPefBlock = threadsPerBlock*2; // 16
dim3 initialScanThreadsPerBlock(procsPefBlock/2); //8
dim3 initialScanBlocksPerGrid(numElements/procsPefBlock);//
int sumSize = numElements/(THREADS_PER_BLOCK*2);
if (sumSize<2) sumSize+=2;
dim3 initialScanThreadsPerBlock2(sumSize/2); //16
dim3 initialScanBlocksPerGrid2(sumSize/(sumSize/2)+1);//1024/16
dim3 initialScanThreadsPerBlock3(procsPefBlock); //8
dim3 initialScanBlocksPerGrid3(numElements/procsPefBlock);//3
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/float(threadsPerBlock))));
workEfficientArbitrary<<<initialScanBlocksPerGrid, initialScanThreadsPerBlock, procsPefBlock*sizeof(int)>>>(cudaIndicesA, cudaIndicesB, procsPefBlock, cudaAuxSums);
for (int d=1; d<=ceil(log(sumSize)/log(2)); d++){
sum<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaAuxSums, cudaAuxIncs, sumSize, powf(2.0f, d-1));
cudaThreadSynchronize();
int* temp = cudaAuxSums;
cudaAuxSums = cudaAuxIncs;
cudaAuxIncs = temp;
}
shift<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaAuxSums, cudaAuxIncs, m_numElements/(THREADS_PER_BLOCK*2));
addIncs<<<initialScanBlocksPerGrid3, initialScanThreadsPerBlock3>>>(cudaAuxIncs, cudaIndicesB, m_numElements);
//Stream compaction from A into B, then save back into A
streamCompaction<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * temp = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = temp;
// update numrays
cudaMemcpy(&m_numElementsAlive, &cudaIndicesB[m_numElementsAlive], sizeof(int), cudaMemcpyDeviceToHost);
resetStreams<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaDataA, cudaIndicesA, m_numElementsAlive);
}
void DataStream::compactNaiveSumGlobal(){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(m_numElements/threadsPerBlock);
clock_t t = clock();
for (int d=1; d<=ceil(log(m_numElementsAlive)/log(2)); d++){
sum<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaIndicesA, cudaIndicesB, m_numElementsAlive, powf(2.0f, d-1));
checkCUDAError("kernel failed 1 !");
cudaThreadSynchronize();
int* temp = cudaIndicesA;
cudaIndicesA = cudaIndicesB;
cudaIndicesB = temp;
}
shift<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaIndicesA, cudaIndicesB, m_numElementsAlive);
checkCUDAError("kernel failed 1 !");
t = clock() - t;
cudaMemcpy(m_indices, cudaIndicesB, m_numElementsAlive*sizeof(int), cudaMemcpyDeviceToHost);
cout<<m_indices[m_numElementsAlive-1]<<": ";
cout<<(float) t / CLOCKS_PER_SEC<<endl;
// for (int i=0; i<numAlive(); i+=1){
// cout<<m_indices[i];
// if (i<numAlive()-1) cout<<",";
// }
// cout<<endl;
// Stream compaction from A into B, then save back into A
streamCompaction<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * temp = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = temp;
// update numrays
cudaMemcpy(&m_numElementsAlive, &cudaIndicesA[m_numElementsAlive-1], sizeof(int), cudaMemcpyDeviceToHost);
resetStreams<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaDataA, cudaIndicesA, m_numElementsAlive);
}
void DataStream::compactNaiveSumSharedSingleBlock(){
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/float(threadsPerBlock))));
naiveSumSharedSingleBlock<<<fullBlocksPerGridL, threadsPerBlockL, 2*m_numElements*sizeof(int)>>>(cudaIndicesA, cudaIndicesB, m_numElements);
checkCUDAError("kernel failed!");
cudaMemcpy(m_indices, cudaIndicesB, m_numElements*sizeof(int), cudaMemcpyDeviceToHost);
}
void DataStream::compactNaiveSumSharedArbitrary(){
////////////////////////////////////////////////////////////////////////////////////////
int threadsPerBlock = THREADS_PER_BLOCK;
dim3 threadsPerBlockL(threadsPerBlock*2);
dim3 fullBlocksPerGridL(m_numElements/(threadsPerBlock*2));
naiveSumSharedArbitrary<<<fullBlocksPerGridL, threadsPerBlockL, 2*m_numElements/(m_numElements/(threadsPerBlock*2))*sizeof(int)>>>(cudaIndicesA, cudaIndicesB, threadsPerBlock*2, cudaAuxSums);
checkCUDAError("kernel failed 1 !");
////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
int sumSize = m_numElements/(THREADS_PER_BLOCK*2);
dim3 initialScanThreadsPerBlock2(threadsPerBlock);
dim3 initialScanBlocksPerGrid2(sumSize/threadsPerBlock+1);
dim3 threadsPerBlockOld(threadsPerBlock);
dim3 fullBlocksPerGridOld(int(ceil(float(sumSize)/float(threadsPerBlock))));
// cudaMemcpy(cudaAuxIncs, cudaAuxSums, m_numElements/(THREADS_PER_BLOCK*2)*sizeof(int), cudaMemcpyDeviceToDevice);
for (int d=1; d<=ceil(log(sumSize)/log(2)); d++){
sum<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaAuxSums, cudaAuxIncs, sumSize, powf(2.0f, d-1));
cudaThreadSynchronize();
int* temp = cudaAuxSums;
cudaAuxSums = cudaAuxIncs;
cudaAuxIncs = temp;
}
shift<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaAuxSums, cudaAuxIncs, m_numElements/(THREADS_PER_BLOCK*2));
addIncs<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaAuxIncs, cudaIndicesB, m_numElements);
shift<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaIndicesB, cudaIndicesA, m_numElementsAlive);
int * temp = cudaIndicesA;
cudaIndicesA = cudaIndicesB;
cudaIndicesB = temp;
dim3 threadsPerBlockLL(threadsPerBlock);
dim3 fullBlocksPerGridLL(m_numElements/threadsPerBlock);
clock_t t = clock();
//Stream compaction from A into B, then save back into A
streamCompaction<<<fullBlocksPerGridLL, threadsPerBlockLL>>>(cudaDataA, cudaIndicesB, cudaDataB, m_numElementsAlive);
dataPacket * tempDP = cudaDataA;
cudaDataA = cudaDataB;
cudaDataB = tempDP;
t = clock() - t;
cout<<(float)t / CLOCKS_PER_SEC<<endl;
// // update numrays
cudaMemcpy(&m_numElementsAlive, &cudaIndicesA[m_numElementsAlive-1], sizeof(int), cudaMemcpyDeviceToHost);
resetStreams<<<fullBlocksPerGridL, threadsPerBlockL>>>(cudaDataA, cudaIndicesA, m_numElementsAlive);
}
bool DataStream::getData(int index, dataPacket& data){
if (index > m_numElements) return false;
data = m_data[index];
return true;
}
int DataStream::numAlive(){
return m_numElementsAlive;
}
void DataStream::fetchDataFromGPU(){
cudaMemcpy(m_data, cudaDataA, m_numElementsAlive*sizeof(dataPacket), cudaMemcpyDeviceToHost);
}
void DataStream::kill(int index){
if (index > m_numElementsAlive) return;
dim3 threadsPerBlockL(64);
dim3 fullBlocksPerGridL(int(ceil(float(m_numElementsAlive)/64.0f)));
killStream<<<fullBlocksPerGridL, threadsPerBlockL>>>(index, cudaDataA, cudaIndicesA, m_numElementsAlive);
cudaMemcpy(m_indices, cudaIndicesA, m_numElementsAlive*sizeof(int), cudaMemcpyDeviceToHost);
} |
66f2c054c9b98dec63a5a038a261955617ae697d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
__global__ void Kernel_Vanilla_Call_single(
optionData data,
double * d_s,
double * d_normals,
unsigned N_STEPS,
unsigned N_SIMULS)
{
int s_idx = threadIdx.x + blockIdx.x * blockDim.x; // thread index
int n_idx = (s_idx)* N_STEPS; // for random number indexing
// check thread # < # of simuls
if (s_idx < N_SIMULS) {
int n = 0;
// Initialize
double s_curr = data.S0;
double T = data.T;
double sig = data.sig;
double r = data.r;
double dt = data.dt;
double sqrdt = data.sqrdt;
double K = data.K;
double payoff = 0.0;
do {
s_curr = s_curr * exp((r - (sig*sig)*0.5)*dt + sig*sqrdt*d_normals[n_idx]);
n_idx++; // random number index
n++; // time stepping
} while (n < N_STEPS);
// payoff using ternary operator
payoff = (s_curr > K) ? (s_curr - K) : 0;
// to save results, synchronize threads
__syncthreads();
// save payoff
d_s[s_idx] = payoff;
}
}
void Vanilla_Call_single(
optionData option,
double * d_s,
double * d_normals,
unsigned N_STEPS,
unsigned N_SIMULS) {
const unsigned BLOCK_SIZE = 1024; // # of threads in a block (1-dimension threads & block)
const unsigned GRID_SIZE = CEIL(N_SIMULS, BLOCK_SIZE); // # of block in a grid
Kernel_Vanilla_Call_single << <GRID_SIZE, BLOCK_SIZE >> >
(option, d_s, d_normals, N_STEPS, N_SIMULS);
} | 66f2c054c9b98dec63a5a038a261955617ae697d.cu | #include "kernel.h"
__global__ void Kernel_Vanilla_Call_single(
optionData data,
double * d_s,
double * d_normals,
unsigned N_STEPS,
unsigned N_SIMULS)
{
int s_idx = threadIdx.x + blockIdx.x * blockDim.x; // thread index
int n_idx = (s_idx)* N_STEPS; // for random number indexing
// check thread # < # of simuls
if (s_idx < N_SIMULS) {
int n = 0;
// Initialize
double s_curr = data.S0;
double T = data.T;
double sig = data.sig;
double r = data.r;
double dt = data.dt;
double sqrdt = data.sqrdt;
double K = data.K;
double payoff = 0.0;
do {
s_curr = s_curr * exp((r - (sig*sig)*0.5)*dt + sig*sqrdt*d_normals[n_idx]);
n_idx++; // random number index
n++; // time stepping
} while (n < N_STEPS);
// payoff using ternary operator
payoff = (s_curr > K) ? (s_curr - K) : 0;
// to save results, synchronize threads
__syncthreads();
// save payoff
d_s[s_idx] = payoff;
}
}
void Vanilla_Call_single(
optionData option,
double * d_s,
double * d_normals,
unsigned N_STEPS,
unsigned N_SIMULS) {
const unsigned BLOCK_SIZE = 1024; // # of threads in a block (1-dimension threads & block)
const unsigned GRID_SIZE = CEIL(N_SIMULS, BLOCK_SIZE); // # of block in a grid
Kernel_Vanilla_Call_single << <GRID_SIZE, BLOCK_SIZE >> >
(option, d_s, d_normals, N_STEPS, N_SIMULS);
} |
60c95367b1f5b4ebd21e699b892947397df70016.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/core/solver/l2l.cu
/// @brief Compute local to local
///
/// @author Mu Yang <[email protected]>
///
#include <nbfmm/core.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_core
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute local to local
///
/// @param[in] cell_size the size of cells in the current level per side.
/// @param[in] base_dim the number of cells in the base level per side.
/// @param[in,out] level_effect the cell effects of the current level.
///
__global__ void l2lDevice(
const int cell_size,
const int base_dim,
float2* level_effect
) {
const int thread_idx_x = threadIdx.x + blockIdx.x * blockDim.x;
const int thread_idx_y = threadIdx.y + blockIdx.y * blockDim.y;
if ( thread_idx_x >= base_dim / cell_size || thread_idx_y >= base_dim / cell_size ) {
return;
}
const int idx = ( thread_idx_x + thread_idx_y * base_dim) * cell_size;
const int idx_parent = ((thread_idx_x & ~1) + (thread_idx_y & ~1) * base_dim) * cell_size + base_dim * base_dim;
level_effect[idx] += level_effect[idx_parent];
}
/// @}
// L2L
void nbfmm::Solver::l2l() {
if ( num_level_ <= 1 ) {
return;
}
for ( auto level = num_level_-2; level >= 0; --level ) {
const int cell_size = 1 << level;
const int level_dim = base_dim_ / cell_size;
const int block_dim_side = (level_dim < kMaxBlockDim) ? level_dim : kMaxBlockDim;
const int grid_dim_side = (level_dim < kMaxBlockDim) ? 1 : (level_dim / block_dim_side);
const dim3 block_dim(block_dim_side, block_dim_side);
const dim3 grid_dim(grid_dim_side, grid_dim_side);
const int offset = level * base_dim_ * base_dim_;
hipLaunchKernelGGL(( l2lDevice), dim3(block_dim), dim3(grid_dim), 0, 0, cell_size, base_dim_, gpuptr_cell_effect_ + offset);
}
}
| 60c95367b1f5b4ebd21e699b892947397df70016.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file source/nbfmm/core/solver/l2l.cu
/// @brief Compute local to local
///
/// @author Mu Yang <[email protected]>
///
#include <nbfmm/core.hpp>
#include <nbfmm/utility.hpp>
/// @addtogroup impl_core
/// @{
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Compute local to local
///
/// @param[in] cell_size the size of cells in the current level per side.
/// @param[in] base_dim the number of cells in the base level per side.
/// @param[in,out] level_effect the cell effects of the current level.
///
__global__ void l2lDevice(
const int cell_size,
const int base_dim,
float2* level_effect
) {
const int thread_idx_x = threadIdx.x + blockIdx.x * blockDim.x;
const int thread_idx_y = threadIdx.y + blockIdx.y * blockDim.y;
if ( thread_idx_x >= base_dim / cell_size || thread_idx_y >= base_dim / cell_size ) {
return;
}
const int idx = ( thread_idx_x + thread_idx_y * base_dim) * cell_size;
const int idx_parent = ((thread_idx_x & ~1) + (thread_idx_y & ~1) * base_dim) * cell_size + base_dim * base_dim;
level_effect[idx] += level_effect[idx_parent];
}
/// @}
// L2L
void nbfmm::Solver::l2l() {
if ( num_level_ <= 1 ) {
return;
}
for ( auto level = num_level_-2; level >= 0; --level ) {
const int cell_size = 1 << level;
const int level_dim = base_dim_ / cell_size;
const int block_dim_side = (level_dim < kMaxBlockDim) ? level_dim : kMaxBlockDim;
const int grid_dim_side = (level_dim < kMaxBlockDim) ? 1 : (level_dim / block_dim_side);
const dim3 block_dim(block_dim_side, block_dim_side);
const dim3 grid_dim(grid_dim_side, grid_dim_side);
const int offset = level * base_dim_ * base_dim_;
l2lDevice<<<block_dim, grid_dim>>>(cell_size, base_dim_, gpuptr_cell_effect_ + offset);
}
}
|
441f73b893da8468f9b353b2bbcff5e14062568c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
@EECE528 Project - BDD Parallelization
@Authors: Yu Lei, Haotian Zhang
@Date: 2017/12/3
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include"cuda.h"
#define MAXNODENUM 16000
#define MAXLINE 256 /* Maximum length of each input line read. */
typedef struct bddNode_ {
float index;
int value;
struct bddNode_ *lowChild;
struct bddNode_ *highChild;
} bddNode;
typedef struct bddTree_ {
int totalNodeNum;
int totalLevels;
bddNode *topNode;
bddNode *zeroLeaf;
bddNode *oneLeaf;
} bddTree;
typedef struct applyManager_ {
int maxNodeNum;
int currentSpaceNum;
} applyManager;
typedef struct pattern_{
int size;
float index[MAXNODENUM];
bddNode* left[MAXNODENUM];
bddNode* right[MAXNODENUM];
}pattern;
pattern patterns;
void bddTreeInit(bddTree *bdd) {
bddNode *zero,*one;
bdd->totalNodeNum = 0;
bdd->totalLevels = 0;
zero = (bddNode*)malloc(sizeof(bddNode));
one = (bddNode*)malloc(sizeof(bddNode));
one->index = INFINITY;
zero->index = INFINITY;
zero->value = 0;
zero->lowChild = NULL;
zero->highChild = NULL;
one->value = 1;
one->lowChild = NULL;
one->highChild = NULL;
bdd->zeroLeaf = zero;
bdd->oneLeaf = one;
}
void applyManagerInit(applyManager *appMan, int maxNodes){
appMan->maxNodeNum = maxNodes;
appMan->currentSpaceNum = 0;
}
bddTree* readBDD(char *filename) {
FILE *f;
bddTree *bdd;
int nodeTotal;
int levelTotal;
int nodeNum;
int nodeIndex;
int lowC;
int highC;
f = fopen(filename,"r");
if (!f) {
fprintf(stderr, "cannot open file \"%s\"\n", filename);
return NULL;
}
bdd = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bdd);
char linebuf[MAXLINE];
fgets(linebuf,MAXLINE,f);
sscanf(linebuf, "%d %d", &nodeTotal, &levelTotal);
bddNode *array[10000];
bdd->totalNodeNum = nodeTotal;
bdd->totalLevels = levelTotal;
while (fgets(linebuf, MAXLINE, f) != NULL) {
sscanf(linebuf, "%d %d %d %d", &nodeNum, &nodeIndex, &lowC, &highC);
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
newNode->index = nodeIndex;
newNode->value = -1;
if (lowC == -10) {
newNode->lowChild = bdd->zeroLeaf;
} else if (lowC == -11) {
newNode->lowChild = bdd->oneLeaf;
} else {
newNode->lowChild = array[lowC];
}
if (highC == -10) {
newNode->highChild = bdd->zeroLeaf;
} else if (highC == -11) {
newNode->highChild = bdd->oneLeaf;
} else {
newNode->highChild = array[highC];
}
array[nodeNum] = newNode;
bdd->topNode = newNode;
}
fclose(f);
return bdd;
}
void printNode(bddNode *node) {
printf("Node: %f children: \t%f \t%f.\n", node->index, node->lowChild->index, node->highChild->index);
if (node->lowChild->index != INFINITY) {
printNode(node->lowChild);
}
if (node->highChild->index != INFINITY) {
printNode(node->highChild);
}
}
void printBDD(bddTree *bdd) {
printf("\nPrinting bdd:\n");
printf("Total nodes in bdd: %d\n", bdd->totalNodeNum);
printNode(bdd->topNode);
}
void recursFree(bddNode *node) {
if (node->lowChild->index != INFINITY) {
recursFree(node->lowChild);
}
if (node->highChild->index != INFINITY) {
recursFree(node->highChild);
}
free(node);
}
void freeBDD(bddTree *bdd) {
recursFree(bdd->topNode);
free(bdd->zeroLeaf);
free(bdd->oneLeaf);
free(bdd);
}
// void addNew(int *size) {
// }i
float *d_index;
int *d_result,*check_result;
bddNode *d_left, *d_right,*cleft,*cright,*d_array_left,*d_array_right;
float *d_array_index;
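// Kernel: each thread compares one stored (index, lowChild, highChild) triple against the
// candidate node and writes its position to d_result on a match (0 means no match found).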
__global__
void check_nodec(int size,int *d_result,bddNode *d_left,bddNode *d_right,float *d_index,bddNode **d_array_left,bddNode **d_array_right,float *d_array_index){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i==0){
*d_result = 0;
}
if(i < size){
if(d_array_index[i] == *d_index && d_array_left[i] == d_left && d_array_right[i] == d_right){
*d_result = i;
}
if(i == 0 && *d_result == 1){
d_array_index[size+1]=*d_index;
d_array_right[size+1]=d_right;
d_array_left[size+1]=d_left;
}
}
}
int check_node(float index,bddNode* left, bddNode *right){
int size = patterns.size;
float cindex;
// for(i=0;i<patterns.size;i++){
// if(index == patterns.index[i] && left == patterns.left[i] && right == patterns.right[i]){
// return i;
// }
// }
cleft = left;
cright = right;
cindex = index;
hipMemcpy(d_left,cleft,sizeof(bddNode*),hipMemcpyHostToDevice);
hipMemcpy(d_right,cright,sizeof(bddNode*),hipMemcpyHostToDevice);
hipMemcpy(d_index,&cindex,sizeof(float),hipMemcpyHostToDevice); // d_index holds a single float
hipLaunchKernelGGL(( check_nodec), dim3((size+511)/512),dim3(512), 0, 0, size,d_result,d_left,d_right,d_index,&d_array_left,&d_array_right,d_array_index);
check_result = (int*)malloc(sizeof(int));
hipMemcpy(check_result,d_result,sizeof(int),hipMemcpyDeviceToHost);
if(*check_result == 0){ // 0 means no existing node matched, so record the new pattern
patterns.index[patterns.size] = index;
patterns.left[patterns.size] = left;
patterns.right[patterns.size] = right;
patterns.size++;
}
return *check_result;
}
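// Recursive APPLY (AND) of two BDDs: terminals combine as logical AND, children are visited
// in variable-index order, equal children collapse, and check_node reuses existing nodes.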
bddNode* applyBDDs(bddTree *result, bddNode *node1, bddNode *node2, applyManager *appMan){
bddNode *left, *right;
float newNodeIndex;
int checkNode = 0;
if(node1->value == 0 && node2->value == 0){
return result->zeroLeaf;
}else if(node1->value == 0 && node2->value == 1){
return result->zeroLeaf;
}else if(node1->value == 1 && node2->value == 0){
return result->zeroLeaf;
}else if(node1->value == 1 && node2->value == 1){
return result->oneLeaf;
}
// printf("node1:%lf node2:%lf",node1->index, node2->index);
if(node1->index == node2->index){
left = applyBDDs(result, node1->lowChild,node2->lowChild,appMan);
right = applyBDDs(result, node1->highChild,node2->highChild,appMan);
}else if (node1->index < node2->index){
left = applyBDDs(result,node1->lowChild,node2,appMan);
right = applyBDDs(result,node1->highChild,node2,appMan);
newNodeIndex = node1 -> index;
}else if (node1->index > node2->index){
left = applyBDDs(result,node1,node2->lowChild,appMan);
right = applyBDDs(result,node1,node2->highChild,appMan);
newNodeIndex = node2 -> index;
}
// return result -> oneLeaf;
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
if(left == right){
return left;
}else{
if(checkNode = check_node(newNodeIndex,left,right)){
newNode->index = patterns.index[checkNode];
newNode->value = -1;
newNode->lowChild = patterns.left[checkNode];
newNode->highChild = patterns.right[checkNode];
}
else{
newNode->index = newNodeIndex;
newNode->value = -1;
newNode->lowChild = left;
newNode->highChild = right;
}
return newNode;
}
}
int main(int argc, char* argv[]) {
bddTree *bdd1, *bdd2;
bddTree *bddResult;
clock_t begin,end;
if (argc !=3) {
fprintf(stderr,"usage: a.out file1 file2\n");
exit(1);
}
bdd1 = readBDD(argv[1]);
bdd2 = readBDD(argv[2]);
bddResult = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bddResult);
applyManager *appMan;
appMan = (applyManager*)malloc(sizeof(applyManager));
applyManagerInit(appMan, (int)pow(2, (bdd1->totalLevels + bdd2->totalLevels)));
patterns.size = 0;
check_result = (int*)malloc(sizeof(int));
hipMalloc(&d_result,sizeof(int));
hipMalloc(&d_index,sizeof(float));
hipMalloc(&d_left,sizeof(bddNode*));
hipMalloc(&d_right,sizeof(bddNode*));
hipMalloc(&d_array_index,MAXNODENUM*sizeof(float));
hipMalloc(&d_array_right,MAXNODENUM*sizeof(bddNode*));
hipMalloc(&d_array_left,MAXNODENUM*sizeof(bddNode*));
begin = clock();
bddResult->topNode = applyBDDs(bddResult, bdd1->topNode, bdd2->topNode, appMan);
end = clock();
printf("time: %f sec\n",(double)(end-begin)/CLOCKS_PER_SEC);
free(bdd1);
free(bdd2);
free(bddResult);
hipFree(d_result);
hipFree(d_index);
hipFree(d_left);
hipFree(d_right);
hipFree(d_array_index);
hipFree(d_array_right);
hipFree(d_array_left);
free(appMan);
return 0;
}
| 441f73b893da8468f9b353b2bbcff5e14062568c.cu | /*
@EECE528 Project - BDD Parallelization
@Authors: Yu Lei, Haotian Zhang
@Date: 2017/12/3
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include"cuda.h"
#define MAXNODENUM 16000
#define MAXLINE 256 /* Maximum length of each input line read. */
typedef struct bddNode_ {
float index;
int value;
struct bddNode_ *lowChild;
struct bddNode_ *highChild;
} bddNode;
typedef struct bddTree_ {
int totalNodeNum;
int totalLevels;
bddNode *topNode;
bddNode *zeroLeaf;
bddNode *oneLeaf;
} bddTree;
typedef struct applyManager_ {
int maxNodeNum;
int currentSpaceNum;
} applyManager;
typedef struct pattern_{
int size;
float index[MAXNODENUM];
bddNode* left[MAXNODENUM];
bddNode* right[MAXNODENUM];
}pattern;
pattern patterns;
void bddTreeInit(bddTree *bdd) {
bddNode *zero,*one;
bdd->totalNodeNum = 0;
bdd->totalLevels = 0;
zero = (bddNode*)malloc(sizeof(bddNode));
one = (bddNode*)malloc(sizeof(bddNode));
one->index = INFINITY;
zero->index = INFINITY;
zero->value = 0;
zero->lowChild = NULL;
zero->highChild = NULL;
one->value = 1;
one->lowChild = NULL;
one->highChild = NULL;
bdd->zeroLeaf = zero;
bdd->oneLeaf = one;
}
void applyManagerInit(applyManager *appMan, int maxNodes){
appMan->maxNodeNum = maxNodes;
appMan->currentSpaceNum = 0;
}
bddTree* readBDD(char *filename) {
FILE *f;
bddTree *bdd;
int nodeTotal;
int levelTotal;
int nodeNum;
int nodeIndex;
int lowC;
int highC;
f = fopen(filename,"r");
if (!f) {
fprintf(stderr, "cannot open file \"%s\"\n", filename);
return NULL;
}
bdd = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bdd);
char linebuf[MAXLINE];
fgets(linebuf,MAXLINE,f);
sscanf(linebuf, "%d %d", &nodeTotal, &levelTotal);
bddNode *array[10000];
bdd->totalNodeNum = nodeTotal;
bdd->totalLevels = levelTotal;
while (fgets(linebuf, MAXLINE, f) != NULL) {
sscanf(linebuf, "%d %d %d %d", &nodeNum, &nodeIndex, &lowC, &highC);
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
newNode->index = nodeIndex;
newNode->value = -1;
if (lowC == -10) {
newNode->lowChild = bdd->zeroLeaf;
} else if (lowC == -11) {
newNode->lowChild = bdd->oneLeaf;
} else {
newNode->lowChild = array[lowC];
}
if (highC == -10) {
newNode->highChild = bdd->zeroLeaf;
} else if (highC == -11) {
newNode->highChild = bdd->oneLeaf;
} else {
newNode->highChild = array[highC];
}
array[nodeNum] = newNode;
bdd->topNode = newNode;
}
fclose(f);
return bdd;
}
void printNode(bddNode *node) {
printf("Node: %f children: \t%f \t%f.\n", node->index, node->lowChild->index, node->highChild->index);
if (node->lowChild->index != INFINITY) {
printNode(node->lowChild);
}
if (node->highChild->index != INFINITY) {
printNode(node->highChild);
}
}
void printBDD(bddTree *bdd) {
printf("\nPrinting bdd:\n");
printf("Total nodes in bdd: %d\n", bdd->totalNodeNum);
printNode(bdd->topNode);
}
void recursFree(bddNode *node) {
if (node->lowChild->index != INFINITY) {
recursFree(node->lowChild);
}
if (node->highChild->index != INFINITY) {
recursFree(node->highChild);
}
free(node);
}
void freeBDD(bddTree *bdd) {
recursFree(bdd->topNode);
free(bdd->zeroLeaf);
free(bdd->oneLeaf);
free(bdd);
}
// void addNew(int *size) {
// }i
float *d_index;
int *d_result,*check_result;
bddNode *d_left, *d_right,*cleft,*cright,*d_array_left,*d_array_right;
float *d_array_index;
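// Kernel: each thread compares one stored (index, lowChild, highChild) triple against the
// candidate node and writes its position to d_result on a match (0 means no match found).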
__global__
void check_nodec(int size,int *d_result,bddNode *d_left,bddNode *d_right,float *d_index,bddNode **d_array_left,bddNode **d_array_right,float *d_array_index){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i==0){
*d_result = 0;
}
if(i < size){
if(d_array_index[i] == *d_index && d_array_left[i] == d_left && d_array_right[i] == d_right){
*d_result = i;
}
if(i == 0 && *d_result == 1){
d_array_index[size+1]=*d_index;
d_array_right[size+1]=d_right;
d_array_left[size+1]=d_left;
}
}
}
int check_node(float index,bddNode* left, bddNode *right){
int size = patterns.size;
float cindex;
// for(i=0;i<patterns.size;i++){
// if(index == patterns.index[i] && left == patterns.left[i] && right == patterns.right[i]){
// return i;
// }
// }
cleft = left;
cright = right;
cindex = index;
cudaMemcpy(d_left,cleft,sizeof(bddNode*),cudaMemcpyHostToDevice);
cudaMemcpy(d_right,cright,sizeof(bddNode*),cudaMemcpyHostToDevice);
cudaMemcpy(d_index,&cindex,sizeof(float),cudaMemcpyHostToDevice); // d_index holds a single float
check_nodec<<<(size+511)/512,512>>>(size,d_result,d_left,d_right,d_index,&d_array_left,&d_array_right,d_array_index);
check_result = (int*)malloc(sizeof(int));
cudaMemcpy(check_result,d_result,sizeof(int),cudaMemcpyDeviceToHost);
if(*check_result == 0){ // 0 means no existing node matched, so record the new pattern
patterns.index[patterns.size] = index;
patterns.left[patterns.size] = left;
patterns.right[patterns.size] = right;
patterns.size++;
}
return *check_result;
}
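// Recursive APPLY (AND) of two BDDs: terminals combine as logical AND, children are visited
// in variable-index order, equal children collapse, and check_node reuses existing nodes.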
bddNode* applyBDDs(bddTree *result, bddNode *node1, bddNode *node2, applyManager *appMan){
bddNode *left, *right;
float newNodeIndex;
int checkNode = 0;
if(node1->value == 0 && node2->value == 0){
return result->zeroLeaf;
}else if(node1->value == 0 && node2->value == 1){
return result->zeroLeaf;
}else if(node1->value == 1 && node2->value == 0){
return result->zeroLeaf;
}else if(node1->value == 1 && node2->value == 1){
return result->oneLeaf;
}
// printf("node1:%lf node2:%lf",node1->index, node2->index);
if(node1->index == node2->index){
left = applyBDDs(result, node1->lowChild,node2->lowChild,appMan);
right = applyBDDs(result, node1->highChild,node2->highChild,appMan);
}else if (node1->index < node2->index){
left = applyBDDs(result,node1->lowChild,node2,appMan);
right = applyBDDs(result,node1->highChild,node2,appMan);
newNodeIndex = node1 -> index;
}else if (node1->index > node2->index){
left = applyBDDs(result,node1,node2->lowChild,appMan);
right = applyBDDs(result,node1,node2->highChild,appMan);
newNodeIndex = node2 -> index;
}
// return result -> oneLeaf;
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
if(left == right){
return left;
}else{
if(checkNode = check_node(newNodeIndex,left,right)){
newNode->index = patterns.index[checkNode];
newNode->value = -1;
newNode->lowChild = patterns.left[checkNode];
newNode->highChild = patterns.right[checkNode];
}
else{
newNode->index = newNodeIndex;
newNode->value = -1;
newNode->lowChild = left;
newNode->highChild = right;
}
return newNode;
}
}
int main(int argc, char* argv[]) {
bddTree *bdd1, *bdd2;
bddTree *bddResult;
clock_t begin,end;
if (argc !=3) {
fprintf(stderr,"usage: a.out file1 file2\n");
exit(1);
}
bdd1 = readBDD(argv[1]);
bdd2 = readBDD(argv[2]);
bddResult = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bddResult);
applyManager *appMan;
appMan = (applyManager*)malloc(sizeof(applyManager));
applyManagerInit(appMan, (int)pow(2, (bdd1->totalLevels + bdd2->totalLevels)));
patterns.size = 0;
check_result = (int*)malloc(sizeof(int));
cudaMalloc(&d_result,sizeof(int));
cudaMalloc(&d_index,sizeof(float));
cudaMalloc(&d_left,sizeof(bddNode*));
cudaMalloc(&d_right,sizeof(bddNode*));
cudaMalloc(&d_array_index,MAXNODENUM*sizeof(float));
cudaMalloc(&d_array_right,MAXNODENUM*sizeof(bddNode*));
cudaMalloc(&d_array_left,MAXNODENUM*sizeof(bddNode*));
begin = clock();
bddResult->topNode = applyBDDs(bddResult, bdd1->topNode, bdd2->topNode, appMan);
end = clock();
printf("time: %f sec\n",(double)(end-begin)/CLOCKS_PER_SEC);
free(bdd1);
free(bdd2);
free(bddResult);
cudaFree(d_result);
cudaFree(d_index);
cudaFree(d_left);
cudaFree(d_right);
cudaFree(d_array_index);
cudaFree(d_array_right);
cudaFree(d_array_left);
free(appMan);
return 0;
}
|
fd35268e874c35d700858d1cfc2e23645fd7500d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
float *d_A, *d_B, *d_C, *d_D, *d_temp; // device buffers for the float matrices
HANDLE_ERROR(hipMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
hipFree (d_A);
hipFree (d_B);
hipFree (d_C);
hipFree (d_D);
hipFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} | fd35268e874c35d700858d1cfc2e23645fd7500d.cu |
#include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define SIZE 10
using namespace std;
void print_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
cout<<array[i][j]<<" ";
}
cout<<endl;
}
}
void initialize_array (float array[][SIZE])
{
for (int i=0; i<SIZE; i++) {
for (int j=0; j<SIZE; j++) {
array[i][j] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
}
void array_decl ()
{
static float A[SIZE][SIZE];
static float B[SIZE][SIZE];
static float C[SIZE][SIZE];
static float D[SIZE][SIZE];
initialize_array(A);
initialize_array(B);
initialize_array(C);
initialize_array(D);
float *d_A, *d_B, *d_C, *d_D, *d_temp; // device buffers for the float matrices
HANDLE_ERROR(cudaMalloc((void**)&d_A, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_B, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_C, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_D, SIZE*SIZE*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&d_temp, SIZE*SIZE*sizeof(float)));
cudaFree (d_A);
cudaFree (d_B);
cudaFree (d_C);
cudaFree (d_D);
cudaFree (d_temp);
}
int main (int argc, char **argv) {
array_decl();
} |
f48fd9796e39846cca6a6833c92c586df069268a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include <assert.h>
#define N 2//64
__device__ int bar(float* A) {
if(threadIdx.x != 0) {
return 0;
}
return 1;
}
__global__ void foo(float* A) {
int y = bar(A);
A[threadIdx.x]=y;
}
int main(void){
int i;
float *A;
float *dev_A;
float size= N*sizeof(float);
A=(float*)malloc(size);
for(i=0;i<N;i++)
A[i]=2;
hipMalloc((void**)&dev_A,size);
hipMemcpy(dev_A, A,size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1),dim3(N), 0, 0, dev_A);
//ESBMC_verify_kernel_f(foo,1,N,dev_A);
hipMemcpy(A,dev_A,size,hipMemcpyDeviceToHost);
for(i=0;i<N;i++){
// printf("%.2f ",A[i]);
if (i!=0)
assert(A[i]!=0);
}
hipFree(dev_A);
free(A);
return 0;
}
| f48fd9796e39846cca6a6833c92c586df069268a.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include <assert.h>
#define N 2//64
__device__ int bar(float* A) {
if(threadIdx.x != 0) {
return 0;
}
return 1;
}
__global__ void foo(float* A) {
int y = bar(A);
A[threadIdx.x]=y;
}
int main(void){
int i;
float *A;
float *dev_A;
float size= N*sizeof(float);
A=(float*)malloc(size);
for(i=0;i<N;i++)
A[i]=2;
cudaMalloc((void**)&dev_A,size);
cudaMemcpy(dev_A, A,size, cudaMemcpyHostToDevice);
foo<<<1,N>>>(dev_A);
//ESBMC_verify_kernel_f(foo,1,N,dev_A);
cudaMemcpy(A,dev_A,size,cudaMemcpyDeviceToHost);
for(i=0;i<N;i++){
// printf("%.2f ",A[i]);
if (i!=0)
assert(A[i]!=0);
}
cudaFree(dev_A);
free(A);
return 0;
}
|
c68dc414b0e9b2bc4c48c8798a210f91f5aac88d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_backprojection.h"
__host__ void host2_backprojection(float *d_img, float *d_proj, float *float_para, int *int_para)
{
}
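// Stages the na x nb projection in an array bound to a texture object (linear filtering,
// clamp addressing) and launches one backprojection thread per voxel of the nx*ny*nz volume.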
__host__ void host_backprojection(float *d_img, float *d_proj, float angle,float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz)
{
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
struct hipExtent extent = make_hipExtent(na, nb, 1);
hipArray *array_proj;
hipMalloc3DArray(&array_proj, &channelDesc, extent);
hipMemcpy3DParms copyParams = {0};
hipPitchedPtr dp_proj = make_hipPitchedPtr((void*) d_proj, na * sizeof(float), na, nb);
copyParams.extent = extent;
copyParams.kind = hipMemcpyDeviceToDevice;
copyParams.srcPtr = dp_proj;
copyParams.dstArray = array_proj;
hipMemcpy3D(©Params);
hipResourceDesc resDesc;
hipTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.addressMode[2] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 0;
resDesc.res.array.array = array_proj;
hipTextureObject_t tex_proj = 0;
// hipTextureObject_t tex_proj = host_create_texture_object(d_proj, nb, na, 1);
hipCreateTextureObject(&tex_proj, &resDesc, &texDesc, NULL);
const dim3 gridSize_img((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z);
const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
hipLaunchKernelGGL(( kernel_backprojection), dim3(gridSize_img), dim3(blockSize), 0, 0, d_img, tex_proj, angle, SO, SD, na, nb, da, db, ai, bi, nx, ny, nz);
hipDeviceSynchronize();
hipFreeArray(array_proj);
hipDestroyTextureObject(tex_proj);
}
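// Voxel-driven backprojection: rotate the voxel corner points into the source frame, project
// them onto the detector, and accumulate the overlapped bins weighted by their coverage wa*wb.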
__global__ void kernel_backprojection(float *img, hipTextureObject_t tex_proj, float angle, float SO, float SD, int na, int nb, float da, float db, float ai, float bi, int nx, int ny, int nz){
int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y;
int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
// angle += 3.141592653589793;
img[id] = 0.0f;
// float sphi = __sinf(angle);
// float cphi = __cosf(angle);
float sphi = __sinf(angle);
float cphi = __cosf(angle);
// float dd_voxel[3];
float xc, yc, zc;
xc = (float)ix - nx / 2 + 0.5f;
yc = (float)iy - ny / 2 + 0.5f;
zc = (float)iz - nz / 2 + 0.5f;
// voxel boundary coordinates
float xll, yll, zll, xlr, ylr, zlr, xrl, yrl, zrl, xrr, yrr, zrr, xt, yt, zt, xb, yb, zb;
// xll = +(xc - 0.5f) * cphi + (yc - 0.5f) * sphi;
// yll = -(xc - 0.5f) * sphi + (yc - 0.5f) * cphi;
// xrr = +(xc + 0.5f) * cphi + (yc + 0.5f) * sphi;
// yrr = -(xc + 0.5f) * sphi + (yc + 0.5f) * cphi;
// zll = zc; zrr = zc;
// xrl = +(xc + 0.5f) * cphi + (yc - 0.5f) * sphi;
// yrl = -(xc + 0.5f) * sphi + (yc - 0.5f) * cphi;
// xlr = +(xc - 0.5f) * cphi + (yc + 0.5f) * sphi;
// ylr = -(xc - 0.5f) * sphi + (yc + 0.5f) * cphi;
// zrl = zc; zlr = zc;
xll = +xc * cphi + yc * sphi - 0.5f;
yll = -xc * sphi + yc * cphi - 0.5f;
xrr = +xc * cphi + yc * sphi + 0.5f;
yrr = -xc * sphi + yc * cphi + 0.5f;
zll = zc; zrr = zc;
xrl = +xc * cphi + yc * sphi + 0.5f;
yrl = -xc * sphi + yc * cphi - 0.5f;
xlr = +xc * cphi + yc * sphi - 0.5f;
ylr = -xc * sphi + yc * cphi + 0.5f;
zrl = zc; zlr = zc;
xt = xc * cphi + yc * sphi;
yt = -xc * sphi + yc * cphi;
zt = zc + 0.5f;
xb = xc * cphi + yc * sphi;
yb = -xc * sphi + yc * cphi;
zb = zc - 0.5f;
// the coordinates of source and detector plane here are after rotation
float ratio, all, bll, alr, blr, arl, brl, arr, brr, at, bt, ab, bb, a_max, a_min, b_max, b_min;
// calculate a value for each boundary coordinates
// the a and b here are all absolute positions from isocenter, which are on detector planes
ratio = SD / (xll + SO);
all = ratio * yll;
bll = ratio * zll;
ratio = SD / (xrr + SO);
arr = ratio * yrr;
brr = ratio * zrr;
ratio = SD / (xlr + SO);
alr = ratio * ylr;
blr = ratio * zlr;
ratio = SD / (xrl + SO);
arl = ratio * yrl;
brl = ratio * zrl;
ratio = SD / (xt + SO);
at = ratio * yt;
bt = ratio * zt;
ratio = SD / (xb + SO);
ab = ratio * yb;
bb = ratio * zb;
// get the max and min values of all boundary projectors of voxel boundaries on detector plane
// a_max = MAX4(al ,ar, at, ab);
// a_min = MIN4(al ,ar, at, ab);
// b_max = MAX4(bl ,br, bt, bb);
// b_min = MIN4(bl ,br, bt, bb);
a_max = MAX6(all ,arr, alr, arl, at, ab);
a_min = MIN6(all ,arr, alr, arl, at, ab);
b_max = MAX6(bll ,brr, blr, brl, bt, bb);
b_min = MIN6(bll ,brr, blr, brl, bt, bb);
// the related positions on detector plane from start points
a_max = a_max / da - ai + 0.5f; // now they are the detector coordinates
a_min = a_min / da - ai + 0.5f;
b_max = b_max / db - bi + 0.5f;
b_min = b_min / db - bi + 0.5f;
int a_ind_max = (int)floorf(a_max);
int a_ind_min = (int)floorf(a_min);
int b_ind_max = (int)floorf(b_max);
int b_ind_min = (int)floorf(b_min);
// int a_ind_max = (int)floorf(a_max / da - ai);
// int a_ind_min = (int)floorf(a_min / da - ai);
// int b_ind_max = (int)floorf(b_max / db - bi);
// int b_ind_min = (int)floorf(b_min / db - bi);
float bin_bound_1, bin_bound_2, wa, wb;
for (int ia = MAX(0, a_ind_min); ia < MIN(na, a_max); ia ++){
// bin_bound_1 = ((float)ia + ai) * da;
// bin_bound_2 = ((float)ia + ai + 1.0f) * da;
bin_bound_1 = ia + 0.0f;
bin_bound_2 = ia + 1.0f;
wa = MIN(bin_bound_2, a_max) - MAX(bin_bound_1, a_min);// wa /= a_max - a_min;
for (int ib = MAX(0, b_ind_min); ib < MIN(nb, b_max); ib ++){
// bin_bound_1 = ((float)ib + bi) * db;
// bin_bound_2 = ((float)ib + bi + 1.0f) * db;
bin_bound_1 = ib + 0.0f;
bin_bound_2 = ib + 1.0f;
// wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= db;
wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= b_max - b_min;
img[id] += wa * wb * tex3D<float>(tex_proj, (ia + 0.5f), (ib + 0.5f), 0.5f);
}
}
}
| c68dc414b0e9b2bc4c48c8798a210f91f5aac88d.cu | #include "cu_backprojection.h"
__host__ void host2_backprojection(float *d_img, float *d_proj, float *float_para, int *int_para)
{
}
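// Stages the na x nb projection in an array bound to a texture object (linear filtering,
// clamp addressing) and launches one backprojection thread per voxel of the nx*ny*nz volume.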
__host__ void host_backprojection(float *d_img, float *d_proj, float angle,float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz)
{
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
struct cudaExtent extent = make_cudaExtent(na, nb, 1);
cudaArray *array_proj;
cudaMalloc3DArray(&array_proj, &channelDesc, extent);
cudaMemcpy3DParms copyParams = {0};
cudaPitchedPtr dp_proj = make_cudaPitchedPtr((void*) d_proj, na * sizeof(float), na, nb);
copyParams.extent = extent;
copyParams.kind = cudaMemcpyDeviceToDevice;
copyParams.srcPtr = dp_proj;
copyParams.dstArray = array_proj;
cudaMemcpy3D(©Params);
cudaResourceDesc resDesc;
cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 0;
resDesc.res.array.array = array_proj;
cudaTextureObject_t tex_proj = 0;
// cudaTextureObject_t tex_proj = host_create_texture_object(d_proj, nb, na, 1);
cudaCreateTextureObject(&tex_proj, &resDesc, &texDesc, NULL);
const dim3 gridSize_img((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z);
const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);
kernel_backprojection<<<gridSize_img, blockSize>>>(d_img, tex_proj, angle, SO, SD, na, nb, da, db, ai, bi, nx, ny, nz);
cudaDeviceSynchronize();
cudaFreeArray(array_proj);
cudaDestroyTextureObject(tex_proj);
}
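// Voxel-driven backprojection: rotate the voxel corner points into the source frame, project
// them onto the detector, and accumulate the overlapped bins weighted by their coverage wa*wb.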
__global__ void kernel_backprojection(float *img, cudaTextureObject_t tex_proj, float angle, float SO, float SD, int na, int nb, float da, float db, float ai, float bi, int nx, int ny, int nz){
int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x;
int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y;
int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
// angle += 3.141592653589793;
img[id] = 0.0f;
// float sphi = __sinf(angle);
// float cphi = __cosf(angle);
float sphi = __sinf(angle);
float cphi = __cosf(angle);
// float dd_voxel[3];
float xc, yc, zc;
xc = (float)ix - nx / 2 + 0.5f;
yc = (float)iy - ny / 2 + 0.5f;
zc = (float)iz - nz / 2 + 0.5f;
// voxel boundary coordinates
float xll, yll, zll, xlr, ylr, zlr, xrl, yrl, zrl, xrr, yrr, zrr, xt, yt, zt, xb, yb, zb;
// xll = +(xc - 0.5f) * cphi + (yc - 0.5f) * sphi;
// yll = -(xc - 0.5f) * sphi + (yc - 0.5f) * cphi;
// xrr = +(xc + 0.5f) * cphi + (yc + 0.5f) * sphi;
// yrr = -(xc + 0.5f) * sphi + (yc + 0.5f) * cphi;
// zll = zc; zrr = zc;
// xrl = +(xc + 0.5f) * cphi + (yc - 0.5f) * sphi;
// yrl = -(xc + 0.5f) * sphi + (yc - 0.5f) * cphi;
// xlr = +(xc - 0.5f) * cphi + (yc + 0.5f) * sphi;
// ylr = -(xc - 0.5f) * sphi + (yc + 0.5f) * cphi;
// zrl = zc; zlr = zc;
xll = +xc * cphi + yc * sphi - 0.5f;
yll = -xc * sphi + yc * cphi - 0.5f;
xrr = +xc * cphi + yc * sphi + 0.5f;
yrr = -xc * sphi + yc * cphi + 0.5f;
zll = zc; zrr = zc;
xrl = +xc * cphi + yc * sphi + 0.5f;
yrl = -xc * sphi + yc * cphi - 0.5f;
xlr = +xc * cphi + yc * sphi - 0.5f;
ylr = -xc * sphi + yc * cphi + 0.5f;
zrl = zc; zlr = zc;
xt = xc * cphi + yc * sphi;
yt = -xc * sphi + yc * cphi;
zt = zc + 0.5f;
xb = xc * cphi + yc * sphi;
yb = -xc * sphi + yc * cphi;
zb = zc - 0.5f;
// the coordinates of source and detector plane here are after rotation
float ratio, all, bll, alr, blr, arl, brl, arr, brr, at, bt, ab, bb, a_max, a_min, b_max, b_min;
// calculate a value for each boundary coordinates
// the a and b here are all absolute positions from isocenter, which are on detector planes
ratio = SD / (xll + SO);
all = ratio * yll;
bll = ratio * zll;
ratio = SD / (xrr + SO);
arr = ratio * yrr;
brr = ratio * zrr;
ratio = SD / (xlr + SO);
alr = ratio * ylr;
blr = ratio * zlr;
ratio = SD / (xrl + SO);
arl = ratio * yrl;
brl = ratio * zrl;
ratio = SD / (xt + SO);
at = ratio * yt;
bt = ratio * zt;
ratio = SD / (xb + SO);
ab = ratio * yb;
bb = ratio * zb;
// get the max and min values of all boundary projectors of voxel boundaries on detector plane
// a_max = MAX4(al ,ar, at, ab);
// a_min = MIN4(al ,ar, at, ab);
// b_max = MAX4(bl ,br, bt, bb);
// b_min = MIN4(bl ,br, bt, bb);
a_max = MAX6(all ,arr, alr, arl, at, ab);
a_min = MIN6(all ,arr, alr, arl, at, ab);
b_max = MAX6(bll ,brr, blr, brl, bt, bb);
b_min = MIN6(bll ,brr, blr, brl, bt, bb);
// the related positions on detector plane from start points
a_max = a_max / da - ai + 0.5f; // now they are the detector coordinates
a_min = a_min / da - ai + 0.5f;
b_max = b_max / db - bi + 0.5f;
b_min = b_min / db - bi + 0.5f;
int a_ind_max = (int)floorf(a_max);
int a_ind_min = (int)floorf(a_min);
int b_ind_max = (int)floorf(b_max);
int b_ind_min = (int)floorf(b_min);
// int a_ind_max = (int)floorf(a_max / da - ai);
// int a_ind_min = (int)floorf(a_min / da - ai);
// int b_ind_max = (int)floorf(b_max / db - bi);
// int b_ind_min = (int)floorf(b_min / db - bi);
float bin_bound_1, bin_bound_2, wa, wb;
for (int ia = MAX(0, a_ind_min); ia < MIN(na, a_max); ia ++){
// bin_bound_1 = ((float)ia + ai) * da;
// bin_bound_2 = ((float)ia + ai + 1.0f) * da;
bin_bound_1 = ia + 0.0f;
bin_bound_2 = ia + 1.0f;
wa = MIN(bin_bound_2, a_max) - MAX(bin_bound_1, a_min);// wa /= a_max - a_min;
for (int ib = MAX(0, b_ind_min); ib < MIN(nb, b_max); ib ++){
// bin_bound_1 = ((float)ib + bi) * db;
// bin_bound_2 = ((float)ib + bi + 1.0f) * db;
bin_bound_1 = ib + 0.0f;
bin_bound_2 = ib + 1.0f;
// wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= db;
wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= b_max - b_min;
img[id] += wa * wb * tex3D<float>(tex_proj, (ia + 0.5f), (ib + 0.5f), 0.5f);
}
}
}
|
e83a9457ee903838fa613e3117f831fefbf00df2.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by root on 2020/11/26.
//
#include "kernel.h"
#include "hip/hip_runtime.h"
#define TX 32
#define TY 32
#define LEN 5.f
#define TIME_STEP 0.005f
#define FINAL_TIME 10.f
__device__ float scale(int i, int w) {
return 2 * LEN * (((1.f * i) / w) - 0.5f);
}
__device__ float f(float x, float y, float param, float sys) {
if (sys == 1) {
return x - 2 * param * y;
}
if (sys == 2) {
return -x + param * (1 - x * x) * y;
}
return -x - 2 * param * y;
}
__device__ float2 euler(float x, float y, float dt, float tFinal, float param, float sys) {
float dx = 0.f, dy = 0.f;
for (float t = 0; t < tFinal; t += dt) {
dx = dt * y;
dy = dt * f(x, y , param, sys);
x += dx;
y += dy;
}
return make_float2(x, y);
}
__device__ char clip(float n) {
return n < 0 ? 0 : n > 255 ? 255 : n;
}
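// One thread per pixel: map the pixel to an initial state (x, y), integrate dx/dt = y,
// dy/dt = f(x, y, param) with explicit Euler, and colour by final/initial distance from origin.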
__global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = y * w + x;
if (x >= w || y >= h) {
return;
}
float x0 = scale(x, w);
float y0 = scale(y, h);
float dist_0 = sqrt(x0 * x0 + y0 * y0);
float2 pos = euler(x0, y0, TIME_STEP, FINAL_TIME, p, s);
float dist_f = sqrt(pos.x * pos.x + pos.y * pos.y);
float dist_r = dist_f / dist_0;
d_out[idx].x = clip(dist_r * 255);
d_out[idx].y = (x == w / 2 || y == h / 2) ? 255 : 0;
d_out[idx].z = clip((1 / dist_r) * 255);
d_out[idx].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s) {
dim3 block(TX, TY);
dim3 grid((w + TX - 1) / TX, (h + TY - 1) / TY);
hipLaunchKernelGGL(( stabImageKernel), dim3(grid), dim3(block), 0, 0, d_out, w, h, p, s);
} | e83a9457ee903838fa613e3117f831fefbf00df2.cu | //
// Created by root on 2020/11/26.
//
#include "kernel.h"
#include "cuda_runtime.h"
#define TX 32
#define TY 32
#define LEN 5.f
#define TIME_STEP 0.005f
#define FINAL_TIME 10.f
__device__ float scale(int i, int w) {
return 2 * LEN * (((1.f * i) / w) - 0.5f);
}
__device__ float f(float x, float y, float param, float sys) {
if (sys == 1) {
return x - 2 * param * y;
}
if (sys == 2) {
return -x + param * (1 - x * x) * y;
}
return -x - 2 * param * y;
}
__device__ float2 euler(float x, float y, float dt, float tFinal, float param, float sys) {
float dx = 0.f, dy = 0.f;
for (float t = 0; t < tFinal; t += dt) {
dx = dt * y;
dy = dt * f(x, y , param, sys);
x += dx;
y += dy;
}
return make_float2(x, y);
}
__device__ char clip(float n) {
return n < 0 ? 0 : n > 255 ? 255 : n;
}
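// One thread per pixel: map the pixel to an initial state (x, y), integrate dx/dt = y,
// dy/dt = f(x, y, param) with explicit Euler, and colour by final/initial distance from origin.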
__global__ void stabImageKernel(uchar4 *d_out, int w, int h, float p, int s) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int idx = y * w + x;
if (x >= w || y >= h) {
return;
}
float x0 = scale(x, w);
float y0 = scale(y, h);
float dist_0 = sqrt(x0 * x0 + y0 * y0);
float2 pos = euler(x0, y0, TIME_STEP, FINAL_TIME, p, s);
float dist_f = sqrt(pos.x * pos.x + pos.y * pos.y);
float dist_r = dist_f / dist_0;
d_out[idx].x = clip(dist_r * 255);
d_out[idx].y = (x == w / 2 || y == h / 2) ? 255 : 0;
d_out[idx].z = clip((1 / dist_r) * 255);
d_out[idx].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, float p, int s) {
dim3 block(TX, TY);
dim3 grid((w + TX - 1) / TX, (h + TY - 1) / TY);
stabImageKernel<<<grid, block>>>(d_out, w, h, p, s);
} |
a8d268e6781c4699385c3817a30497564ea8982c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ __host__ int maximum( int a, int b, int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
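// Needleman-Wunsch tile kernel (no shared memory): each block sweeps one BLOCK_SIZE x BLOCK_SIZE
// tile along its anti-diagonals, taking max(NW + reference, W - penalty, N - penalty) per cell.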
__global__ void needle_cuda_noshr_2( int* reference, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols + 1 );
for( int m = 0 ; m < BLOCK_SIZE ; m++) {
if ( tx <= m ){
int t_index_x = tx;
int t_index_y = m - tx;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--) {
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m -1;
int t_index_y = BLOCK_SIZE - tx - 1;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
} | a8d268e6781c4699385c3817a30497564ea8982c.cu | #include "includes.h"
__device__ __host__ int maximum( int a, int b, int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
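// Needleman-Wunsch tile kernel (no shared memory): each block sweeps one BLOCK_SIZE x BLOCK_SIZE
// tile along its anti-diagonals, taking max(NW + reference, W - penalty, N - penalty) per cell.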
__global__ void needle_cuda_noshr_2( int* reference, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols + 1 );
for( int m = 0 ; m < BLOCK_SIZE ; m++) {
if ( tx <= m ){
int t_index_x = tx;
int t_index_y = m - tx;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--) {
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m -1;
int t_index_y = BLOCK_SIZE - tx - 1;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
} |
e60ec9a8545960f5adcc3d761a0e76e7e0c5c6ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 5120;
int dimensigrid = 160;
int dimensiblok = 32;
int sizebig = 16;
typedef struct {
char size;
uint* value;
}big;
__host__ __device__ short ukuranbit(big *a) {
uint lastval = a->value[a->size-1];
short res = 0;
while (lastval != 0) {
lastval >>= 1;
res++;
}
return res + (a->size - 1) * 32;
}
__host__ __device__ char getbit(big* a, short count) {
return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0;
}
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser));
uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser);
return part1 | part2;
}
__host__ __device__ void kali(big *a, big *b, big* res) {
if (a->size == 0 || b->size == 0) {
res->size = 0;
return ;
}
char ukurana = a->size;
char ukuranb = b->size;
char ukuranres = ukurana + ukuranb;
res->size = ukuranres;
for (char i = 0; i < ukuranres; i++) {
res->value[i] = 0;
}
for (char i = 0; i < ukurana; i++) {
uint aval = a->value[i];
if (aval==0){
continue;
}
		uint lebih = 0;                                 // carry between 32-bit blocks
		for (char j = 0; j < ukuranb; j++) {            // single carry variable, no shadowing
			uint bval = b->value[j];
			ul temp = res->value[i+j] + (ul)aval * bval + lebih;
			res->value[i+j] = (uint)temp;               // low 32 bits of the partial product
			lebih = (uint)(temp >> 32);                 // high 32 bits carried to the next block
		}
		res->value[i+ukuranb] = lebih;
}
if (res->value[res->size - 1] == 0){
res->size--;
}
}
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
void tambah(big* a, char b, big* res) {
if (a->size == 0) {
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
carryIn = (res->value[0] < a->value[0]);
char i = 1;
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
void kurang(big* a, big *b, big* res) {
res->size = a->size;
for (int i = 0; i < res->size; i++){
res->value[i] = 0;
}
if (b->size == 0) {
return;
}
char borrowIn, borrowOut;
uint temp;
char i;
for (i = 0, borrowIn = 0; i < b->size; i++) {
temp = a->value[i] - b->value[i];
borrowOut = (temp > a->value[i]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
res->value[i] = temp;
borrowIn = borrowOut;
}
for (; i < a->size && borrowIn; i++) {
borrowIn = (a->value[i] == 0);
res->value[i] = a->value[i] - 1;
}
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (res->value[res->size - 1] == 0){
res->size--;
}
}
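// Modular exponentiation res = a^b mod c via left-to-right binary square-and-multiply,
// using minbuff and mulbuff as scratch space for the intermediate products and remainders.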
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
res->size = 1;
res->value[0] = 1;
short i = ukuranbit(b);
while (i > 0) {
i--;
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
if (getbit(b,i)) {
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
__device__ void dekripsi(big *c1, big *c2, big *p, big *e, big *res, uint *minbuff, big *mulbuff) {
modexp(c1,e,p,res,minbuff,mulbuff);
kali(res, c2, mulbuff);
modulo(mulbuff, p, res, minbuff);
// printf("c1 adlaah %u\n", c1->value[0]);
// printf("c2 adlaah %u\n", c2->value[0]);
}
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
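// Decryption kernel: p, e and each thread's ciphertext pair (c1, c2) are staged in shared
// memory, then each thread computes one plaintext block (c1^e mod p) * c2 mod p via dekripsi.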
__global__ void kerneldek(uint *p, uint *e, uint *c, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 16;
// int banyakdata = 256;
__shared__ big sa[32];
__shared__ big sb[32];
__shared__ big smulbuff[32];
__shared__ big sres[64];
__shared__ big sp;
__shared__ big se;
__shared__ uint s[3200];
uint *spval = s;
uint *seval = (uint*)&spval[sizebig];
uint *sresval = (uint*)&seval[sizebig];
uint *smulbuffval = (uint*)&sresval[sizebig*32*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *saval = (uint*)&smulbuffval[sizebig*32*2];
uint *sbval = (uint*)&saval[sizebig*32];
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
seval[i] = e[i];
saval[jdx*sizebig+i] = c[2*idx*sizebig+i];
sbval[jdx*sizebig+i] = c[(2*idx+1)*sizebig+i];
}
sp.size = sizebig;
se.size = sizebig;
sa[jdx].size = sizebig;
sb[jdx].size = sizebig;
sp.value = spval;
se.value = seval;
sa[jdx].value = (uint*)&saval[jdx*sizebig];
sb[jdx].value = (uint*)&sbval[jdx*sizebig];
sres[jdx].value = (uint*)&sresval[jdx*sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
dekripsi(sa + jdx, sb + jdx, &sp, &se, sres + jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
ressize[idx] = sres[jdx].size;
for (int i = 0; i < sres[jdx].size; i++)
{
resval[idx * sizebig * 2 + i] = sres[jdx].value[i];
}
}
void CUDAenk(uint *p, uint *e, uint *c, uint *resval, char *ressize) {
//=====================BAGIAN G, P, DAN Y ====================================//
char *devressize;
uint *devp, *deve, *devc, *devresval, *buffmin, *buffmul;
hipMalloc((void**)&devp, sizebig * sizeof(uint));
hipMalloc((void**)&deve, sizebig * sizeof(uint));
hipMalloc((void**)&devc, 2 * banyakdata * sizebig * sizeof(uint));
hipMalloc((void**)&devresval, banyakdata * 2 * sizebig * sizeof(uint));
hipMalloc((void**)&devressize, banyakdata * sizeof(char));
hipMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
hipMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));
hipMemcpy(devp, p, sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(deve, e, sizebig * sizeof(uint), hipMemcpyHostToDevice);
hipMemcpy(devc, c, 2 * banyakdata * sizebig * sizeof(uint), hipMemcpyHostToDevice);
kerneldek << <dimensigrid, dimensiblok >> >(devp, deve, devc, devresval, devressize, buffmin, buffmul);
hipDeviceSynchronize();
// COPY FROM DEVICE TO HOST HERE
hipMemcpy(ressize, devressize, banyakdata, hipMemcpyDeviceToHost);
hipMemcpy(resval, devresval, banyakdata * 2 * sizebig * sizeof(uint), hipMemcpyDeviceToHost);
hipFree(devp);
hipFree(deve);
hipFree(devc);
hipFree(devresval);
hipFree(devressize);
hipFree(buffmin);
hipFree(buffmul);
}
void init(uint *pval, uint *eval, uint *cval){
srand(2018);
big *p, *e;
p = (big*)malloc(sizeof(big));
e = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
p->value[0] = UINT_MAX;
for (int i = 0; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
e->size = sizebig;
e->value = eval;
for (int i = 0; i < e->size; i++)
{
// e->value[i] = 2;
e->value[i] = rand() % UINT_MAX;
}
//========================================================//
// Blok cipherteks
for(int i = 0 ; i < 2 * banyakdata * sizebig ; i++){
cval[i] = rand() % UINT_MAX;
}
}
int main(){
char *ressize;
uint *p, *e, *c, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
e = (uint*) malloc(sizebig * sizeof(uint));
c = (uint*) malloc(2 * banyakdata * sizebig * sizeof(uint));
resval = (uint*) malloc(banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(banyakdata * sizeof(char));
init(p,e,c);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,e,c,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Plain %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Plain ... : ...\n");
// printf("Plain %d size %d : %u\n",banyakdata-2, ressize[banyakdata-2], resval[(banyakdata-2) * 2 * sizebig]);
// printf("Plain %d size %d : %u\n",banyakdata-1, ressize[banyakdata-1], resval[(banyakdata-1) * 2 * sizebig]);
free(p);
free(e);
free(c);
free(resval);
free(ressize);
return 0;
}
| e60ec9a8545960f5adcc3d761a0e76e7e0c5c6ff.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
typedef unsigned long long ul;
typedef unsigned int uint;
int banyakdata = 5120;
int dimensigrid = 160;
int dimensiblok = 32;
int sizebig = 16;
typedef struct {
char size;
uint* value;
}big;
__host__ __device__ short ukuranbit(big *a) {
uint lastval = a->value[a->size-1];
short res = 0;
while (lastval != 0) {
lastval >>= 1;
res++;
}
return res + (a->size - 1) * 32;
}
__host__ __device__ char getbit(big* a, short count) {
return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0;
}
__host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) {
uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser));
uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser);
return part1 | part2;
}
__host__ __device__ void kali(big *a, big *b, big* res) {
if (a->size == 0 || b->size == 0) {
res->size = 0;
return ;
}
char ukurana = a->size;
char ukuranb = b->size;
char ukuranres = ukurana + ukuranb;
res->size = ukuranres;
for (char i = 0; i < ukuranres; i++) {
res->value[i] = 0;
}
for (char i = 0; i < ukurana; i++) {
uint aval = a->value[i];
if (aval==0){
continue;
}
uint lebih = 0;
for (char j = 0, lebih = 0; j < ukuranb; j++) {
uint bval = b->value[j];
ul temp = res->value[i+j] + aval * bval + lebih;
res->value[i+j] = temp % UINT_MAX;
lebih = temp / UINT_MAX;
}
res->value[i+ukuranb] = lebih;
}
if (res->value[res->size - 1] == 0){
res->size--;
}
}
__host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) {
res->size = a->size;
for(char i = 0 ; i < res->size ;i++){
res->value[i] = a->value[i];
}
if (a->size < b->size) {
return ;
}
char i, j, k;
char i2;
uint temp ;
char borrowIn, borrowOut;
char ukurana = a->size;
char ukuranb = b->size;
res->value[res->size] = 0;
res->size++;
i = ukurana - ukuranb + 1;
while (i > 0) {
i--;
i2 = 32;
while (i2 > 0) {
i2--;
for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) {
temp = res->value[k] - getShiftedBlock(b, j, i2);
borrowOut = (temp > res->value[k]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
minbuff[k] = temp;
borrowIn = borrowOut;
}
for (; k < ukurana && borrowIn; k++) {
borrowIn = (res->value[k] == 0);
minbuff[k] = res->value[k] - 1;
}
if (!borrowIn) {
while (k > i) {
k--;
res->value[k] = minbuff[k];
}
}
}
}
while (res->size > 0 && res->value[res->size - 1] == 0)
res->size--;
}
void tambah(big* a, char b, big* res) {
if (a->size == 0) {
res->size = 1;
res->value[0] = uint(b);
return;
}
char carryIn = 0;
uint temp;
res->size = a->size + 1;
res->value[0] = a->value[0] + (uint)b;
carryIn = (res->value[0] < a->value[0]);
char i = 1;
for (; i < a->size && carryIn; i++) {
temp = a->value[i] + (uint)1;
carryIn = (temp == 0);
res->value[i] = temp;
}
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (carryIn)
res->value[i] = 1;
else
res->size--;
}
void kurang(big* a, big *b, big* res) {
res->size = a->size;
for (int i = 0; i < res->size; i++){
res->value[i] = 0;
}
if (b->size == 0) {
return;
}
char borrowIn, borrowOut;
uint temp;
char i;
for (i = 0, borrowIn = 0; i < b->size; i++) {
temp = a->value[i] - b->value[i];
borrowOut = (temp > a->value[i]);
if (borrowIn) {
borrowOut |= (temp == 0);
temp--;
}
res->value[i] = temp;
borrowIn = borrowOut;
}
for (; i < a->size && borrowIn; i++) {
borrowIn = (a->value[i] == 0);
res->value[i] = a->value[i] - 1;
}
for (; i < a->size; i++)
res->value[i] = a->value[i];
if (res->value[res->size - 1] == 0){
res->size--;
}
}
__host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){
//printf("c val 0 %u\n", c->value[0]);
res->size = 1;
res->value[0] = 1;
short i = ukuranbit(b);
while (i > 0) {
i--;
kali(res,res,mulbuff);
modulo(mulbuff,c,res,minbuff);
if (getbit(b,i)) {
kali(res, a, mulbuff);
modulo(mulbuff, c, res, minbuff);
}
}
}
__device__ void dekripsi(big *c1, big *c2, big *p, big *e, big *res, uint *minbuff, big *mulbuff) {
modexp(c1,e,p,res,minbuff,mulbuff);
kali(res, c2, mulbuff);
modulo(mulbuff, p, res, minbuff);
// printf("c1 adlaah %u\n", c1->value[0]);
// printf("c2 adlaah %u\n", c2->value[0]);
}
void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){
modexp(g,x,p,y,minbuff,mulbuff);
}
__global__ void kerneldek(uint *p, uint *e, uint *c, uint *resval, char *ressize, uint *buffmin, uint *buffmul){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int jdx = threadIdx.x;
int sizebig = 16;
// int banyakdata = 256;
__shared__ big sa[32];
__shared__ big sb[32];
__shared__ big smulbuff[32];
__shared__ big sres[64];
__shared__ big sp;
__shared__ big se;
__shared__ uint s[3200];
uint *spval = s;
uint *seval = (uint*)&spval[sizebig];
uint *sresval = (uint*)&seval[sizebig];
uint *smulbuffval = (uint*)&sresval[sizebig*32*2];
//uint *sminbuffval = (uint*)&smulbuffval[2*sizebig*128];
//uint *sminbuffval = (uint*)&sresval[2*sizebig*128*2];
uint *saval = (uint*)&smulbuffval[sizebig*32*2];
uint *sbval = (uint*)&saval[sizebig*32];
for (int i = 0; i < sizebig; i++)
{
spval[i] = p[i];
seval[i] = e[i];
saval[jdx*sizebig+i] = c[2*idx*sizebig+i];
sbval[jdx*sizebig+i] = c[(2*idx+1)*sizebig+i];
}
sp.size = sizebig;
se.size = sizebig;
sa[jdx].size = sizebig;
sb[jdx].size = sizebig;
sp.value = spval;
se.value = seval;
sa[jdx].value = (uint*)&saval[jdx*sizebig];
sb[jdx].value = (uint*)&sbval[jdx*sizebig];
sres[jdx].value = (uint*)&sresval[jdx*sizebig*2];
smulbuff[jdx].value = (uint*)&smulbuffval[jdx*sizebig*2];
// sminbuff[jdx].value = (uint*)&sminbuffval[jdx*sizebig];
__syncthreads();
//uint* minbuff = (uint*) malloc(sizeof(uint) * sizebig);
dekripsi(sa + jdx, sb + jdx, &sp, &se, sres + jdx, buffmin + 2 *sizebig * idx, smulbuff + jdx);
ressize[idx] = sres[jdx].size;
for (int i = 0; i < sres[jdx].size; i++)
{
resval[idx * sizebig * 2 + i] = sres[jdx].value[i];
}
}
void CUDAenk(uint *p, uint *e, uint *c, uint *resval, char *ressize) {
//===================== G, P, AND Y SECTION ====================================//
char *devressize;
uint *devp, *deve, *devc, *devresval, *buffmin, *buffmul;
cudaMalloc((void**)&devp, sizebig * sizeof(uint));
cudaMalloc((void**)&deve, sizebig * sizeof(uint));
cudaMalloc((void**)&devc, 2 * banyakdata * sizebig * sizeof(uint));
cudaMalloc((void**)&devresval, banyakdata * 2 * sizebig * sizeof(uint));
cudaMalloc((void**)&devressize, banyakdata * sizeof(char));
cudaMalloc((void**)&buffmin, banyakdata * sizebig * 2 * sizeof(uint));
cudaMalloc((void**)&buffmul, banyakdata * sizebig * 2 * sizeof(uint));
cudaMemcpy(devp, p, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(deve, e, sizebig * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(devc, c, 2 * banyakdata * sizebig * sizeof(uint), cudaMemcpyHostToDevice);
kerneldek << <dimensigrid, dimensiblok >> >(devp, deve, devc, devresval, devressize, buffmin, buffmul);
cudaDeviceSynchronize();
// COPY FROM DEVICE TO HOST HERE
cudaMemcpy(ressize, devressize, banyakdata, cudaMemcpyDeviceToHost);
cudaMemcpy(resval, devresval, banyakdata * 2 * sizebig * sizeof(uint), cudaMemcpyDeviceToHost);
cudaFree(devp);
cudaFree(deve);
cudaFree(devc);
cudaFree(devresval);
cudaFree(devressize);
cudaFree(buffmin);
cudaFree(buffmul);
}
void init(uint *pval, uint *eval, uint *cval){
srand(2018);
big *p, *e;
p = (big*)malloc(sizeof(big));
e = (big*)malloc(sizeof(big));
p->size = sizebig;
p->value = pval;
p->value[0] = UINT_MAX;
for (int i = 0; i < p->size; i++)
{
//p->value[i] = 2357;
p->value[i] = rand() % UINT_MAX;
}
e->size = sizebig;
e->value = eval;
for (int i = 0; i < e->size; i++)
{
// e->value[i] = 2;
e->value[i] = rand() % UINT_MAX;
}
//========================================================//
// Ciphertext blocks
for(int i = 0 ; i < 2 * banyakdata * sizebig ; i++){
cval[i] = rand() % UINT_MAX;
}
}
int main(){
char *ressize;
uint *p, *e, *c, *resval;
p = (uint*) malloc(sizebig * sizeof(uint));
e = (uint*) malloc(sizebig * sizeof(uint));
c = (uint*) malloc(2 * banyakdata * sizebig * sizeof(uint));
resval = (uint*) malloc(banyakdata * 2 * sizebig * sizeof(uint));
ressize = (char*) malloc(banyakdata * sizeof(char));
init(p,e,c);
// printf("Encrypting...\n");
//========================================================//
CUDAenk(p,e,c,resval,ressize);
// for (int i = 0; i < 5; i++)
// {
// printf("Plain %d size %d : %u\n",i, ressize[i], resval[i*2*sizebig]);
// }
// printf("Plain ... : ...\n");
// printf("Plain %d size %d : %u\n",banyakdata-2, ressize[banyakdata-2], resval[(banyakdata-2) * 2 * sizebig]);
// printf("Plain %d size %d : %u\n",banyakdata-1, ressize[banyakdata-1], resval[(banyakdata-1) * 2 * sizebig]);
free(p);
free(e);
free(c);
free(resval);
free(ressize);
return 0;
}
|
6b40a7dde8250f6af58c7f61c313ab742ad317fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
// MASK SIZE
#define MASK_WIDTH 5
// MASK RADIO
#define MASK_R (MASK_WIDTH-1)/2
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
// SIZE OF TILE
#define TILE_WIDTH 32
// SIZE OF SHARE MATRIX
#define SHARED_SIZE (MASK_WIDTH-1 + TILE_WIDTH)
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
__global__ void smoothing_kernel(PPMImage *d_image, PPMImage *d_image_copy) {
// Creating variables
int i, j, row, col;
int total_red = 0, total_blue = 0, total_green = 0;
int index_dst_y, index_dst_x, index_src_y, index_src_x;
// Get row and column
row = blockIdx.y * TILE_WIDTH + threadIdx.y;
col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Create Shared block of data
__shared__ PPMPixel shared_image_data[SHARED_SIZE*SHARED_SIZE];
// Fill the shared tile with a two-step loop.
// First step: fill the entries whose indices fall inside the block dimensions
// -----------------
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// | | | | | | | | |
// -----------------
// Second step: fill the entries whose indices fall outside the block dimensions
// -----------------
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// |x|x|x|x|x|x|x|x|
// -----------------
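// Concretely, with the defines above: SHARED_SIZE = 32 + 4 = 36, so the shared
// tile holds 36 * 36 = 1296 pixels while the block provides 32 * 32 = 1024
// threads. The loop below therefore runs twice (i = 0 and i = 1024); since
// 1296 <= 2 * 1024 the second pass covers the remaining 272 entries, and the
// bounds check inside the loop discards the surplus indices of that pass.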
for (i = 0; i <= TILE_WIDTH * TILE_WIDTH; i = i + TILE_WIDTH * TILE_WIDTH)
{
// Get indices into the destination (shared) tile
index_dst_y = (threadIdx.y * TILE_WIDTH + threadIdx.x + i) / SHARED_SIZE;
index_dst_x = (threadIdx.y * TILE_WIDTH + threadIdx.x + i) % SHARED_SIZE;
// Get indices into the source image
index_src_y = (blockIdx.y * TILE_WIDTH) + index_dst_y - MASK_R;
index_src_x = (blockIdx.x * TILE_WIDTH) + index_dst_x - MASK_R;
// Proceed only if the flattened destination index lies inside the shared tile
if (index_dst_y * SHARED_SIZE + index_dst_x < (SHARED_SIZE*SHARED_SIZE)) {
// If the source index lies inside the image, copy the pixel values; otherwise store 0
if (index_src_y >= 0 && index_src_y < d_image->y && index_src_x >= 0 && index_src_x < d_image->x){
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].red = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].red;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].blue = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].blue;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].green = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].green;
}
else{
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].red = 0;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].blue = 0;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].green = 0;
}
}
}
// sync threads
__syncthreads();
// If row and col lie inside the image, proceed with the convolution
if (row < d_image->y && col < d_image->x){
for (i = 0; i < MASK_WIDTH; i++){
for (j = 0; j < MASK_WIDTH; j++) {
total_red += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].red;
total_blue += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].blue;
total_green += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].green;
}
}
// Save the convolution result into the device image
d_image->data[(row * d_image->x) + col].red = total_red / (MASK_WIDTH*MASK_WIDTH);
d_image->data[(row * d_image->x) + col].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
d_image->data[(row * d_image->x) + col].green = total_green / (MASK_WIDTH*MASK_WIDTH);
}
}
void smoothing_GPU(PPMImage *image, PPMImage *image_copy) {
unsigned int rows, cols, img_size;
PPMImage *d_image, *d_image_copy;
PPMPixel *d_pixels, *d_pixels_copy, *new_pixels;
// Get data
cols = image->x;
rows = image->y;
img_size = cols * rows;
// Allocate structures on the device
hipMalloc((void **)&d_image, sizeof(PPMImage));
hipMalloc((void **)&d_image_copy, sizeof(PPMImage));
// Allocate image pixel buffers on the device
hipMalloc((void **)&d_pixels, sizeof(PPMPixel) * img_size);
hipMalloc((void **)&d_pixels_copy, sizeof(PPMPixel) * img_size);
// Copy structures and pixel data to the device
hipMemcpy(d_image, image, sizeof(PPMImage), hipMemcpyHostToDevice);
hipMemcpy(d_pixels, image->data, sizeof(PPMPixel) * img_size, hipMemcpyHostToDevice);
hipMemcpy(&(d_image->data), &d_pixels, sizeof(PPMPixel *), hipMemcpyHostToDevice);
hipMemcpy(d_image_copy, image, sizeof(PPMImage), hipMemcpyHostToDevice);
hipMemcpy(d_pixels_copy, image->data, sizeof(PPMPixel) * img_size, hipMemcpyHostToDevice);
hipMemcpy(&(d_image_copy->data), &d_pixels_copy, sizeof(PPMPixel *), hipMemcpyHostToDevice);
dim3 dimGrid(ceil((float)cols / TILE_WIDTH), ceil((float)rows / TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
// Call function
hipLaunchKernelGGL(( smoothing_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_image, d_image_copy);
new_pixels = (PPMPixel *) malloc(img_size * sizeof(PPMPixel));
// Copy result to local array
hipMemcpy(image, d_image, sizeof(PPMImage), hipMemcpyDeviceToHost);
hipMemcpy(new_pixels, d_pixels, sizeof(PPMPixel) * img_size, hipMemcpyDeviceToHost);
image->data = new_pixels;
//Free memory
hipFree(d_image);
hipFree(d_image_copy);
hipFree(d_pixels);
hipFree(d_pixels_copy);
}
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Too many or no one arguments supplied.\n");
}
//double t_start, t_end;
//int i;
char *filename = argv[1]; // Receive the input file
PPMImage *image = readPPM(filename);
PPMImage *image_output = readPPM(filename);
//t_start = rtclock();
smoothing_GPU(image_output, image);
//t_end = rtclock();
writePPM(image_output);
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(image);
free(image_output);
}
| 6b40a7dde8250f6af58c7f61c313ab742ad317fe.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
// MASK SIZE
#define MASK_WIDTH 5
// MASK RADIO
#define MASK_R (MASK_WIDTH-1)/2
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
// SIZE OF TILE
#define TILE_WIDTH 32
// SIZE OF SHARE MATRIX
#define SHARED_SIZE (MASK_WIDTH-1 + TILE_WIDTH)
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
__global__ void smoothing_kernel(PPMImage *d_image, PPMImage *d_image_copy) {
// Creating variables
int i, j, row, col;
int total_red = 0, total_blue = 0, total_green = 0;
int index_dst_y, index_dst_x, index_src_y, index_src_x;
// Get row and column
row = blockIdx.y * TILE_WIDTH + threadIdx.y;
col = blockIdx.x * TILE_WIDTH + threadIdx.x;
// Create Shared block of data
__shared__ PPMPixel shared_image_data[SHARED_SIZE*SHARED_SIZE];
// Fill the shared tile with a two-step loop.
// First step: fill the entries whose indices fall inside the block dimensions
// -----------------
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// |x|x|x|x|x|x|x| |
// | | | | | | | | |
// -----------------
// Second step: fill the entries whose indices fall outside the block dimensions
// -----------------
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// | | | | | | | |x|
// |x|x|x|x|x|x|x|x|
// -----------------
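// Concretely, with the defines above: SHARED_SIZE = 32 + 4 = 36, so the shared
// tile holds 36 * 36 = 1296 pixels while the block provides 32 * 32 = 1024
// threads. The loop below therefore runs twice (i = 0 and i = 1024); since
// 1296 <= 2 * 1024 the second pass covers the remaining 272 entries, and the
// bounds check inside the loop discards the surplus indices of that pass.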
for (i = 0; i <= TILE_WIDTH * TILE_WIDTH; i = i + TILE_WIDTH * TILE_WIDTH)
{
// Get indices into the destination (shared) tile
index_dst_y = (threadIdx.y * TILE_WIDTH + threadIdx.x + i) / SHARED_SIZE;
index_dst_x = (threadIdx.y * TILE_WIDTH + threadIdx.x + i) % SHARED_SIZE;
// Get indices into the source image
index_src_y = (blockIdx.y * TILE_WIDTH) + index_dst_y - MASK_R;
index_src_x = (blockIdx.x * TILE_WIDTH) + index_dst_x - MASK_R;
// Proceed only if the flattened destination index lies inside the shared tile
if (index_dst_y * SHARED_SIZE + index_dst_x < (SHARED_SIZE*SHARED_SIZE)) {
// If the source index lies inside the image, copy the pixel values; otherwise store 0
if (index_src_y >= 0 && index_src_y < d_image->y && index_src_x >= 0 && index_src_x < d_image->x){
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].red = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].red;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].blue = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].blue;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].green = d_image_copy->data[(index_src_y * d_image->x) + index_src_x].green;
}
else{
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].red = 0;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].blue = 0;
shared_image_data[index_dst_y * SHARED_SIZE + index_dst_x].green = 0;
}
}
}
// sync threads
__syncthreads();
// If row and col lie inside the image, proceed with the convolution
if (row < d_image->y && col < d_image->x){
for (i = 0; i < MASK_WIDTH; i++){
for (j = 0; j < MASK_WIDTH; j++) {
total_red += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].red;
total_blue += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].blue;
total_green += shared_image_data[((threadIdx.y + j) * SHARED_SIZE) + (threadIdx.x + i)].green;
}
}
// Save the convolution result into the device image
d_image->data[(row * d_image->x) + col].red = total_red / (MASK_WIDTH*MASK_WIDTH);
d_image->data[(row * d_image->x) + col].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
d_image->data[(row * d_image->x) + col].green = total_green / (MASK_WIDTH*MASK_WIDTH);
}
}
void smoothing_GPU(PPMImage *image, PPMImage *image_copy) {
unsigned int rows, cols, img_size;
PPMImage *d_image, *d_image_copy;
PPMPixel *d_pixels, *d_pixels_copy, *new_pixels;
// Get data
cols = image->x;
rows = image->y;
img_size = cols * rows;
// Allocate structures on the device
cudaMalloc((void **)&d_image, sizeof(PPMImage));
cudaMalloc((void **)&d_image_copy, sizeof(PPMImage));
// Allocate image pixel buffers on the device
cudaMalloc((void **)&d_pixels, sizeof(PPMPixel) * img_size);
cudaMalloc((void **)&d_pixels_copy, sizeof(PPMPixel) * img_size);
// Copy structures and pixel data to the device
cudaMemcpy(d_image, image, sizeof(PPMImage), cudaMemcpyHostToDevice);
cudaMemcpy(d_pixels, image->data, sizeof(PPMPixel) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_image->data), &d_pixels, sizeof(PPMPixel *), cudaMemcpyHostToDevice);
cudaMemcpy(d_image_copy, image, sizeof(PPMImage), cudaMemcpyHostToDevice);
cudaMemcpy(d_pixels_copy, image->data, sizeof(PPMPixel) * img_size, cudaMemcpyHostToDevice);
cudaMemcpy(&(d_image_copy->data), &d_pixels_copy, sizeof(PPMPixel *), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil((float)cols / TILE_WIDTH), ceil((float)rows / TILE_WIDTH), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
// Call function
smoothing_kernel<<<dimGrid, dimBlock>>>(d_image, d_image_copy);
new_pixels = (PPMPixel *) malloc(img_size * sizeof(PPMPixel));
// Copy result to local array
cudaMemcpy(image, d_image, sizeof(PPMImage), cudaMemcpyDeviceToHost);
cudaMemcpy(new_pixels, d_pixels, sizeof(PPMPixel) * img_size, cudaMemcpyDeviceToHost);
image->data = new_pixels;
//Free memory
cudaFree(d_image);
cudaFree(d_image_copy);
cudaFree(d_pixels);
cudaFree(d_pixels_copy);
}
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Too many or no one arguments supplied.\n");
}
//double t_start, t_end;
//int i;
char *filename = argv[1]; // Receive the input file
PPMImage *image = readPPM(filename);
PPMImage *image_output = readPPM(filename);
//t_start = rtclock();
smoothing_GPU(image_output, image);
//t_end = rtclock();
writePPM(image_output);
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(image);
free(image_output);
}
|
ed068bdcf6b8e21cc1bd9048b43ebb9ceb7ca551.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu
#include "nms_rotated_cuda.cuh"
#include "parrots_cuda_helper.hpp"
DArrayLite nms_rotated_cuda(const DArrayLite dets, const DArrayLite scores,
const DArrayLite dets_sorted, float iou_threshold,
const int multi_label, hipStream_t stream,
CudaContext& ctx) {
int dets_num = dets.dim(0);
const int col_blocks = divideUP(dets_num, threadsPerBlock);
auto mask = ctx.createDArrayLite(
DArraySpec::array(Prim::Int64, DArrayShape(dets_num * col_blocks)));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(dets_sorted.elemType().prim(), [&] {
hipLaunchKernelGGL(( nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num, iou_threshold, dets_sorted.ptr<scalar_t>(),
(unsigned long long*)mask.ptr<int64_t>(), multi_label);
});
DArrayLite mask_cpu = ctx.createDArrayLite(mask, getHostProxy());
unsigned long long* mask_host = (unsigned long long*)mask_cpu.ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
auto keep = ctx.createDArrayLite(
DArraySpec::array(Prim::Int64, DArrayShape(dets_num)), getHostProxy());
int64_t* keep_out = keep.ptr<int64_t>();
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[i] = 1;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
auto keep_cuda = ctx.createDArrayLite(keep, ctx.getProxy());
PARROTS_CUDA_CHECK(hipGetLastError());
return keep_cuda;
}
| ed068bdcf6b8e21cc1bd9048b43ebb9ceb7ca551.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu
#include "nms_rotated_cuda.cuh"
#include "parrots_cuda_helper.hpp"
DArrayLite nms_rotated_cuda(const DArrayLite dets, const DArrayLite scores,
const DArrayLite dets_sorted, float iou_threshold,
const int multi_label, cudaStream_t stream,
CudaContext& ctx) {
int dets_num = dets.dim(0);
const int col_blocks = divideUP(dets_num, threadsPerBlock);
auto mask = ctx.createDArrayLite(
DArraySpec::array(Prim::Int64, DArrayShape(dets_num * col_blocks)));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(dets_sorted.elemType().prim(), [&] {
nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num, iou_threshold, dets_sorted.ptr<scalar_t>(),
(unsigned long long*)mask.ptr<int64_t>(), multi_label);
});
DArrayLite mask_cpu = ctx.createDArrayLite(mask, getHostProxy());
unsigned long long* mask_host = (unsigned long long*)mask_cpu.ptr<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
auto keep = ctx.createDArrayLite(
DArraySpec::array(Prim::Int64, DArrayShape(dets_num)), getHostProxy());
int64_t* keep_out = keep.ptr<int64_t>();
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[i] = 1;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
auto keep_cuda = ctx.createDArrayLite(keep, ctx.getProxy());
PARROTS_CUDA_CHECK(cudaGetLastError());
return keep_cuda;
}
|
c1ce8faeec20f147d8602ce05b9dcf17bfbc565d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "cudaCommon.h"
#include "mpi_common.h"
#include "freezeAndPtot.h"
/* THIS FUNCTION:
Calculates the maximum in the x direction of the freezing speed c_f, defined
as the fastest characteristic velocity in the x direction.
In the hydrodynamic case this is the adiabatic sound speed, in the MHD case
this is the fast magnetosonic speed.
*/
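/* For reference (a reading of the kernels below; see them for the
   authoritative expressions):
   hydro: P = (gamma-1)*(E - |p|^2/(2*rho)), clamped so that
          gamma*P/rho >= csMin^2, and c_freeze = sqrt(gamma*P/rho) + |p_x|/rho.
   MHD:   ptot = (gamma-1)*(E - |p|^2/(2*rho)) + (1 - gamma/2)*B^2, and the
          freezing speed uses gamma*(gamma-1)*(E - |p|^2/(2*rho)) + MHD_CS_B*B^2
          in place of gamma*P, where MHD_CS_B carries the ALFVEN_CSQ_FACTOR
          stabilizer for low-beta regions.
   The per-block maxima are then reduced across partitions and ranks via
   MGA_globalReduceDimension(..., MGA_OP_MAX, ...). */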
#define BLOCKDIM 64
#define MAXPOW 5
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if(nrhs!=12)
mexErrMsgTxt("Wrong number of arguments. Call using [ptot freeze] = FreezeAndPtot(mass, ener, momx, momy, momz, bz, by, bz, gamma, direct=1, csmin, topology)");
if(nlhs == 0) mexErrMsgTxt("0 LHS argument: Must return at least Ptotal");
if(nlhs > 2) mexErrMsgTxt(">2 LHS arguments: Can only return [Ptot c_freeze]");
CHECK_CUDA_ERROR("entering freezeAndPtot");
ParallelTopology topology;
int zee = topoStructureToC(prhs[11], &topology);
int ispurehydro = (int)*mxGetPr(prhs[9]);
int nArrays;
if(ispurehydro) { nArrays = 5; } else { nArrays = 8; }
MGArray fluid[8];
MGA_accessMatlabArrays(prhs, 0, nArrays-1, fluid);
dim3 arraySize;
arraySize.x = fluid->dim[0];
arraySize.y = fluid->dim[1];
arraySize.z = fluid->dim[2];
dim3 blocksize, gridsize;
blocksize.x = BLOCKDIM; blocksize.y = blocksize.z = 1;
MGArray clone;
MGArray *POut;
MGArray *cfOut;
clone = fluid[0];
if(fluid->partitionDir == PARTITION_X) {
clone.dim[0] = fluid->nGPUs;
} else {
clone.dim[0] = 1;
}
clone.dim[1] = arraySize.y;
clone.dim[2] = arraySize.z;
clone.haloSize = 0;
POut = MGA_createReturnedArrays(plhs, 1, fluid);
MGArray *cfLocal;
int itworked = MGA_allocArrays(&cfLocal, 1, &clone);
double hostgf[6];
double gam = *mxGetPr(prhs[8]);
hostgf[0] = gam;
hostgf[1] = gam - 1.0;
hostgf[2] = gam*(gam-1.0);
hostgf[3] = (1.0 - .5*gam);
hostgf[4] = (*mxGetPr(prhs[10]))*(*mxGetPr(prhs[10])); // min c_s squared ;
hostgf[5] = (ALFVEN_CSQ_FACTOR - .5*gam*(gam-1.0));
int i;
int sub[6];
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
hipMemcpyToSymbol((const void *)gammafunc, &hostgf[0], 6*sizeof(double), 0, hipMemcpyHostToDevice);
CHECK_CUDA_ERROR("cfreeze symbol upload");
}
if(ispurehydro) {
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
CHECK_CUDA_ERROR("hipSetDevice()");
calcPartitionExtent(fluid, i, sub);
gridsize.x = sub[4];
gridsize.y = sub[5];
hipLaunchKernelGGL(( cukern_FreezeSpeed_hydro), dim3(gridsize), dim3(blocksize), 0, 0,
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
cfLocal->devicePtr[i], POut->devicePtr[i], sub[3]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "Freeze speed hydro");
}
} else {
for(i = 0; i < fluid->nGPUs; i++) {
hipSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
hipLaunchKernelGGL(( cukern_FreezeSpeed_mhd), dim3(gridsize), dim3(blocksize), 0, 0,
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[5].devicePtr[i],
fluid[6].devicePtr[i],
fluid[7].devicePtr[i],
cfLocal->devicePtr[i], POut->devicePtr[i], sub[3]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "freeze speed MHD");
}
}
cfOut = NULL;
MGA_globalReduceDimension(cfLocal, &cfOut, MGA_OP_MAX, 1, 0, 1, &topology);
MGA_delete(cfLocal);
MGA_returnOneArray(plhs+1, cfOut);
free(POut);
free(cfLocal);
free(cfOut);
}
#define gam gammafunc[0]
#define gm1 gammafunc[1]
#define gg1 gammafunc[2]
#define MHD_PRESS_B gammafunc[3]
#define cs0sq gammafunc[4]
#define MHD_CS_B gammafunc[5]
__global__ void cukern_FreezeSpeed_mhd(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *freeze, double *ptot, int nx)
{
int tix = threadIdx.x;
/* gridDim = [ny nz], nx = nx */
int x = tix + nx*(blockIdx.x + gridDim.x*blockIdx.y);
nx += nx*(blockIdx.x + gridDim.x*blockIdx.y);
//int addrMax = nx + nx*(blockIdx.x + gridDim.x*blockIdx.y);
double pressvar;
double T, bsquared;
double rhoinv;
__shared__ double locBloc[BLOCKDIM];
//CsMax = 0.0;
locBloc[tix] = 0.0;
if(x >= nx) return; // If we get a very low resolution
while(x < nx) {
rhoinv = 1.0/rho[x];
T = .5*rhoinv*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x]);
bsquared = bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x];
// Calculate internal + magnetic energy
pressvar = E[x] - T;
// Assert minimum thermal soundspeed / temperature
/* if(gam*pressvar*rhoinv < cs0sq) {
E[x] = T + bsqhf + cs0sq/(gam*rhoinv);
pressvar = cs0sq/(gam*rhoinv);
} */
// Calculate gas + magnetic pressure
ptot[x] = gm1*pressvar + MHD_PRESS_B*bsquared;
// We calculate the freezing speed in the X direction: max of |v|+c_fast
// MHD_CS_B includes an "alfven factor" to stabilize the code in low-beta situations
pressvar = (gg1*pressvar + MHD_CS_B*bsquared)*rhoinv;
pressvar = sqrt(abs(pressvar)) + abs(px[x]*rhoinv);
if(pressvar > locBloc[tix]) locBloc[tix] = pressvar;
x += BLOCKDIM;
}
__syncthreads();
if(tix >= 32) return;
if(locBloc[tix+32] > locBloc[tix]) { locBloc[tix] = locBloc[tix+32]; }
__syncthreads(); // compute capability 2.x and later schedule by half-warps, so we need to be down to 16 threads before omitting __syncthreads
if(tix >= 16) return;
if(locBloc[tix+16] > locBloc[tix]) { locBloc[tix] = locBloc[tix+16]; }
if(tix >= 8) return;
if(locBloc[tix+8] > locBloc[tix]) { locBloc[tix] = locBloc[tix+8]; }
if(tix >= 4) return;
if(locBloc[tix+4] > locBloc[tix]) { locBloc[tix] = locBloc[tix+4]; }
if(tix >= 2) return;
if(locBloc[tix+2] > locBloc[tix]) { locBloc[tix] = locBloc[tix+2]; }
if(tix == 0) {
if(locBloc[1] > locBloc[0]) { locBloc[0] = locBloc[1]; }
freeze[blockIdx.x + gridDim.x*blockIdx.y] = locBloc[0];
}
}
#define PRESSURE Cs
// cs0sq = gamma rho^(gamma-1))
__global__ void cukern_FreezeSpeed_hydro(double *rho, double *E, double *px, double *py, double *pz, double *freeze, double *ptot, int nx)
{
int tix = threadIdx.x;
int x = tix + nx*(blockIdx.x + gridDim.x*blockIdx.y);
int addrMax = nx + nx*(blockIdx.x + gridDim.x*blockIdx.y);
double Cs, CsMax;
double psqhf, rhoinv;
//double gg1 = gam*(gam-1.0);
//double gm1 = gam - 1.0;
__shared__ double locBloc[BLOCKDIM];
CsMax = 0.0;
locBloc[tix] = 0.0;
if(x >= addrMax) return; // If we get a very low resolution
while(x < addrMax) {
rhoinv = 1.0/rho[x];
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
PRESSURE = gm1*(E[x] - psqhf*rhoinv);
if(gam*PRESSURE*rhoinv < cs0sq) {
PRESSURE = cs0sq/(gam*rhoinv);
E[x] = psqhf*rhoinv + PRESSURE/gm1;
} /* Constrain temperature to a minimum value */
ptot[x] = PRESSURE;
Cs = sqrt(gam * PRESSURE *rhoinv) + abs(px[x]*rhoinv);
if(Cs > CsMax) CsMax = Cs;
x += BLOCKDIM;
}
locBloc[tix] = CsMax;
__syncthreads();
if(tix >= 32) return;
if(locBloc[tix+32] > locBloc[tix]) { locBloc[tix] = locBloc[tix+32]; }
__syncthreads(); // compute capability 2.x and later schedule by half-warps, so we need to be down to 16 threads before omitting __syncthreads
if(tix >= 16) return;
if(locBloc[tix+16] > locBloc[tix]) { locBloc[tix] = locBloc[tix+16]; }
if(tix >= 8) return;
if(locBloc[tix+8] > locBloc[tix]) { locBloc[tix] = locBloc[tix+8]; }
if(tix >= 4) return;
if(locBloc[tix+4] > locBloc[tix]) { locBloc[tix] = locBloc[tix+4]; }
if(tix >= 2) return;
if(locBloc[tix+2] > locBloc[tix]) { locBloc[tix] = locBloc[tix+2]; }
if(tix == 0) {
if(locBloc[1] > locBloc[0]) { locBloc[0] = locBloc[1]; }
freeze[blockIdx.x + gridDim.x*blockIdx.y] = locBloc[0];
}
}
| c1ce8faeec20f147d8602ce05b9dcf17bfbc565d.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
#include "cudaCommon.h"
#include "mpi_common.h"
#include "freezeAndPtot.h"
/* THIS FUNCTION:
Calculates the maximum in the x direction of the freezing speed c_f, defined
as the fastest characteristic velocity in the x direction.
In the hydrodynamic case this is the adiabatic sound speed, in the MHD case
this is the fast magnetosonic speed.
*/
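/* For reference (a reading of the kernels below; see them for the
   authoritative expressions):
   hydro: P = (gamma-1)*(E - |p|^2/(2*rho)), clamped so that
          gamma*P/rho >= csMin^2, and c_freeze = sqrt(gamma*P/rho) + |p_x|/rho.
   MHD:   ptot = (gamma-1)*(E - |p|^2/(2*rho)) + (1 - gamma/2)*B^2, and the
          freezing speed uses gamma*(gamma-1)*(E - |p|^2/(2*rho)) + MHD_CS_B*B^2
          in place of gamma*P, where MHD_CS_B carries the ALFVEN_CSQ_FACTOR
          stabilizer for low-beta regions.
   The per-block maxima are then reduced across partitions and ranks via
   MGA_globalReduceDimension(..., MGA_OP_MAX, ...). */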
#define BLOCKDIM 64
#define MAXPOW 5
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if(nrhs!=12)
mexErrMsgTxt("Wrong number of arguments. Call using [ptot freeze] = FreezeAndPtot(mass, ener, momx, momy, momz, bz, by, bz, gamma, direct=1, csmin, topology)");
if(nlhs == 0) mexErrMsgTxt("0 LHS argument: Must return at least Ptotal");
if(nlhs > 2) mexErrMsgTxt(">2 LHS arguments: Can only return [Ptot c_freeze]");
CHECK_CUDA_ERROR("entering freezeAndPtot");
ParallelTopology topology;
int zee = topoStructureToC(prhs[11], &topology);
int ispurehydro = (int)*mxGetPr(prhs[9]);
int nArrays;
if(ispurehydro) { nArrays = 5; } else { nArrays = 8; }
MGArray fluid[8];
MGA_accessMatlabArrays(prhs, 0, nArrays-1, fluid);
dim3 arraySize;
arraySize.x = fluid->dim[0];
arraySize.y = fluid->dim[1];
arraySize.z = fluid->dim[2];
dim3 blocksize, gridsize;
blocksize.x = BLOCKDIM; blocksize.y = blocksize.z = 1;
MGArray clone;
MGArray *POut;
MGArray *cfOut;
clone = fluid[0];
if(fluid->partitionDir == PARTITION_X) {
clone.dim[0] = fluid->nGPUs;
} else {
clone.dim[0] = 1;
}
clone.dim[1] = arraySize.y;
clone.dim[2] = arraySize.z;
clone.haloSize = 0;
POut = MGA_createReturnedArrays(plhs, 1, fluid);
MGArray *cfLocal;
int itworked = MGA_allocArrays(&cfLocal, 1, &clone);
double hostgf[6];
double gam = *mxGetPr(prhs[8]);
hostgf[0] = gam;
hostgf[1] = gam - 1.0;
hostgf[2] = gam*(gam-1.0);
hostgf[3] = (1.0 - .5*gam);
hostgf[4] = (*mxGetPr(prhs[10]))*(*mxGetPr(prhs[10])); // min c_s squared ;
hostgf[5] = (ALFVEN_CSQ_FACTOR - .5*gam*(gam-1.0));
int i;
int sub[6];
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
cudaMemcpyToSymbol((const void *)gammafunc, &hostgf[0], 6*sizeof(double), 0, cudaMemcpyHostToDevice);
CHECK_CUDA_ERROR("cfreeze symbol upload");
}
if(ispurehydro) {
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
CHECK_CUDA_ERROR("cudaSetDevice()");
calcPartitionExtent(fluid, i, sub);
gridsize.x = sub[4];
gridsize.y = sub[5];
cukern_FreezeSpeed_hydro<<<gridsize, blocksize>>>(
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
cfLocal->devicePtr[i], POut->devicePtr[i], sub[3]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "Freeze speed hydro");
}
} else {
for(i = 0; i < fluid->nGPUs; i++) {
cudaSetDevice(fluid->deviceID[i]);
calcPartitionExtent(fluid, i, sub);
cukern_FreezeSpeed_mhd<<<gridsize, blocksize>>>(
fluid[0].devicePtr[i],
fluid[1].devicePtr[i],
fluid[2].devicePtr[i],
fluid[3].devicePtr[i],
fluid[4].devicePtr[i],
fluid[5].devicePtr[i],
fluid[6].devicePtr[i],
fluid[7].devicePtr[i],
cfLocal->devicePtr[i], POut->devicePtr[i], sub[3]);
CHECK_CUDA_LAUNCH_ERROR(blocksize, gridsize, fluid, i, "freeze speed MHD");
}
}
cfOut = NULL;
MGA_globalReduceDimension(cfLocal, &cfOut, MGA_OP_MAX, 1, 0, 1, &topology);
MGA_delete(cfLocal);
MGA_returnOneArray(plhs+1, cfOut);
free(POut);
free(cfLocal);
free(cfOut);
}
#define gam gammafunc[0]
#define gm1 gammafunc[1]
#define gg1 gammafunc[2]
#define MHD_PRESS_B gammafunc[3]
#define cs0sq gammafunc[4]
#define MHD_CS_B gammafunc[5]
__global__ void cukern_FreezeSpeed_mhd(double *rho, double *E, double *px, double *py, double *pz, double *bx, double *by, double *bz, double *freeze, double *ptot, int nx)
{
int tix = threadIdx.x;
/* gridDim = [ny nz], nx = nx */
int x = tix + nx*(blockIdx.x + gridDim.x*blockIdx.y);
nx += nx*(blockIdx.x + gridDim.x*blockIdx.y);
//int addrMax = nx + nx*(blockIdx.x + gridDim.x*blockIdx.y);
double pressvar;
double T, bsquared;
double rhoinv;
__shared__ double locBloc[BLOCKDIM];
//CsMax = 0.0;
locBloc[tix] = 0.0;
if(x >= nx) return; // If we get a very low resolution
while(x < nx) {
rhoinv = 1.0/rho[x];
T = .5*rhoinv*(px[x]*px[x] + py[x]*py[x] + pz[x]*pz[x]);
bsquared = bx[x]*bx[x] + by[x]*by[x] + bz[x]*bz[x];
// Calculate internal + magnetic energy
pressvar = E[x] - T;
// Assert minimum thermal soundspeed / temperature
/* if(gam*pressvar*rhoinv < cs0sq) {
E[x] = T + bsqhf + cs0sq/(gam*rhoinv);
pressvar = cs0sq/(gam*rhoinv);
} */
// Calculate gas + magnetic pressure
ptot[x] = gm1*pressvar + MHD_PRESS_B*bsquared;
// We calculate the freezing speed in the X direction: max of |v|+c_fast
// MHD_CS_B includes an "alfven factor" to stabilize the code in low-beta situations
pressvar = (gg1*pressvar + MHD_CS_B*bsquared)*rhoinv;
pressvar = sqrt(abs(pressvar)) + abs(px[x]*rhoinv);
if(pressvar > locBloc[tix]) locBloc[tix] = pressvar;
x += BLOCKDIM;
}
__syncthreads();
if(tix >= 32) return;
if(locBloc[tix+32] > locBloc[tix]) { locBloc[tix] = locBloc[tix+32]; }
__syncthreads(); // compute capability 2.x and later schedule by half-warps, so we need to be down to 16 threads before omitting __syncthreads
if(tix >= 16) return;
if(locBloc[tix+16] > locBloc[tix]) { locBloc[tix] = locBloc[tix+16]; }
if(tix >= 8) return;
if(locBloc[tix+8] > locBloc[tix]) { locBloc[tix] = locBloc[tix+8]; }
if(tix >= 4) return;
if(locBloc[tix+4] > locBloc[tix]) { locBloc[tix] = locBloc[tix+4]; }
if(tix >= 2) return;
if(locBloc[tix+2] > locBloc[tix]) { locBloc[tix] = locBloc[tix+2]; }
if(tix == 0) {
if(locBloc[1] > locBloc[0]) { locBloc[0] = locBloc[1]; }
freeze[blockIdx.x + gridDim.x*blockIdx.y] = locBloc[0];
}
}
#define PRESSURE Cs
// cs0sq = gamma rho^(gamma-1))
__global__ void cukern_FreezeSpeed_hydro(double *rho, double *E, double *px, double *py, double *pz, double *freeze, double *ptot, int nx)
{
int tix = threadIdx.x;
int x = tix + nx*(blockIdx.x + gridDim.x*blockIdx.y);
int addrMax = nx + nx*(blockIdx.x + gridDim.x*blockIdx.y);
double Cs, CsMax;
double psqhf, rhoinv;
//double gg1 = gam*(gam-1.0);
//double gm1 = gam - 1.0;
__shared__ double locBloc[BLOCKDIM];
CsMax = 0.0;
locBloc[tix] = 0.0;
if(x >= addrMax) return; // If we get a very low resolution
while(x < addrMax) {
rhoinv = 1.0/rho[x];
psqhf = .5*(px[x]*px[x]+py[x]*py[x]+pz[x]*pz[x]);
PRESSURE = gm1*(E[x] - psqhf*rhoinv);
if(gam*PRESSURE*rhoinv < cs0sq) {
PRESSURE = cs0sq/(gam*rhoinv);
E[x] = psqhf*rhoinv + PRESSURE/gm1;
} /* Constrain temperature to a minimum value */
ptot[x] = PRESSURE;
Cs = sqrt(gam * PRESSURE *rhoinv) + abs(px[x]*rhoinv);
if(Cs > CsMax) CsMax = Cs;
x += BLOCKDIM;
}
locBloc[tix] = CsMax;
__syncthreads();
if(tix >= 32) return;
if(locBloc[tix+32] > locBloc[tix]) { locBloc[tix] = locBloc[tix+32]; }
__syncthreads(); // compute capability 2.x and later schedule by half-warps, so we need to be down to 16 threads before omitting __syncthreads
if(tix >= 16) return;
if(locBloc[tix+16] > locBloc[tix]) { locBloc[tix] = locBloc[tix+16]; }
if(tix >= 8) return;
if(locBloc[tix+8] > locBloc[tix]) { locBloc[tix] = locBloc[tix+8]; }
if(tix >= 4) return;
if(locBloc[tix+4] > locBloc[tix]) { locBloc[tix] = locBloc[tix+4]; }
if(tix >= 2) return;
if(locBloc[tix+2] > locBloc[tix]) { locBloc[tix] = locBloc[tix+2]; }
if(tix == 0) {
if(locBloc[1] > locBloc[0]) { locBloc[0] = locBloc[1]; }
freeze[blockIdx.x + gridDim.x*blockIdx.y] = locBloc[0];
}
}
|
9077ad31edad08644e41e6bd77c26098b64646c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Select.cuh>
namespace faiss {
namespace gpu {
// Number of warps that the kernel is instantiated with
constexpr int kWarps = 8;
constexpr int kLanes = kWarpSize;
constexpr int kMaxDistance = std::numeric_limits<int>::max();
// Performs a binary matrix multiplication, returning the lowest k results in
// `vecs` for each `query` in terms of Hamming distance (a fused kernel)
// Each warp calculates distance for a single query
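// For word-packed binary codes the quantity accumulated below is the Hamming
// distance d(q, v) = sum_i popc(q_i ^ v_i); e.g. q_i = 0b1011 and v_i = 0b0001
// give q_i ^ v_i = 0b1010, popc = 2. Lanes that do not correspond to a real
// (query, vec) pair submit kMaxDistance, so they cannot displace a real result
// in the top-k selection.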
template <int NumWarpQ, int NumThreadQ, typename BinaryType>
__launch_bounds__(kWarps* kLanes) __global__ void binaryDistanceAnySize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<idx_t, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
idx_t,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
idx_t warpQuery = idx_t(blockIdx.x) * kWarps + warpId;
bool queryInBounds = warpQuery < query.getSize(0);
// Each warp loops through the entire chunk of vectors
for (idx_t blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// Reduction dimension
for (idx_t blockK = 0; blockK < vecs.getSize(1); blockK += kLanes) {
idx_t laneK = blockK + laneId;
bool kInBounds = laneK < vecs.getSize(1);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
idx_t vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < kLanes; ++i) {
threadDistance +=
__popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
}
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
idx_t id = valInBounds ? blockVec + laneId : idx_t(-1);
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
// Version of the kernel that avoids a loop over the reduction dimension, and
// thus avoids reloading the query vectors
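// Here the whole reduction dimension fits in a single kLanes-wide tile, so each
// query word is staged into shared memory once and reused for the entire scan
// over `vecs`. This only applies when vecs.getSize(1) is at most ReductionLimit
// words; the host-side dispatcher restricts it further (8 uint32 words = 256
// bits, or 16 uint8 words = 128 bits).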
template <
int NumWarpQ,
int NumThreadQ,
typename BinaryType,
int ReductionLimit = kLanes>
__global__ void __launch_bounds__(kWarps* kLanes) binaryDistanceLimitSize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<idx_t, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
idx_t,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int laneK = laneId;
idx_t warpQuery = idx_t(blockIdx.x) * kWarps + warpId;
bool kInBounds = laneK < vecs.getSize(1);
bool queryInBounds = warpQuery < query.getSize(0);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// Each warp loops through the entire chunk of vectors
for (idx_t blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
idx_t vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < ReductionLimit; ++i) {
threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
idx_t id = valInBounds ? blockVec + laneId : idx_t(-1);
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
template <typename BinaryType>
void runBinaryDistanceAnySize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
hipStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
hipLaunchKernelGGL(( binaryDistanceAnySize<1, 1, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 32) {
hipLaunchKernelGGL(( binaryDistanceAnySize<32, 2, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 64) {
hipLaunchKernelGGL(( binaryDistanceAnySize<64, 3, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 128) {
hipLaunchKernelGGL(( binaryDistanceAnySize<128, 3, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 256) {
hipLaunchKernelGGL(( binaryDistanceAnySize<256, 4, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 512) {
hipLaunchKernelGGL(( binaryDistanceAnySize<512, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 1024) {
hipLaunchKernelGGL(( binaryDistanceAnySize<1024, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
hipLaunchKernelGGL(( binaryDistanceAnySize<2048, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#endif
}
template <typename BinaryType, int ReductionLimit>
void runBinaryDistanceLimitSize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
hipStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<1, 1, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 32) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<32, 2, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 64) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<64, 3, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 128) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<128, 3, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 256) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<256, 4, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 512) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<512, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 1024) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<1024, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<2048, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#endif
}
void runBinaryDistance(
Tensor<unsigned char, 2, true>& vecs,
Tensor<unsigned char, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
hipStream_t stream) {
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(vecs.getSize(1) == query.getSize(1));
FAISS_ASSERT(outK.getSize(1) == k);
FAISS_ASSERT(outV.getSize(1) == k);
// For the optimized uint32 kernel, we handle 32 * 8 = 256 max dims
constexpr int kReductionLimit32 = 8;
// For the optimized uint8 kernel, we handle 8 * 16 = 128 max dims
constexpr int kReductionLimit8 = 16;
// All other cases (large or small) go through the general kernel
if (vecs.getSize(1) % sizeof(unsigned int) == 0 &&
(vecs.getSize(1) / sizeof(unsigned int)) <= kReductionLimit32) {
auto vecs32 = vecs.castResize<unsigned int>();
auto query32 = query.castResize<unsigned int>();
// Optimize for vectors with dimensions a multiple of 32 that are less
// than 32 * kReductionLimit (256) dimensions in size
runBinaryDistanceLimitSize<unsigned int, kReductionLimit32>(
vecs32, query32, outK, outV, k, stream);
} else if (vecs.getSize(1) <= kReductionLimit8) {
// Optimize for vectors with dimensions a multiple of 32 that are less
// than 32 * kReductionLimit (256) dimensions in size
runBinaryDistanceLimitSize<unsigned char, kReductionLimit8>(
vecs, query, outK, outV, k, stream);
} else {
// Arbitrary size kernel
runBinaryDistanceAnySize<unsigned char>(
vecs, query, outK, outV, k, stream);
}
}
} // namespace gpu
} // namespace faiss
| 9077ad31edad08644e41e6bd77c26098b64646c6.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Select.cuh>
namespace faiss {
namespace gpu {
// Number of warps that the kernel is instantiated with
constexpr int kWarps = 8;
constexpr int kLanes = kWarpSize;
constexpr int kMaxDistance = std::numeric_limits<int>::max();
// Performs a binary matrix multiplication, returning the lowest k results in
// `vecs` for each `query` in terms of Hamming distance (a fused kernel)
// Each warp calculates distance for a single query
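// For word-packed binary codes the quantity accumulated below is the Hamming
// distance d(q, v) = sum_i popc(q_i ^ v_i); e.g. q_i = 0b1011 and v_i = 0b0001
// give q_i ^ v_i = 0b1010, popc = 2. Lanes that do not correspond to a real
// (query, vec) pair submit kMaxDistance, so they cannot displace a real result
// in the top-k selection.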
template <int NumWarpQ, int NumThreadQ, typename BinaryType>
__launch_bounds__(kWarps* kLanes) __global__ void binaryDistanceAnySize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<idx_t, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
idx_t,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
idx_t warpQuery = idx_t(blockIdx.x) * kWarps + warpId;
bool queryInBounds = warpQuery < query.getSize(0);
// Each warp loops through the entire chunk of vectors
for (idx_t blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// Reduction dimension
for (idx_t blockK = 0; blockK < vecs.getSize(1); blockK += kLanes) {
idx_t laneK = blockK + laneId;
bool kInBounds = laneK < vecs.getSize(1);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
idx_t vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < kLanes; ++i) {
threadDistance +=
__popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
}
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
idx_t id = valInBounds ? blockVec + laneId : idx_t(-1);
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
// Version of the kernel that avoids a loop over the reduction dimension, and
// thus avoids reloading the query vectors
template <
int NumWarpQ,
int NumThreadQ,
typename BinaryType,
int ReductionLimit = kLanes>
__global__ void __launch_bounds__(kWarps* kLanes) binaryDistanceLimitSize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<idx_t, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
idx_t,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int laneK = laneId;
idx_t warpQuery = idx_t(blockIdx.x) * kWarps + warpId;
bool kInBounds = laneK < vecs.getSize(1);
bool queryInBounds = warpQuery < query.getSize(0);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// Each warp loops through the entire chunk of vectors
for (idx_t blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
idx_t vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < ReductionLimit; ++i) {
threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
idx_t id = valInBounds ? blockVec + laneId : idx_t(-1);
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
template <typename BinaryType>
void runBinaryDistanceAnySize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
cudaStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
binaryDistanceAnySize<1, 1, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 32) {
binaryDistanceAnySize<32, 2, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 64) {
binaryDistanceAnySize<64, 3, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 128) {
binaryDistanceAnySize<128, 3, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 256) {
binaryDistanceAnySize<256, 4, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 512) {
binaryDistanceAnySize<512, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 1024) {
binaryDistanceAnySize<1024, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
binaryDistanceAnySize<2048, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#endif
}
template <typename BinaryType, int ReductionLimit>
void runBinaryDistanceLimitSize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
cudaStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
binaryDistanceLimitSize<1, 1, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 32) {
binaryDistanceLimitSize<32, 2, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 64) {
binaryDistanceLimitSize<64, 3, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 128) {
binaryDistanceLimitSize<128, 3, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 256) {
binaryDistanceLimitSize<256, 4, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 512) {
binaryDistanceLimitSize<512, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 1024) {
binaryDistanceLimitSize<1024, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
binaryDistanceLimitSize<2048, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#endif
}
void runBinaryDistance(
Tensor<unsigned char, 2, true>& vecs,
Tensor<unsigned char, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<idx_t, 2, true>& outV,
int k,
cudaStream_t stream) {
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(vecs.getSize(1) == query.getSize(1));
FAISS_ASSERT(outK.getSize(1) == k);
FAISS_ASSERT(outV.getSize(1) == k);
// For the optimized uint32 kernel, we handle 32 * 8 = 256 max dims
constexpr int kReductionLimit32 = 8;
// For the optimized uint8 kernel, we handle 8 * 16 = 128 max dims
constexpr int kReductionLimit8 = 16;
// All other cases (large or small) go through the general kernel
if (vecs.getSize(1) % sizeof(unsigned int) == 0 &&
(vecs.getSize(1) / sizeof(unsigned int)) <= kReductionLimit32) {
auto vecs32 = vecs.castResize<unsigned int>();
auto query32 = query.castResize<unsigned int>();
// Optimize for vectors with dimensions a multiple of 32 that are less
// than 32 * kReductionLimit (256) dimensions in size
runBinaryDistanceLimitSize<unsigned int, kReductionLimit32>(
vecs32, query32, outK, outV, k, stream);
} else if (vecs.getSize(1) <= kReductionLimit8) {
        // Optimize for short vectors of at most kReductionLimit8 (16) bytes,
        // i.e. up to 8 * 16 = 128 binary dimensions
runBinaryDistanceLimitSize<unsigned char, kReductionLimit8>(
vecs, query, outK, outV, k, stream);
} else {
// Arbitrary size kernel
runBinaryDistanceAnySize<unsigned char>(
vecs, query, outK, outV, k, stream);
}
}
} // namespace gpu
} // namespace faiss
|
4cd8b5fee4daa1dd8fd6babc061c1526cbdc1863.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Tests for dropout layer.
// Created: 02/16/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/testdropoutlayer.cuh"
TestDropoutLayer::TestDropoutLayer(string outputFolder)
{
m_outputFolder = outputFolder;
// Registering tests.
m_dropoutLayerTests["doforwardprop"] = &TestDropoutLayer::TestDoForwardProp;
//m_dropoutLayerTests["forwardpropspeed"] = &TestDropoutLayer::TestForwardPropSpeed;
m_dropoutLayerTests["dobackwardprop"] = &TestDropoutLayer::TestDoBackwardProp;
}
bool TestDropoutLayer::HasTest(string testName)
{
auto test = m_dropoutLayerTests.find(testName);
return test != m_dropoutLayerTests.end();
}
void TestDropoutLayer::RunTest(string testName)
{
auto test = m_dropoutLayerTests.find(testName);
TestingAssert(test != m_dropoutLayerTests.end(), "Test not found!");
((*this).*(test->second))();
}
void TestDropoutLayer::RunAllTests()
{
for (auto test = m_dropoutLayerTests.begin(); test != m_dropoutLayerTests.end(); ++test)
{
((*this).*(test->second))();
s_consoleHelper.SetConsoleForeground(ConsoleForeground::GREEN);
cout << "Test " << test->first << " passed!" << endl << endl;
s_consoleHelper.RevertConsoleForeground();
}
}
//******************************************************************************************************
// Helper functions
//******************************************************************************************************
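// Each helper below builds a mock (reference) dropout layer and the real
// DropoutLayer on top of the same mock input layer, copies the mock layer's
// dropout filter into the real layer so both drop the same activations, runs
// the propagation pass on both, and compares the real layer's buffers (copied
// back to the host) against the mock layer's buffers within a small
// absolute/relative tolerance.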
void TestDropoutLayer::TestDoForwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, float dropProbability)
{
// Creating layers.
MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockDropoutLayer mockDropoutLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, dropProbability);
mockDropoutLayer.AddPrevLayer(&inputLayer);
DropoutLayer dropoutLayer(ParallelismMode::Data, 0, 0, NULL, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
dropProbability, true, false);
dropoutLayer.AddPrevLayer(&inputLayer);
// Doing forward prop.
PropagationMode propagationMode = PropagationMode::Train;
inputLayer.LoadInputs();
inputLayer.DoForwardProp(propagationMode);
mockDropoutLayer.LoadInputs();
mockDropoutLayer.DoForwardProp(propagationMode);
dropoutLayer.CopyDropoutFilterFromHost(mockDropoutLayer.GetDropoutFilter());
dropoutLayer.LoadInputs();
dropoutLayer.DoForwardProp(propagationMode);
CudaAssert(hipDeviceSynchronize());
// Transferring results to host.
size_t activationsBufferSize = mockDropoutLayer.GetActivationBufferSize();
float* dropoutLayerActivationBuffer;
CudaAssert(hipHostMalloc<float>(&dropoutLayerActivationBuffer, activationsBufferSize));
CudaAssert(hipMemcpy(dropoutLayerActivationBuffer, dropoutLayer.GetActivationDataBuffer(), activationsBufferSize, hipMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t activationsBufferLength = activationsBufferSize / sizeof(float);
const float* mockDropoutLayerActivationBuffer = mockDropoutLayer.GetActivationDataBuffer();
const float maxDiff = 0.000001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.00000001f;
CompareBuffers(dropoutLayerActivationBuffer, mockDropoutLayerActivationBuffer, activationsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(hipHostFree(dropoutLayerActivationBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock dropout layer activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(foundDifferentFromZeroReg, "All dropout layer activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(correctResult, "Incorrect forward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock activation: " + to_string(firstDifferentMock) + "; First different regular activation: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data width: " + to_string(inputDataWidth) + "; Input data height: " +
to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " + to_string(dropProbability));
cout << "Forward prop passed. Input num channels: " << inputNumChannels << "; Input width: " << inputDataWidth << "; Input height: " << inputDataHeight <<
"; Input data count: " << inputDataCount << "; Drop probability: " << dropProbability << endl;
}
void TestDropoutLayer::TestDoBackwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, float dropProbability)
{
// Creating layers.
MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockDropoutLayer mockDropoutLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, dropProbability);
mockDropoutLayer.AddPrevLayer(&inputLayer);
DropoutLayer dropoutLayer(ParallelismMode::Data, 0, 0, NULL, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
dropProbability, true, false);
dropoutLayer.AddPrevLayer(&inputLayer);
MockOutputLayer outputLayer(inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, LossFunctionType::LogisticRegression, false, 0, true);
mockDropoutLayer.AddNextLayer(&outputLayer);
dropoutLayer.AddNextLayer(&outputLayer);
// Doing forward and backward prop.
PropagationMode propagationMode = PropagationMode::Train;
inputLayer.LoadInputs();
inputLayer.DoForwardProp(propagationMode);
mockDropoutLayer.LoadInputs();
mockDropoutLayer.DoForwardProp(propagationMode);
dropoutLayer.CopyDropoutFilterFromHost(mockDropoutLayer.GetDropoutFilter());
dropoutLayer.LoadInputs();
dropoutLayer.DoForwardProp(propagationMode);
outputLayer.DoBackwardProp();
dropoutLayer.LoadActivationGradients();
dropoutLayer.DoBackwardProp();
mockDropoutLayer.LoadActivationGradients();
mockDropoutLayer.DoBackwardProp();
CudaAssert(hipDeviceSynchronize());
// Transferring results to host.
size_t gradientsBufferSize = inputLayer.GetActivationBufferSize();
float* dropoutLayerInputGradientsBuffer;
CudaAssert(hipHostMalloc<float>(&dropoutLayerInputGradientsBuffer, gradientsBufferSize));
CudaAssert(hipMemcpy(dropoutLayerInputGradientsBuffer, dropoutLayer.GetInputGradientsBuffer(), gradientsBufferSize, hipMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t gradientsBufferLength = gradientsBufferSize / sizeof(float);
const float* mockDropoutLayerInputGradientsBuffer = mockDropoutLayer.GetInputGradientsBuffer();
const float maxDiff = 0.000001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.00000001f;
CompareBuffers(dropoutLayerInputGradientsBuffer, mockDropoutLayerInputGradientsBuffer, gradientsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(hipHostFree(dropoutLayerInputGradientsBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock dropout layer input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(foundDifferentFromZeroReg, "All dropout layer input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(correctResult, "Incorrect backward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock input gradient: " + to_string(firstDifferentMock) + "; First different regular input gradient: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data width: " + to_string(inputDataWidth) + "; Input data height: " +
to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " + to_string(dropProbability));
cout << "Backward prop passed. Input num channels: " << inputNumChannels << "; Input width: " << inputDataWidth << "; Input height: " << inputDataHeight <<
"; Input data count: " << inputDataCount << "; Drop probability: " << dropProbability << endl;
}
//******************************************************************************************************
// Tests
//******************************************************************************************************
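// The sweeps below vary input geometry, drop probability (0.2 / 0.5 / 0.7) and
// batch size: data counts below 128 exercise the partial last batch, 128 the
// full batch.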
void TestDropoutLayer::TestDoForwardProp()
{
// lastBatch == true
// dropProbability == 0.2f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.7f /*dropProbability*/);
// lastBatch == false
// dropProbability == 0.2f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
}
/*
Current speed record over 1000 launches:
*/
//void TestDropoutLayer::TestForwardPropSpeed()
//{
// // Creating cuBLAS handle to use in tests.
// hipblasHandle_t cublasHandle;
// CudaCublasAssert(hipblasCreate(&cublasHandle));
//
// // Creating layers.
// uint inputNumChannels = 256;
// uint inputDataWidth = 13;
// uint inputDataHeight = 13;
// uint inputDataCount = 128;
// uint numNeurons = 2048;
// MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
// float weightsDeviation = 0.01f;
// float biasesInitialValue = 1.0f;
// ActivationType activationType = ActivationType::ReLu;
// StandardLayer standardLayer(ParallelismMode::Data, 0, cublasHandle, 0, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
// numNeurons, true, weightsDeviation, true, biasesInitialValue, activationType);
// standardLayer.AddPrevLayer(&inputLayer);
//
// // Doing forward prop and measuring time.
// inputLayer.LoadInputs();
// inputLayer.DoForwardProp();
// standardLayer.LoadInputs();
// const uint c_timesToLaunch = 1000;
// high_resolution_clock::time_point startTime = high_resolution_clock::now();
// for (uint i = 0; i < c_timesToLaunch; ++i)
// {
// standardLayer.DoForwardProp();
// }
// CudaAssert(hipDeviceSynchronize());
// high_resolution_clock::time_point endTime = high_resolution_clock::now();
//
// // Reporting time.
// long long durationInMilliseconds = duration_cast<milliseconds>(endTime - startTime).count();
// cout << "Forward prop took " << (float)durationInMilliseconds / (float)c_timesToLaunch << "ms in average to process." << endl;
//
// // Destroying cuBLAS handle.
// CudaCublasAssert(hipblasDestroy(cublasHandle));
//}
void TestDropoutLayer::TestDoBackwardProp()
{
// lastBatch == true
// dropProbability == 0.2f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.7f /*dropProbability*/);
// lastBatch == false
// dropProbability == 0.2f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
} | 4cd8b5fee4daa1dd8fd6babc061c1526cbdc1863.cu | // ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Tests for dropout layer.
// Created: 02/16/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/testdropoutlayer.cuh"
TestDropoutLayer::TestDropoutLayer(string outputFolder)
{
m_outputFolder = outputFolder;
// Registering tests.
m_dropoutLayerTests["doforwardprop"] = &TestDropoutLayer::TestDoForwardProp;
//m_dropoutLayerTests["forwardpropspeed"] = &TestDropoutLayer::TestForwardPropSpeed;
m_dropoutLayerTests["dobackwardprop"] = &TestDropoutLayer::TestDoBackwardProp;
}
bool TestDropoutLayer::HasTest(string testName)
{
auto test = m_dropoutLayerTests.find(testName);
return test != m_dropoutLayerTests.end();
}
void TestDropoutLayer::RunTest(string testName)
{
auto test = m_dropoutLayerTests.find(testName);
TestingAssert(test != m_dropoutLayerTests.end(), "Test not found!");
((*this).*(test->second))();
}
void TestDropoutLayer::RunAllTests()
{
for (auto test = m_dropoutLayerTests.begin(); test != m_dropoutLayerTests.end(); ++test)
{
((*this).*(test->second))();
s_consoleHelper.SetConsoleForeground(ConsoleForeground::GREEN);
cout << "Test " << test->first << " passed!" << endl << endl;
s_consoleHelper.RevertConsoleForeground();
}
}
//******************************************************************************************************
// Helper functions
//******************************************************************************************************
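// Each helper below builds a mock (reference) dropout layer and the real
// DropoutLayer on top of the same mock input layer, copies the mock layer's
// dropout filter into the real layer so both drop the same activations, runs
// the propagation pass on both, and compares the real layer's buffers (copied
// back to the host) against the mock layer's buffers within a small
// absolute/relative tolerance.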
void TestDropoutLayer::TestDoForwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, float dropProbability)
{
// Creating layers.
MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockDropoutLayer mockDropoutLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, dropProbability);
mockDropoutLayer.AddPrevLayer(&inputLayer);
DropoutLayer dropoutLayer(ParallelismMode::Data, 0, 0, NULL, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
dropProbability, true, false);
dropoutLayer.AddPrevLayer(&inputLayer);
// Doing forward prop.
PropagationMode propagationMode = PropagationMode::Train;
inputLayer.LoadInputs();
inputLayer.DoForwardProp(propagationMode);
mockDropoutLayer.LoadInputs();
mockDropoutLayer.DoForwardProp(propagationMode);
dropoutLayer.CopyDropoutFilterFromHost(mockDropoutLayer.GetDropoutFilter());
dropoutLayer.LoadInputs();
dropoutLayer.DoForwardProp(propagationMode);
CudaAssert(cudaDeviceSynchronize());
// Transferring results to host.
size_t activationsBufferSize = mockDropoutLayer.GetActivationBufferSize();
float* dropoutLayerActivationBuffer;
CudaAssert(cudaMallocHost<float>(&dropoutLayerActivationBuffer, activationsBufferSize));
CudaAssert(cudaMemcpy(dropoutLayerActivationBuffer, dropoutLayer.GetActivationDataBuffer(), activationsBufferSize, cudaMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t activationsBufferLength = activationsBufferSize / sizeof(float);
const float* mockDropoutLayerActivationBuffer = mockDropoutLayer.GetActivationDataBuffer();
const float maxDiff = 0.000001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.00000001f;
CompareBuffers(dropoutLayerActivationBuffer, mockDropoutLayerActivationBuffer, activationsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(cudaFreeHost(dropoutLayerActivationBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock dropout layer activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(foundDifferentFromZeroReg, "All dropout layer activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(correctResult, "Incorrect forward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock activation: " + to_string(firstDifferentMock) + "; First different regular activation: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data width: " + to_string(inputDataWidth) + "; Input data height: " +
to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " + to_string(dropProbability));
cout << "Forward prop passed. Input num channels: " << inputNumChannels << "; Input width: " << inputDataWidth << "; Input height: " << inputDataHeight <<
"; Input data count: " << inputDataCount << "; Drop probability: " << dropProbability << endl;
}
void TestDropoutLayer::TestDoBackwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, float dropProbability)
{
// Creating layers.
MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockDropoutLayer mockDropoutLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, dropProbability);
mockDropoutLayer.AddPrevLayer(&inputLayer);
DropoutLayer dropoutLayer(ParallelismMode::Data, 0, 0, NULL, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
dropProbability, true, false);
dropoutLayer.AddPrevLayer(&inputLayer);
MockOutputLayer outputLayer(inputNumChannels * inputDataWidth * inputDataHeight, inputDataCount, LossFunctionType::LogisticRegression, false, 0, true);
mockDropoutLayer.AddNextLayer(&outputLayer);
dropoutLayer.AddNextLayer(&outputLayer);
// Doing forward and backward prop.
PropagationMode propagationMode = PropagationMode::Train;
inputLayer.LoadInputs();
inputLayer.DoForwardProp(propagationMode);
mockDropoutLayer.LoadInputs();
mockDropoutLayer.DoForwardProp(propagationMode);
dropoutLayer.CopyDropoutFilterFromHost(mockDropoutLayer.GetDropoutFilter());
dropoutLayer.LoadInputs();
dropoutLayer.DoForwardProp(propagationMode);
outputLayer.DoBackwardProp();
dropoutLayer.LoadActivationGradients();
dropoutLayer.DoBackwardProp();
mockDropoutLayer.LoadActivationGradients();
mockDropoutLayer.DoBackwardProp();
CudaAssert(cudaDeviceSynchronize());
// Transferring results to host.
size_t gradientsBufferSize = inputLayer.GetActivationBufferSize();
float* dropoutLayerInputGradientsBuffer;
CudaAssert(cudaMallocHost<float>(&dropoutLayerInputGradientsBuffer, gradientsBufferSize));
CudaAssert(cudaMemcpy(dropoutLayerInputGradientsBuffer, dropoutLayer.GetInputGradientsBuffer(), gradientsBufferSize, cudaMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t gradientsBufferLength = gradientsBufferSize / sizeof(float);
const float* mockDropoutLayerInputGradientsBuffer = mockDropoutLayer.GetInputGradientsBuffer();
const float maxDiff = 0.000001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.00000001f;
CompareBuffers(dropoutLayerInputGradientsBuffer, mockDropoutLayerInputGradientsBuffer, gradientsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(cudaFreeHost(dropoutLayerInputGradientsBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock dropout layer input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(foundDifferentFromZeroReg, "All dropout layer input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data width: " +
to_string(inputDataWidth) + "; Input data height: " + to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " +
to_string(dropProbability));
TestingAssert(correctResult, "Incorrect backward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock input gradient: " + to_string(firstDifferentMock) + "; First different regular input gradient: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data width: " + to_string(inputDataWidth) + "; Input data height: " +
to_string(inputDataHeight) + "; Input data count: " + to_string(inputDataCount) + "; Drop probability: " + to_string(dropProbability));
cout << "Backward prop passed. Input num channels: " << inputNumChannels << "; Input width: " << inputDataWidth << "; Input height: " << inputDataHeight <<
"; Input data count: " << inputDataCount << "; Drop probability: " << dropProbability << endl;
}
//******************************************************************************************************
// Tests
//******************************************************************************************************
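// The sweeps below vary input geometry, drop probability (0.2 / 0.5 / 0.7) and
// batch size: data counts below 128 exercise the partial last batch, 128 the
// full batch.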
void TestDropoutLayer::TestDoForwardProp()
{
// lastBatch == true
// dropProbability == 0.2f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.7f /*dropProbability*/);
// lastBatch == false
// dropProbability == 0.2f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoForwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoForwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
}
/*
Current speed record over 1000 launches:
*/
//void TestDropoutLayer::TestForwardPropSpeed()
//{
// // Creating cuBLAS handle to use in tests.
// cublasHandle_t cublasHandle;
// CudaCublasAssert(cublasCreate(&cublasHandle));
//
// // Creating layers.
// uint inputNumChannels = 256;
// uint inputDataWidth = 13;
// uint inputDataHeight = 13;
// uint inputDataCount = 128;
// uint numNeurons = 2048;
// MockInputLayer inputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
// float weightsDeviation = 0.01f;
// float biasesInitialValue = 1.0f;
// ActivationType activationType = ActivationType::ReLu;
// StandardLayer standardLayer(ParallelismMode::Data, 0, cublasHandle, 0, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false,
// numNeurons, true, weightsDeviation, true, biasesInitialValue, activationType);
// standardLayer.AddPrevLayer(&inputLayer);
//
// // Doing forward prop and measuring time.
// inputLayer.LoadInputs();
// inputLayer.DoForwardProp();
// standardLayer.LoadInputs();
// const uint c_timesToLaunch = 1000;
// high_resolution_clock::time_point startTime = high_resolution_clock::now();
// for (uint i = 0; i < c_timesToLaunch; ++i)
// {
// standardLayer.DoForwardProp();
// }
// CudaAssert(cudaDeviceSynchronize());
// high_resolution_clock::time_point endTime = high_resolution_clock::now();
//
// // Reporting time.
// long long durationInMilliseconds = duration_cast<milliseconds>(endTime - startTime).count();
// cout << "Forward prop took " << (float)durationInMilliseconds / (float)c_timesToLaunch << "ms in average to process." << endl;
//
// // Destroying cuBLAS handle.
// CudaCublasAssert(cublasDestroy(cublasHandle));
//}
void TestDropoutLayer::TestDoBackwardProp()
{
// lastBatch == true
// dropProbability == 0.2f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 119 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 97 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 55 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 27 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 13 /*inputDataCount*/, 0.7f /*dropProbability*/);
// lastBatch == false
// dropProbability == 0.2f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.2f /*dropProbability*/);
// dropProbability == 0.5f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.5f /*dropProbability*/);
// dropProbability == 0.7f
TestDoBackwardProp(3 /*inputNumChannels*/, 64 /*inputDataWidth*/, 64 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(64 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(192 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 13 /*inputDataWidth*/, 13 /*inputDataHeight*/, 128 /*inputDataCount*/, 0.7f /*dropProbability*/);
} |
204713ebe8d9d86642b720b112cdb27cacf78845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rules.h>
#define MAX_LEN 1500
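/*
 * match: one thread scans one packet, up to its recorded length or a
 * terminating '\0', against a flattened Aho-Corasick style state table.
 * Judging from the indexing below, each state occupies 257 uint16_t entries:
 * the entry selected by the input byte value holds the goto transition,
 * entry 0 doubles as the matched content id of a state, and entry 256 holds
 * the failure link taken when no transition exists. Only the first match id
 * found in a packet is written to result[i].
 * (rules.h, which defines MAX_STATE and AcNodeGPU, is not shown here.)
 */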
__global__ static void match(uint16_t *acArray, AcNodeGPU *contPatt, char *packets, uint16_t *packets_len, uint16_t *result)
{
printf("GPU match\n");
uint16_t len;
uint16_t contId[20] = {0};
int tmp;
int state = 0;
int i = 0, j = 0, k = 0;
char content;
// Multi-threads for many packets. One packet each thread.
int bid = blockIdx.x;
int tid = threadIdx.x;
i = tid + bid * blockDim.x;
len = packets_len[i];
for(k = 0; k < len + 1; k++)
{
content = packets[MAX_LEN * i + k];
while(1)
{
			tmp = acArray[257 * state + (unsigned char)content]; // index by the unsigned byte value (0-255)
if(tmp != 0)
{
if(acArray[257 * tmp + 0] != 0)
{
contId[j++] = acArray[257 * tmp + 0];
}
state = tmp;
break;
}
else
{
if(state == 0) break;
else state = acArray[257 * state + 256];
}
}
if(content == '\0') break;
}
// __syncthreads();
printf("Finish...");
if(j == 0)
{ printf("!!!!!!j==0\n");
result[i] = 0;
}
else
{ printf("!!!!!!j!=0 contId[0]: %d\n", contId[0]);
result[i] = contId[0];
}
}
/*__global__ static void match(uint16_t *acArray, AcNodeGPU *contPatt, char *packets, int n, uint16_t *result)
{
uint16_t contId[10] = {0};
int tmp;
int state = 0;
int i = 0, j = 0, k = 0;
// Single thread for many packets
for(i = 0; i < n; i++)
{
for(k = 0; k < LEN; k++)
{
char content = packets[LEN * i + k];
while(1)
{
tmp = acArray[257 * state + ((int)content - 0)];
if(tmp != 0)
{
if(acArray[257 * tmp + 0] != 0)
{
contId[j++] = acArray[257 * tmp + 0];
}
state = tmp;
break;
}
else
{
if(state == 0) break;
else state = acArray[257 * state + 256];
}
}
if(content == '\0') break;
}
}
__syncthreads();
for(i = 0; i < 10; i++) result[i] = contId[i];
}*/
/*__device__ uint16_t *acGPU;
__device__ AcNodeGPU *contPatt;
__device__ char *pkt;
__device__ uint16_t *pkt_len;
__device__ uint16_t *res;
__device__ int n;
*/
extern "C"
void gpuinit(RuleSetRoot *rsr, int blockNum, int threadNum, uint16_t **acGPU, AcNodeGPU **contPatt, char **pkt, uint16_t **pkt_len, uint16_t **res)
{
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;*/
int n = blockNum * threadNum;
/* uint16_t *tmp_acGPU;
AcNodeGPU *tmp_contPatt;
char *tmp_pkt;
uint16_t *tmp_pkt_len;
uint16_t *tmp_res;
int tmp_n = blockNum * threadNum;
*/
hipMalloc((void **)acGPU, MAX_STATE * 257 * sizeof(uint16_t));
	hipMalloc((void **)contPatt, rsr->nodeNum * sizeof(AcNodeGPU)); // allocation must match the AcNodeGPU copy in gpumatch
hipMalloc((void **)pkt, n * MAX_LEN * sizeof(char));
hipMalloc((void **)pkt_len, n * sizeof(uint16_t));
hipMalloc((void **)res, n * sizeof(uint16_t));
/*hipMalloc((void **)&tmp_acGPU, MAX_STATE * 257 * sizeof(uint16_t));
hipMalloc((void **)&tmp_contPatt, rsr->nodeNum * sizeof(acGPU));
hipMalloc((void **)&tmp_pkt, tmp_n * MAX_LEN * sizeof(char));
hipMalloc((void **)&tmp_pkt_len, tmp_n * sizeof(uint16_t));
hipMalloc((void **)&tmp_res, tmp_n * sizeof(uint16_t));
hipMemcpyToSymbol(acGPU, &tmp_acGPU, MAX_STATE * 257 * sizeof(uint16_t));
hipMemcpyToSymbol(contPatt, &tmp_contPatt, rsr->nodeNum *sizeof(acGPU));
hipMemcpyToSymbol(pkt, &tmp_pkt, tmp_n * MAX_LEN * sizeof(char));
hipMemcpyToSymbol(pkt_len, &tmp_pkt_len, tmp_n * sizeof(uint16_t));
hipMemcpyToSymbol(res, &tmp_res, tmp_n * sizeof(uint16_t));
hipMemcpyToSymbol(n, &tmp_n, sizeof(int));
*/
hipDeviceProp_t deviceProp;
int devID = 0;
hipSetDevice(devID);
hipGetDeviceProperties(&deviceProp, devID);
printf("\n\n******GPU Device %s\n", deviceProp.name);
}
extern "C"
void gpufree(int k, uint16_t *acGPU, AcNodeGPU *contPatt, char *pkt, uint16_t *pkt_len, uint16_t *res)
{
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;
acGPU = *ptr_acGPU;
contPatt = *ptr_contPatt;
pkt = *ptr_pkt;
pkt_len = *ptr_pkt_len;
res = *ptr_res;
*/
if(k == 0)
printf("######GPU-Free\n");
hipFree(acGPU);
hipFree(contPatt);
hipFree(pkt);
hipFree(pkt_len);
hipFree(res);
}
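/*
 * gpumatch: copies the state table, packet payloads and packet lengths into
 * the device buffers prepared by gpuinit, launches one matching thread per
 * packet (blockNum * threadNum packets in total), copies the per-packet match
 * ids back to the host and reports the elapsed time measured with HIP events.
 *
 * Typical call sequence (sketch only; the real host glue lives elsewhere):
 *   gpuinit(rsr, blocks, threads, &acGPU, &contPatt, &pkt, &pkt_len, &res);
 *   uint16_t *hits = gpumatch(rsr, packets, packets_len, blocks, threads,
 *                             acGPU, contPatt, pkt, pkt_len, res);
 *   gpufree(0, acGPU, contPatt, pkt, pkt_len, res);
 */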
extern "C"
uint16_t *gpumatch(RuleSetRoot *rsr, char *packets, int *packets_len, int blockNum, int threadNum, uint16_t *acGPU, AcNodeGPU *contPatt, char *pkt, uint16_t *pkt_len, uint16_t *res)
{
int n;
n = blockNum * threadNum;
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;
acGPU = *ptr_acGPU;
contPatt = *ptr_contPatt;
pkt = *ptr_pkt;
pkt_len = *ptr_pkt_len;
res = *ptr_res;
*/
hipMemcpy(acGPU, rsr->acGPU, MAX_STATE * 257 * sizeof(uint16_t), hipMemcpyHostToDevice);
hipMemcpy(contPatt, rsr->contPattMatch, rsr->nodeNum * sizeof(AcNodeGPU), hipMemcpyHostToDevice);
hipMemcpy(pkt, packets, n * MAX_LEN * sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(pkt_len, packets_len, n * sizeof(uint16_t), hipMemcpyHostToDevice);
uint16_t *results;
results = (uint16_t *)malloc(n * sizeof(uint16_t));
memset(results, 0, n * sizeof(uint16_t));
float time_gpu = 0.0;
hipEvent_t start_gpu, stop_gpu;
hipEventCreate(&stop_gpu);
hipEventCreate(&start_gpu);
hipEventRecord(start_gpu, 0);
hipLaunchKernelGGL(( match), dim3(blockNum), dim3(threadNum), 0, 0, acGPU, contPatt, pkt, pkt_len, res);
hipMemcpy(results, res, n * sizeof(uint16_t), hipMemcpyDeviceToHost);
hipEventRecord(stop_gpu, 0);
hipEventSynchronize(start_gpu);
hipEventSynchronize(stop_gpu);
hipEventElapsedTime(&time_gpu, start_gpu, stop_gpu);
hipEventDestroy(start_gpu);
hipEventDestroy(stop_gpu);
printf("\n\n\n#####gpu time %f(ms)\n", time_gpu);
printf("######Matching Results:\n");
int i;
for(i = 0; i < n; i++)//n; i++)
{
if(results[i] != 0) printf("%4d\t%d\n", i, results[i]);
}
printf("\n");
return results;
}
| 204713ebe8d9d86642b720b112cdb27cacf78845.cu | #include <stdio.h>
#include <stdlib.h>
#include <rules.h>
#define MAX_LEN 1500
__global__ static void match(uint16_t *acArray, AcNodeGPU *contPatt, char *packets, uint16_t *packets_len, uint16_t *result)
{
printf("GPU match\n");
uint16_t len;
uint16_t contId[20] = {0};
int tmp;
int state = 0;
int i = 0, j = 0, k = 0;
char content;
// Multi-threads for many packets. One packet each thread.
int bid = blockIdx.x;
int tid = threadIdx.x;
i = tid + bid * blockDim.x;
len = packets_len[i];
for(k = 0; k < len + 1; k++)
{
content = packets[MAX_LEN * i + k];
while(1)
{
			tmp = acArray[257 * state + (unsigned char)content]; // index by the unsigned byte value (0-255)
if(tmp != 0)
{
if(acArray[257 * tmp + 0] != 0)
{
contId[j++] = acArray[257 * tmp + 0];
}
state = tmp;
break;
}
else
{
if(state == 0) break;
else state = acArray[257 * state + 256];
}
}
if(content == '\0') break;
}
// __syncthreads();
printf("Finish...");
if(j == 0)
{ printf("!!!!!!j==0\n");
result[i] = 0;
}
else
{ printf("!!!!!!j!=0 contId[0]: %d\n", contId[0]);
result[i] = contId[0];
}
}
/*__global__ static void match(uint16_t *acArray, AcNodeGPU *contPatt, char *packets, int n, uint16_t *result)
{
uint16_t contId[10] = {0};
int tmp;
int state = 0;
int i = 0, j = 0, k = 0;
// Single thread for many packets
for(i = 0; i < n; i++)
{
for(k = 0; k < LEN; k++)
{
char content = packets[LEN * i + k];
while(1)
{
tmp = acArray[257 * state + ((int)content - 0)];
if(tmp != 0)
{
if(acArray[257 * tmp + 0] != 0)
{
contId[j++] = acArray[257 * tmp + 0];
}
state = tmp;
break;
}
else
{
if(state == 0) break;
else state = acArray[257 * state + 256];
}
}
if(content == '\0') break;
}
}
__syncthreads();
for(i = 0; i < 10; i++) result[i] = contId[i];
}*/
/*__device__ uint16_t *acGPU;
__device__ AcNodeGPU *contPatt;
__device__ char *pkt;
__device__ uint16_t *pkt_len;
__device__ uint16_t *res;
__device__ int n;
*/
extern "C"
void gpuinit(RuleSetRoot *rsr, int blockNum, int threadNum, uint16_t **acGPU, AcNodeGPU **contPatt, char **pkt, uint16_t **pkt_len, uint16_t **res)
{
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;*/
int n = blockNum * threadNum;
/* uint16_t *tmp_acGPU;
AcNodeGPU *tmp_contPatt;
char *tmp_pkt;
uint16_t *tmp_pkt_len;
uint16_t *tmp_res;
int tmp_n = blockNum * threadNum;
*/
cudaMalloc((void **)acGPU, MAX_STATE * 257 * sizeof(uint16_t));
	cudaMalloc((void **)contPatt, rsr->nodeNum * sizeof(AcNodeGPU)); // allocation must match the AcNodeGPU copy in gpumatch
cudaMalloc((void **)pkt, n * MAX_LEN * sizeof(char));
cudaMalloc((void **)pkt_len, n * sizeof(uint16_t));
cudaMalloc((void **)res, n * sizeof(uint16_t));
/*cudaMalloc((void **)&tmp_acGPU, MAX_STATE * 257 * sizeof(uint16_t));
cudaMalloc((void **)&tmp_contPatt, rsr->nodeNum * sizeof(acGPU));
cudaMalloc((void **)&tmp_pkt, tmp_n * MAX_LEN * sizeof(char));
cudaMalloc((void **)&tmp_pkt_len, tmp_n * sizeof(uint16_t));
cudaMalloc((void **)&tmp_res, tmp_n * sizeof(uint16_t));
cudaMemcpyToSymbol(acGPU, &tmp_acGPU, MAX_STATE * 257 * sizeof(uint16_t));
cudaMemcpyToSymbol(contPatt, &tmp_contPatt, rsr->nodeNum *sizeof(acGPU));
cudaMemcpyToSymbol(pkt, &tmp_pkt, tmp_n * MAX_LEN * sizeof(char));
cudaMemcpyToSymbol(pkt_len, &tmp_pkt_len, tmp_n * sizeof(uint16_t));
cudaMemcpyToSymbol(res, &tmp_res, tmp_n * sizeof(uint16_t));
cudaMemcpyToSymbol(n, &tmp_n, sizeof(int));
*/
cudaDeviceProp deviceProp;
int devID = 0;
cudaSetDevice(devID);
cudaGetDeviceProperties(&deviceProp, devID);
printf("\n\n******GPU Device %s\n", deviceProp.name);
}
extern "C"
void gpufree(int k, uint16_t *acGPU, AcNodeGPU *contPatt, char *pkt, uint16_t *pkt_len, uint16_t *res)
{
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;
acGPU = *ptr_acGPU;
contPatt = *ptr_contPatt;
pkt = *ptr_pkt;
pkt_len = *ptr_pkt_len;
res = *ptr_res;
*/
if(k == 0)
printf("######GPU-Free\n");
cudaFree(acGPU);
cudaFree(contPatt);
cudaFree(pkt);
cudaFree(pkt_len);
cudaFree(res);
}
extern "C"
uint16_t *gpumatch(RuleSetRoot *rsr, char *packets, int *packets_len, int blockNum, int threadNum, uint16_t *acGPU, AcNodeGPU *contPatt, char *pkt, uint16_t *pkt_len, uint16_t *res)
{
int n;
n = blockNum * threadNum;
/* uint16_t *acGPU;
AcNodeGPU *contPatt;
char *pkt;
uint16_t *pkt_len;
uint16_t *res;
acGPU = *ptr_acGPU;
contPatt = *ptr_contPatt;
pkt = *ptr_pkt;
pkt_len = *ptr_pkt_len;
res = *ptr_res;
*/
cudaMemcpy(acGPU, rsr->acGPU, MAX_STATE * 257 * sizeof(uint16_t), cudaMemcpyHostToDevice);
cudaMemcpy(contPatt, rsr->contPattMatch, rsr->nodeNum * sizeof(AcNodeGPU), cudaMemcpyHostToDevice);
cudaMemcpy(pkt, packets, n * MAX_LEN * sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(pkt_len, packets_len, n * sizeof(uint16_t), cudaMemcpyHostToDevice);
uint16_t *results;
results = (uint16_t *)malloc(n * sizeof(uint16_t));
memset(results, 0, n * sizeof(uint16_t));
float time_gpu = 0.0;
cudaEvent_t start_gpu, stop_gpu;
cudaEventCreate(&stop_gpu);
cudaEventCreate(&start_gpu);
cudaEventRecord(start_gpu, 0);
match<<<blockNum, threadNum>>>(acGPU, contPatt, pkt, pkt_len, res);
cudaMemcpy(results, res, n * sizeof(uint16_t), cudaMemcpyDeviceToHost);
cudaEventRecord(stop_gpu, 0);
cudaEventSynchronize(start_gpu);
cudaEventSynchronize(stop_gpu);
cudaEventElapsedTime(&time_gpu, start_gpu, stop_gpu);
cudaEventDestroy(start_gpu);
cudaEventDestroy(stop_gpu);
printf("\n\n\n#####gpu time %f(ms)\n", time_gpu);
printf("######Matching Results:\n");
int i;
	for(i = 0; i < n; i++)
{
if(results[i] != 0) printf("%4d\t%d\n", i, results[i]);
}
printf("\n");
return results;
}
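/* Illustrative call sequence (a sketch only; rsr, packets and packets_len are
 * assumed to be prepared elsewhere from the rule set and the captured traffic):
 *
 *   uint16_t *acGPU, *pkt_len, *res;
 *   AcNodeGPU *contPatt;
 *   char *pkt;
 *   gpuinit(rsr, blockNum, threadNum, &acGPU, &contPatt, &pkt, &pkt_len, &res);
 *   uint16_t *results = gpumatch(rsr, packets, packets_len, blockNum, threadNum,
 *                                acGPU, contPatt, pkt, pkt_len, res);
 *   gpufree(0, acGPU, contPatt, pkt, pkt_len, res);
 *   free(results);
 */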
|
6b976ce92cec540bfb9b095dce3ffb4db85d8254.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file histo-global.cu Mandelbrot set computation, one pixel per thread */
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
hipError_t res = (call);\
if(res != hipSuccess) {\
const char* err_str = hipGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 256
#define BS 256
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
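/* The dwell is the standard escape-time count: starting from z = c, iterate
   z <- z*z + c until |z|^2 >= 4 or MAX_DWELL iterations are reached. For
   example, c = 0 never escapes and yields dwell = MAX_DWELL, while c = 1
   reaches z = 2 (|z|^2 = 4) after one step, i.e. dwell = 1. */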
/** computes the dwells for Mandelbrot image
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
*/
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
dwells[y * w + x] = dwell;
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
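/* With MAX_DWELL = 256 and CUT_DWELL = 64 this maps, for example,
   dwell = 0    -> (0, 0, 128)     dark blue
   dwell = 64   -> (0, 0, 255)     pure blue
   dwell = 160  -> (127, 127, 255) towards white
   dwell >= 256 -> (0, 0, 0)       black, i.e. inside the set */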
/** data size */
#define H (8 * 1024)
#define W (8 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(hipMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
dim3 bs(64, 4), grid(divup(w, bs.x), divup(h, bs.y));
hipLaunchKernelGGL(( mandelbrot_k), dim3(grid), dim3(bs), 0, 0,
d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(hipDeviceSynchronize());
double t2 = omp_get_wtime();
cucheck(hipMemcpy(h_dwells, d_dwells, dwell_sz, hipMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.3lf s, at %.3lf Mpix/s\n", gpu_time,
h * w * 1e-6 / gpu_time);
// free data
hipFree(d_dwells);
free(h_dwells);
return 0;
} // main
 | 6b976ce92cec540bfb9b095dce3ffb4db85d8254.cu | /** @file histo-global.cu Mandelbrot set computation, one pixel per thread */
#include <png.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/** CUDA check macro */
#define cucheck(call) \
{\
cudaError_t res = (call);\
if(res != cudaSuccess) {\
const char* err_str = cudaGetErrorString(res);\
fprintf(stderr, "%s (%d): %s in %s", __FILE__, __LINE__, err_str, #call); \
exit(-1);\
}\
}
/** time spent in device */
double gpu_time = 0;
/** a useful function to compute the number of threads */
int divup(int x, int y) { return x / y + (x % y ? 1 : 0); }
/** gets the color, given the dwell */
void dwell_color(int *r, int *g, int *b, int dwell);
/** save the dwell into a PNG file
@remarks: code to save PNG file taken from here
(error handling is removed):
http://www.labbookpages.co.uk/software/imgProc/libPNG.html
*/
void save_image(const char *filename, int *dwells, int w, int h) {
png_bytep row;
FILE *fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, 0, 0, 0);
png_infop info_ptr = png_create_info_struct(png_ptr);
// exception handling
setjmp(png_jmpbuf(png_ptr));
png_init_io(png_ptr, fp);
// write header (8 bit colour depth)
png_set_IHDR(png_ptr, info_ptr, w, h,
8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
// set title
png_text title_text;
title_text.compression = PNG_TEXT_COMPRESSION_NONE;
title_text.key = "Title";
title_text.text = "Mandelbrot set, per-pixel";
png_set_text(png_ptr, info_ptr, &title_text, 1);
png_write_info(png_ptr, info_ptr);
// write image data
row = (png_bytep) malloc(3 * w * sizeof(png_byte));
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int r, g, b;
dwell_color(&r, &g, &b, dwells[y * w + x]);
row[3 * x + 0] = (png_byte)r;
row[3 * x + 1] = (png_byte)g;
row[3 * x + 2] = (png_byte)b;
}
png_write_row(png_ptr, row);
}
png_write_end(png_ptr, NULL);
fclose(fp);
png_free_data(png_ptr, info_ptr, PNG_FREE_ALL, -1);
png_destroy_write_struct(&png_ptr, (png_infopp)NULL);
free(row);
} // save_image
/** a simple complex type */
struct complex {
__host__ __device__ complex(float re, float im = 0) {
this->re = re;
this->im = im;
}
/** real and imaginary part */
float re, im;
}; // struct complex
// operator overloads for complex numbers
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
return complex(a.re + b.re, a.im + b.im);
}
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
return complex(a.re - b.re, a.im - b.im);
}
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
inline __host__ __device__ float abs2(const complex &a) {
return a.re * a.re + a.im * a.im;
}
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
float invabs2 = 1 / abs2(b);
return complex((a.re * b.re + a.im * b.im) * invabs2,
(a.im * b.re - b.im * a.re) * invabs2);
} // operator/
#define MAX_DWELL 256
#define BS 256
/** computes the dwell for a single pixel */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
complex dc = cmax - cmin;
float fx = (float)x / w, fy = (float)y / h;
complex c = cmin + complex(fx * dc.re, fy * dc.im);
int dwell = 0;
complex z = c;
while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
z = z * z + c;
dwell++;
}
return dwell;
} // pixel_dwell
/** computes the dwells for Mandelbrot image
@param dwells the output array
@param w the width of the output image
@param h the height of the output image
@param cmin the complex value associated with the left-bottom corner of the
image
@param cmax the complex value associated with the right-top corner of the
image
*/
__global__ void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
// complex value to start iteration (c)
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
dwells[y * w + x] = dwell;
} // mandelbrot_k
/** gets the color, given the dwell (on host) */
#define CUT_DWELL (MAX_DWELL / 4)
void dwell_color(int *r, int *g, int *b, int dwell) {
// black for the Mandelbrot set
if(dwell >= MAX_DWELL) {
*r = *g = *b = 0;
} else {
// cut at zero
if(dwell < 0)
dwell = 0;
if(dwell <= CUT_DWELL) {
// from black to blue the first half
*r = *g = 0;
*b = 128 + dwell * 127 / (CUT_DWELL);
} else {
// from blue to white for the second half
*b = 255;
*r = *g = (dwell - CUT_DWELL) * 255 / (MAX_DWELL - CUT_DWELL);
}
}
} // dwell_color
/** data size */
#define H (8 * 1024)
#define W (8 * 1024)
#define IMAGE_PATH "./mandelbrot.png"
int main(int argc, char **argv) {
// allocate memory
int w = W, h = H;
size_t dwell_sz = w * h * sizeof(int);
int *h_dwells, *d_dwells;
cucheck(cudaMalloc((void**)&d_dwells, dwell_sz));
h_dwells = (int*)malloc(dwell_sz);
// compute the dwells, copy them back
double t1 = omp_get_wtime();
dim3 bs(64, 4), grid(divup(w, bs.x), divup(h, bs.y));
mandelbrot_k<<<grid, bs>>>
(d_dwells, w, h, complex(-1.5, -1), complex(0.5, 1));
cucheck(cudaThreadSynchronize());
double t2 = omp_get_wtime();
cucheck(cudaMemcpy(h_dwells, d_dwells, dwell_sz, cudaMemcpyDeviceToHost));
gpu_time = t2 - t1;
// save the image to PNG
save_image(IMAGE_PATH, h_dwells, w, h);
// print performance
printf("Mandelbrot set computed in %.3lf s, at %.3lf Mpix/s\n", gpu_time,
h * w * 1e-6 / gpu_time);
// free data
cudaFree(d_dwells);
free(h_dwells);
return 0;
} // main
|
a84290fd63d1440bacdb046d81157a52217531cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
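/* Behaviour as read from the kernel below: each block anneals one replica of
 * the spin vector. The temperature is lowered by tempStep until it falls
 * below zero; at every temperature the spins are swept repeatedly until no
 * update changes a spin by more than minDiff. unemptyCells holds, per spin,
 * the count and the indices of its nonzero couplings, so only those terms
 * enter the mean field. */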
__global__ void cudaKernelPull(float* mat, float* spins, int size, float* temp, float tempStep, float* meanFieldElements, bool* continueIteration, float minDiff, int* unemptyCells, float linearCoef) {
int blockId = blockIdx.x;
int thrId = threadIdx.x;
do {
// Lessen temperature
if (thrId == 0)
temp[blockId] = temp[blockId] - tempStep;
// Stabilize
do {
__syncthreads();
// By default current iteration is the last one
if (thrId == 0)
continueIteration[blockId] = false;
for (int spinId = 0; spinId < size; ++spinId) {
__syncthreads();
// Transitional value assignment
int wIndex = thrId;
while (wIndex < unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex + blockId * size] =
spins[unemptyCells[spinId * (size + 1) + wIndex + 1]
+ blockId * size]
* mat[spinId * size
+ unemptyCells[spinId * (size + 1)
+ wIndex + 1]];
// BEWARE: Matrix is symmetrical!
wIndex = wIndex + blockDim.x;
}
__syncthreads();
// Parallelized mean-field computation
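			// Stride-doubling tree reduction (offset = 1, 2, 4, ...): each pass adds
			// the element offset slots to the right into every even multiple of
			// offset, leaving the summed mean field in meanFieldElements[blockId * size].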
long long offset = 1;
while (offset < unemptyCells[spinId * (size + 1)]) {
wIndex = thrId;
while ((wIndex * 2 + 1) * offset
< unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex * 2 * offset + blockId * size] +=
meanFieldElements[(wIndex * 2 + 1) * offset
+ blockId * size];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
__syncthreads();
// Mean-field calculation complete - write new spin and delta
if (thrId == 0) {
float meanField = meanFieldElements[blockId * size];
float old = spins[spinId + blockId * size];
if (temp[blockId] > 0) {
spins[spinId + blockId * size] = -1
* tanh(meanField / temp[blockId]) * linearCoef
+ spins[spinId + blockId * size]
* (1 - linearCoef);
} else if (meanField > 0)
spins[spinId + blockId * size] = -1;
else
spins[spinId + blockId * size] = 1;
if (minDiff < fabs(old - spins[spinId + blockId * size]))
continueIteration[blockId] = true; // Too big delta. One more iteration needed
}
__syncthreads();
}
} while (continueIteration[blockId]);
} while (temp[blockId] >= 0);
} | a84290fd63d1440bacdb046d81157a52217531cd.cu | #include "includes.h"
/*
* CudaOperations.cu
*
* Created on: Feb 6, 2019
* Author: alexander
*/
__global__ void cudaKernelPull(float* mat, float* spins, int size, float* temp, float tempStep, float* meanFieldElements, bool* continueIteration, float minDiff, int* unemptyCells, float linearCoef) {
int blockId = blockIdx.x;
int thrId = threadIdx.x;
do {
// Lessen temperature
if (thrId == 0)
temp[blockId] = temp[blockId] - tempStep;
// Stabilize
do {
__syncthreads();
// By default current iteration is the last one
if (thrId == 0)
continueIteration[blockId] = false;
for (int spinId = 0; spinId < size; ++spinId) {
__syncthreads();
// Transitional value assignment
int wIndex = thrId;
while (wIndex < unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex + blockId * size] =
spins[unemptyCells[spinId * (size + 1) + wIndex + 1]
+ blockId * size]
* mat[spinId * size
+ unemptyCells[spinId * (size + 1)
+ wIndex + 1]];
// BEWARE: Matrix is symmetrical!
wIndex = wIndex + blockDim.x;
}
__syncthreads();
// Parallelized mean-field computation
long long offset = 1;
while (offset < unemptyCells[spinId * (size + 1)]) {
wIndex = thrId;
while ((wIndex * 2 + 1) * offset
< unemptyCells[spinId * (size + 1)]) {
meanFieldElements[wIndex * 2 * offset + blockId * size] +=
meanFieldElements[(wIndex * 2 + 1) * offset
+ blockId * size];
wIndex = wIndex + blockDim.x;
}
offset *= 2;
__syncthreads();
}
__syncthreads();
// Mean-field calculation complete - write new spin and delta
if (thrId == 0) {
float meanField = meanFieldElements[blockId * size];
float old = spins[spinId + blockId * size];
if (temp[blockId] > 0) {
spins[spinId + blockId * size] = -1
* tanh(meanField / temp[blockId]) * linearCoef
+ spins[spinId + blockId * size]
* (1 - linearCoef);
} else if (meanField > 0)
spins[spinId + blockId * size] = -1;
else
spins[spinId + blockId * size] = 1;
if (minDiff < fabs(old - spins[spinId + blockId * size]))
continueIteration[blockId] = true; // Too big delta. One more iteration needed
}
__syncthreads();
}
} while (continueIteration[blockId]);
} while (temp[blockId] >= 0);
} |
d22102c48b212729c300cb0311d8d7ede2e1336d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "trustworthiness.h"
#include <cuda_utils.h>
#include "distance/distance.h"
#include <selection/columnWiseSort.h>
#include <common/cumlHandle.hpp>
#include <knn/knn.h>
using namespace MLCommon;
using namespace MLCommon::Distance;
using namespace MLCommon::Selection;
using namespace ML;
namespace ML {
/**
 * @brief Compute a kNN and return the indexes of the nearest neighbors
* @input param input: Input matrix holding the dataset
* @input param n: Number of samples
* @input param d: Number of features
* @return Matrix holding the indexes of the nearest neighbors
*/
template<typename math_t>
long* get_knn_indexes(const cumlHandle& h, math_t* input, int n,
int d, int n_neighbors)
{
hipStream_t stream = h.getStream();
auto d_alloc = h.getDeviceAllocator();
long* d_pred_I = (long*)d_alloc->allocate(n * n_neighbors * sizeof(long), stream);
math_t* d_pred_D = (math_t*)d_alloc->allocate(n * n_neighbors * sizeof(math_t), stream);
kNNParams params = {input, n};
kNN knn(h, d);
knn.fit(¶ms, 1);
knn.search(input, n, d_pred_I, d_pred_D, n_neighbors);
d_alloc->deallocate(d_pred_D, n * n_neighbors * sizeof(math_t), stream);
return d_pred_I;
}
/**
 * @brief Compute the rank for the trustworthiness score
* @input param ind_X: indexes given by pairwise distance and sorting
* @input param ind_X_embedded: indexes given by KNN
* @input param n: Number of samples
* @input param n_neighbors: Number of neighbors considered by trustworthiness score
* @input param work: Batch to consider (to do it at once use n * n_neighbors)
* @output param rank: Resulting rank
*/
template<typename math_t>
__global__ void compute_rank(math_t *ind_X, long *ind_X_embedded,
int n, int n_neighbors, int work, double * rank)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= work)
return;
int n_idx = i / n_neighbors;
int nn_idx = (i % n_neighbors) + 1;
int idx = ind_X_embedded[n_idx * (n_neighbors+1) + nn_idx];
math_t* sample_i = &ind_X[n_idx * n];
for (int r = 1; r < n; r++)
{
if (sample_i[r] == idx)
{
int tmp = r - n_neighbors;
if (tmp > 0)
atomicAdd(rank, tmp);
break;
}
}
}
namespace Metrics {
/**
* @brief Compute the trustworthiness score
* @input param X: Data in original dimension
* @input param X_embedded: Data in target dimension (embedding)
* @input param n: Number of samples
* @input param m: Number of features in high/original dimension
* @input param d: Number of features in low/embedded dimension
* @input param n_neighbors: Number of neighbors considered by trustworthiness score
* @input param distance_type: Distance type to consider
* @return Trustworthiness score
*/
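/* The value computed below follows the usual trustworthiness definition
 *   T(k) = 1 - 2 / (n*k*(2n - 3k - 1)) * sum_i sum_{j in N_k(i)} max(0, r(i,j) - k)
 * where N_k(i) are the k nearest neighbours of sample i in the embedding and
 * r(i,j) is the rank of j among the original-space distances from i
 * (accumulated by compute_rank above). */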
template<typename math_t, DistanceType distance_type>
double trustworthiness_score(const cumlHandle& h, math_t* X,
math_t* X_embedded, int n, int m, int d,
int n_neighbors)
{
const int TMP_SIZE = MAX_BATCH_SIZE * n;
hipStream_t stream = h.getStream();
auto d_alloc = h.getDeviceAllocator();
  size_t workspaceSize = 0; // EucUnexpandedL2Sqrt does not require workspace (may need change for other distances)
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
bool bAllocWorkspace = false;
math_t* d_pdist_tmp = (math_t*)d_alloc->allocate(TMP_SIZE * sizeof(math_t), stream);
int* d_ind_X_tmp = (int*)d_alloc->allocate(TMP_SIZE * sizeof(int), stream);
long* ind_X_embedded = get_knn_indexes(h, X_embedded,
n, d, n_neighbors + 1);
double t_tmp = 0.0;
double t = 0.0;
double* d_t = (double*)d_alloc->allocate(sizeof(double), stream);
int toDo = n;
while (toDo > 0)
{
int batchSize = min(toDo, MAX_BATCH_SIZE);
// Takes at most MAX_BATCH_SIZE vectors at a time
distance<distance_type, math_t, math_t, math_t, OutputTile_t>
(&X[(n - toDo) * m], X,
d_pdist_tmp,
batchSize, n, m,
(void*)nullptr, workspaceSize,
stream
);
CUDA_CHECK(hipPeekAtLastError());
sortColumnsPerRow(d_pdist_tmp, d_ind_X_tmp,
batchSize, n,
bAllocWorkspace, NULL, workspaceSize,
stream);
CUDA_CHECK(hipPeekAtLastError());
t_tmp = 0.0;
updateDevice(d_t, &t_tmp, 1, stream);
int work = batchSize * n_neighbors;
int n_blocks = work / N_THREADS + 1;
hipLaunchKernelGGL(( compute_rank), dim3(n_blocks), dim3(N_THREADS), 0, stream, d_ind_X_tmp,
&ind_X_embedded[(n - toDo) * (n_neighbors+1)],
n,
n_neighbors,
batchSize * n_neighbors,
d_t);
CUDA_CHECK(hipPeekAtLastError());
updateHost(&t_tmp, d_t, 1, stream);
t += t_tmp;
toDo -= batchSize;
}
t = 1.0 - ((2.0 / ((n * n_neighbors) * ((2.0 * n) - (3.0 * n_neighbors) - 1.0))) * t);
d_alloc->deallocate(ind_X_embedded, n * (n_neighbors + 1) * sizeof(long), stream);
d_alloc->deallocate(d_pdist_tmp, TMP_SIZE * sizeof(math_t), stream);
d_alloc->deallocate(d_ind_X_tmp, TMP_SIZE * sizeof(int), stream);
d_alloc->deallocate(d_t, sizeof(double), stream);
return t;
}
template double trustworthiness_score<float, EucUnexpandedL2Sqrt>(const cumlHandle& h,
float* X, float* X_embedded, int n, int m, int d, int n_neighbors);
}
} | d22102c48b212729c300cb0311d8d7ede2e1336d.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "trustworthiness.h"
#include <cuda_utils.h>
#include "distance/distance.h"
#include <selection/columnWiseSort.h>
#include <common/cumlHandle.hpp>
#include <knn/knn.h>
using namespace MLCommon;
using namespace MLCommon::Distance;
using namespace MLCommon::Selection;
using namespace ML;
namespace ML {
/**
 * @brief Compute a kNN and return the indexes of the nearest neighbors
* @input param input: Input matrix holding the dataset
* @input param n: Number of samples
* @input param d: Number of features
* @return Matrix holding the indexes of the nearest neighbors
*/
template<typename math_t>
long* get_knn_indexes(const cumlHandle& h, math_t* input, int n,
int d, int n_neighbors)
{
cudaStream_t stream = h.getStream();
auto d_alloc = h.getDeviceAllocator();
long* d_pred_I = (long*)d_alloc->allocate(n * n_neighbors * sizeof(long), stream);
math_t* d_pred_D = (math_t*)d_alloc->allocate(n * n_neighbors * sizeof(math_t), stream);
kNNParams params = {input, n};
kNN knn(h, d);
knn.fit(¶ms, 1);
knn.search(input, n, d_pred_I, d_pred_D, n_neighbors);
d_alloc->deallocate(d_pred_D, n * n_neighbors * sizeof(math_t), stream);
return d_pred_I;
}
/**
 * @brief Compute the rank for the trustworthiness score
* @input param ind_X: indexes given by pairwise distance and sorting
* @input param ind_X_embedded: indexes given by KNN
* @input param n: Number of samples
* @input param n_neighbors: Number of neighbors considered by trustworthiness score
* @input param work: Batch to consider (to do it at once use n * n_neighbors)
* @output param rank: Resulting rank
*/
template<typename math_t>
__global__ void compute_rank(math_t *ind_X, long *ind_X_embedded,
int n, int n_neighbors, int work, double * rank)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= work)
return;
int n_idx = i / n_neighbors;
int nn_idx = (i % n_neighbors) + 1;
int idx = ind_X_embedded[n_idx * (n_neighbors+1) + nn_idx];
math_t* sample_i = &ind_X[n_idx * n];
for (int r = 1; r < n; r++)
{
if (sample_i[r] == idx)
{
int tmp = r - n_neighbors;
if (tmp > 0)
atomicAdd(rank, tmp);
break;
}
}
}
namespace Metrics {
/**
* @brief Compute the trustworthiness score
* @input param X: Data in original dimension
* @input param X_embedded: Data in target dimension (embedding)
* @input param n: Number of samples
* @input param m: Number of features in high/original dimension
* @input param d: Number of features in low/embedded dimension
* @input param n_neighbors: Number of neighbors considered by trustworthiness score
* @input param distance_type: Distance type to consider
* @return Trustworthiness score
*/
template<typename math_t, DistanceType distance_type>
double trustworthiness_score(const cumlHandle& h, math_t* X,
math_t* X_embedded, int n, int m, int d,
int n_neighbors)
{
const int TMP_SIZE = MAX_BATCH_SIZE * n;
cudaStream_t stream = h.getStream();
auto d_alloc = h.getDeviceAllocator();
  size_t workspaceSize = 0; // EucUnexpandedL2Sqrt does not require workspace (may need change for other distances)
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
bool bAllocWorkspace = false;
math_t* d_pdist_tmp = (math_t*)d_alloc->allocate(TMP_SIZE * sizeof(math_t), stream);
int* d_ind_X_tmp = (int*)d_alloc->allocate(TMP_SIZE * sizeof(int), stream);
long* ind_X_embedded = get_knn_indexes(h, X_embedded,
n, d, n_neighbors + 1);
double t_tmp = 0.0;
double t = 0.0;
double* d_t = (double*)d_alloc->allocate(sizeof(double), stream);
int toDo = n;
while (toDo > 0)
{
int batchSize = min(toDo, MAX_BATCH_SIZE);
// Takes at most MAX_BATCH_SIZE vectors at a time
distance<distance_type, math_t, math_t, math_t, OutputTile_t>
(&X[(n - toDo) * m], X,
d_pdist_tmp,
batchSize, n, m,
(void*)nullptr, workspaceSize,
stream
);
CUDA_CHECK(cudaPeekAtLastError());
sortColumnsPerRow(d_pdist_tmp, d_ind_X_tmp,
batchSize, n,
bAllocWorkspace, NULL, workspaceSize,
stream);
CUDA_CHECK(cudaPeekAtLastError());
t_tmp = 0.0;
updateDevice(d_t, &t_tmp, 1, stream);
int work = batchSize * n_neighbors;
int n_blocks = work / N_THREADS + 1;
compute_rank<<<n_blocks, N_THREADS, 0, stream>>>(d_ind_X_tmp,
&ind_X_embedded[(n - toDo) * (n_neighbors+1)],
n,
n_neighbors,
batchSize * n_neighbors,
d_t);
CUDA_CHECK(cudaPeekAtLastError());
updateHost(&t_tmp, d_t, 1, stream);
t += t_tmp;
toDo -= batchSize;
}
t = 1.0 - ((2.0 / ((n * n_neighbors) * ((2.0 * n) - (3.0 * n_neighbors) - 1.0))) * t);
d_alloc->deallocate(ind_X_embedded, n * (n_neighbors + 1) * sizeof(long), stream);
d_alloc->deallocate(d_pdist_tmp, TMP_SIZE * sizeof(math_t), stream);
d_alloc->deallocate(d_ind_X_tmp, TMP_SIZE * sizeof(int), stream);
d_alloc->deallocate(d_t, sizeof(double), stream);
return t;
}
template double trustworthiness_score<float, EucUnexpandedL2Sqrt>(const cumlHandle& h,
float* X, float* X_embedded, int n, int m, int d, int n_neighbors);
}
} |
0e87c74a761c5b4eff0a6f9a6d31349e1b4b67a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//extern __device__ int testxyz[1000];
//int localtrace[10000];
//__device__ float* tracehandle;
__device__ float foo_CC(float a)
{
return a*0.9;
}
__device__ int foo_DD(float a)
{
if (threadIdx.x < 2 || threadIdx.y > 2)
return (int) a;
else
return a+2;
}
__device__ float foo_BB(float a)
{
if (threadIdx.x > 3 || threadIdx.y > 11)
return a + foo_CC(a);
else
return a + (float)foo_DD(a) /2;
}
__device__ float foo_AA( float a, float b)
{
if (threadIdx.x < 8 || threadIdx.y > 4)
return a*3.1415+1;
else
return (b*a)*0.5 + foo_BB(b);
}
__global__ void axpy_kernel2(float a, float* x, float* y)
{
//tracehandle = newbu;
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int index = threadId;
float aa = y[index] + x[index] + 1.1;
float b = 0.5*y[index] + 0.25* x[index] + 1.0;
y[index] += ( x[index]*1.67 + foo_AA(aa, b) );
// y[index] += ( x[index]*1.67 + aa + b );
}
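/* Both kernels flatten the 2D grid and 2D block into one linear element index:
     blockId  = blockIdx.x + blockIdx.y * gridDim.x
     threadId = blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x
   e.g. with blockDim = (16, 8), thread (3, 2) of block (1, 0) in a 4-block-wide
   grid gets index 1*128 + 2*16 + 3 = 163, so each thread handles one element. */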
__global__ void axpy_kernel1(float a, float* x, float* y)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int index = threadId;
y[index] = x[index]*0.3;
if (index>2)
y[index] += 99;
else
y[index] += 999 + foo_CC(a);
}
int main(int argc, char* argv[])
{
//const int kDataLen2 = 128;
float a = 2.0f;
//int blocks2 = 600;
hipSetDevice(0);
if (argc != 5)
{
printf("usage: ./axpy [blocks_x] [blocks_y] [threads_x] [threads_y]\n");
exit(1);
}
int blocksx = atoi(argv[1]) ;
int blocksy = atoi(argv[2]) ;
int kDataLenx = atoi(argv[3]);
int kDataLeny = atoi(argv[4]);
int sizen = blocksx *blocksy *kDataLenx *kDataLeny;
// hipThreadSetLimit(hipLimitMallocHeapSize, 1024*1024); //sderek
hipDeviceSetLimit(hipLimitMallocHeapSize, 1024*1024*500); //sderek
// tracetest = (int*)malloc( 1234);
// float host_y[blocks*kDataLen];
// float host_y[blocks*kDataLen];
float* host_x = (float*) malloc( sizen* sizeof(float));
float* host_y = (float*) malloc( sizen* sizeof(float));
void* host_newbu = (void*) malloc( 1000 );
int ii;
for( ii=0; ii<sizen; ii++)
host_x[ii] = ii%8;
for( ii=0; ii<sizen; ii++)
host_y[ii] = ii%5;
/* int x[5];
x[0] = 13;
printf("%p\n",x);
printf("%p\n",&x);
printf("%d\n",*x);
printf("%d\n",*(x+1));
*/
// Copy input data to device.
float* device_x;
float* device_y;
// void* newbu;
// printf(" %p\n", device_x);
hipMalloc((void**)&device_x, sizen * sizeof(float));
// printf(" %p\n", device_x);
// printf(" %p\n", device_y);
hipMalloc((void**)&device_y, sizen * sizeof(float) + 18);
// printf(" %p\n", device_y);
// printf(" %p\n", newbu);
// hipMalloc(&newbu, 1000);
// printf(" %p\n", newbu);
/* std::cout << &(device_x) << "\n";
std::cout << &(device_y) << "\n";
std::cout << &(*device_x) << "\n";
std::cout << &(*device_y) << "\n";
std::cout << (device_x) << "\n";
std::cout << (device_y) << "\n";
*/
hipMemcpy(device_x, host_x, sizen * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_y, host_y, sizen * sizeof(float), hipMemcpyHostToDevice);
dim3 CTAs(blocksx, blocksy);
dim3 Threads(kDataLenx, kDataLeny);
std::cout << "launching kernel...\n";
hipLaunchKernelGGL(( axpy_kernel1), dim3(CTAs), dim3(Threads), 0, 0, a, device_x, device_y);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf ("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipLaunchKernelGGL(( axpy_kernel2), dim3(CTAs), dim3(Threads), 0, 0, a, device_x, device_y);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess) {
printf ("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipMemcpy(host_y, device_y, sizen* sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(host_newbu, newbu, 1000, hipMemcpyDeviceToHost);
free(host_newbu);
// hipFree(newbu);
int verify = 0;
for (int ii = 0; ii < 8; ii++)
std::cout << "y[" << ii << "] = " << host_y[ii] << "\n";
for (int ii = 0; ii < sizen; ii++) {
if( host_y[ii] == ii%5)
verify ++;
// std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
std::cout << "\n\n[TOOL verify] There are a total of\t" << verify << " incorrect numbers." << std::endl;
if (verify==0)
std::cout << "[TOOL verify] passed!" << std::endl << std::endl;
// for (int i = 0; i < 20; ++i) {
// std::cout << "newtrace [" << i << "] = " << host_newbu[i] << "\n";
// std::cout << & (host_y[i] )<< "\n";
// }
/* hipMemcpyFromSymbol(localtrace, testxyz, 40*sizeof(int), 0, hipMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
hipMemcpyFromSymbol(localtrace+8, testxyz, 40*sizeof(int), 0, hipMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
*/
// int* show_h;
// hipMemcpyFromSymbol(show_h, show, sizeof(int), 0, hipMemcpyDeviceToHost);
// msg = hipGetSymbolAddress((void **)&d_G, test);
// printf("the address is %p\n", d_G);
// if (msg == hipSuccess)
// {
// int tmp[4];
// printf("before %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// hipMemcpyFromSymbol(tracetest, test1, 4*sizeof(int), 0, hipMemcpyDeviceToHost);
// hipMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, hipMemcpyDeviceToHost);
// printf("copy %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// hipMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, hipMemcpyDeviceToHost);
// printf("after %d %d %d %d@ %p\n", tmp[0], tmp[1], tmp[2], tmp[3], tmp);
// }
//else
// std::cout << hipGetErrorString(msg) << "\n\n";
hipFree(device_x);
hipFree(device_y);
hipDeviceReset();
return 0;
}
| 0e87c74a761c5b4eff0a6f9a6d31349e1b4b67a0.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
//extern __device__ int testxyz[1000];
//int localtrace[10000];
//__device__ float* tracehandle;
__device__ float foo_CC(float a)
{
return a*0.9;
}
__device__ int foo_DD(float a)
{
if (threadIdx.x < 2 || threadIdx.y > 2)
return (int) a;
else
return a+2;
}
__device__ float foo_BB(float a)
{
if (threadIdx.x > 3 || threadIdx.y > 11)
return a + foo_CC(a);
else
return a + (float)foo_DD(a) /2;
}
__device__ float foo_AA( float a, float b)
{
if (threadIdx.x < 8 || threadIdx.y > 4)
return a*3.1415+1;
else
return (b*a)*0.5 + foo_BB(b);
}
__global__ void axpy_kernel2(float a, float* x, float* y)
{
//tracehandle = newbu;
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int index = threadId;
float aa = y[index] + x[index] + 1.1;
float b = 0.5*y[index] + 0.25* x[index] + 1.0;
y[index] += ( x[index]*1.67 + foo_AA(aa, b) );
// y[index] += ( x[index]*1.67 + aa + b );
}
__global__ void axpy_kernel1(float a, float* x, float* y)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
int index = threadId;
y[index] = x[index]*0.3;
if (index>2)
y[index] += 99;
else
y[index] += 999 + foo_CC(a);
}
int main(int argc, char* argv[])
{
//const int kDataLen2 = 128;
float a = 2.0f;
//int blocks2 = 600;
cudaSetDevice(0);
if (argc != 5)
{
printf("usage: ./axpy [blocks_x] [blocks_y] [threads_x] [threads_y]\n");
exit(1);
}
int blocksx = atoi(argv[1]) ;
int blocksy = atoi(argv[2]) ;
int kDataLenx = atoi(argv[3]);
int kDataLeny = atoi(argv[4]);
int sizen = blocksx *blocksy *kDataLenx *kDataLeny;
// cudaThreadSetLimit(cudaLimitMallocHeapSize, 1024*1024); //sderek
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024*1024*500); //sderek
// tracetest = (int*)malloc( 1234);
// float host_y[blocks*kDataLen];
// float host_y[blocks*kDataLen];
float* host_x = (float*) malloc( sizen* sizeof(float));
float* host_y = (float*) malloc( sizen* sizeof(float));
void* host_newbu = (void*) malloc( 1000 );
int ii;
for( ii=0; ii<sizen; ii++)
host_x[ii] = ii%8;
for( ii=0; ii<sizen; ii++)
host_y[ii] = ii%5;
/* int x[5];
x[0] = 13;
printf("%p\n",x);
printf("%p\n",&x);
printf("%d\n",*x);
printf("%d\n",*(x+1));
*/
// Copy input data to device.
float* device_x;
float* device_y;
// void* newbu;
// printf(" %p\n", device_x);
cudaMalloc((void**)&device_x, sizen * sizeof(float));
// printf(" %p\n", device_x);
// printf(" %p\n", device_y);
cudaMalloc((void**)&device_y, sizen * sizeof(float) + 18);
// printf(" %p\n", device_y);
// printf(" %p\n", newbu);
// cudaMalloc(&newbu, 1000);
// printf(" %p\n", newbu);
/* std::cout << &(device_x) << "\n";
std::cout << &(device_y) << "\n";
std::cout << &(*device_x) << "\n";
std::cout << &(*device_y) << "\n";
std::cout << (device_x) << "\n";
std::cout << (device_y) << "\n";
*/
cudaMemcpy(device_x, host_x, sizen * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_y, host_y, sizen * sizeof(float), cudaMemcpyHostToDevice);
dim3 CTAs(blocksx, blocksy);
dim3 Threads(kDataLenx, kDataLeny);
std::cout << "launching kernel...\n";
axpy_kernel1<<<CTAs, Threads>>>(a, device_x, device_y);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf ("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
axpy_kernel2<<<CTAs, Threads>>>(a, device_x, device_y);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) {
printf ("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudaMemcpy(host_y, device_y, sizen* sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(host_newbu, newbu, 1000, cudaMemcpyDeviceToHost);
free(host_newbu);
// cudaFree(newbu);
int verify = 0;
for (int ii = 0; ii < 8; ii++)
std::cout << "y[" << ii << "] = " << host_y[ii] << "\n";
for (int ii = 0; ii < sizen; ii++) {
if( host_y[ii] == ii%5)
verify ++;
// std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
std::cout << "\n\n[TOOL verify] There are a total of\t" << verify << " incorrect numbers." << std::endl;
if (verify==0)
std::cout << "[TOOL verify] passed!" << std::endl << std::endl;
// for (int i = 0; i < 20; ++i) {
// std::cout << "newtrace [" << i << "] = " << host_newbu[i] << "\n";
// std::cout << & (host_y[i] )<< "\n";
// }
/* cudaMemcpyFromSymbol(localtrace, testxyz, 40*sizeof(int), 0, cudaMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
cudaMemcpyFromSymbol(localtrace+8, testxyz, 40*sizeof(int), 0, cudaMemcpyDeviceToHost);
for (int i = 0; i < 20; ++i)
printf("%d\t", localtrace[i] );
std::cout << std::endl;
*/
// int* show_h;
// cudaMemcpyFromSymbol(show_h, show, sizeof(int), 0, cudaMemcpyDeviceToHost);
// msg = cudaGetSymbolAddress((void **)&d_G, test);
// printf("the address is %p\n", d_G);
// if (msg == cudaSuccess)
// {
// int tmp[4];
// printf("before %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// cudaMemcpyFromSymbol(tracetest, test1, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// printf("copy %d %d %d %d@ %p\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), tmp);
// cudaMemcpyFromSymbol(tmp, test2, 4*sizeof(int), 0, cudaMemcpyDeviceToHost);
// printf("after %d %d %d %d@ %p\n", tmp[0], tmp[1], tmp[2], tmp[3], tmp);
// }
//else
// std::cout << cudaGetErrorString(msg) << "\n\n";
cudaFree(device_x);
cudaFree(device_y);
cudaDeviceReset();
return 0;
}
|
d4e07ecd91c5e030e43e8b06254235ab0794629a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xtrmm_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ali Charara
* @date 2017-11-13
**/
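/* Overview (as implemented below): batched TRMM is exposed in two flavours,
 * an array-of-pointers interface (kblas_trmm_batch / kblasXtrmm_batch) and a
 * strided interface (kblasXtrmm_batch_strided). Every wrapper first queries
 * the required workspace through trmm_batch_wsquery_core and returns
 * KBLAS_InsufficientWorkspace when the handle's allocation is too small,
 * then forwards to the templated Xtrmm_batch_core driver. */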
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "rocblas.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "batch_common.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xtrmm_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
int Xtrmm_batch_offset( kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int A_row_off, int A_col_off, int lda,
TYPE** B, int B_row_off, int B_col_off, int ldb,
int batchCount){
KBlasWorkspaceState ws_needed;
trmm_batch_wsquery_core<false>( batchCount,
side, m, n,
(kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrmm_batch_core<TYPE, TYPE**, false>(
handle,
side, uplo, trans, diag,
m, n,
alpha,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
(TYPE**)B, B_row_off, B_col_off, ldb, (long)0,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_trmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int lda,
TYPE** B, int ldb,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda,
B, 0, 0, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
extern "C"
int kblasXtrmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int lda,
TYPE** B, int ldb,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda,
B, 0, 0, ldb,
batchCount);
}
//==============================================================================================
//Strided form
int Xtrmm_batch_offset( kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE* B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount){
KBlasWorkspaceState ws_needed;
trmm_batch_wsquery_core<true>(batchCount,
side, m, n,
(kblasWorkspaceState_t)&ws_needed);
bool suffWorkspace = (ws_needed.d_ptrs_bytes <= handle->work_space.allocated_ws_state.d_ptrs_bytes);
if(!suffWorkspace){
return KBLAS_InsufficientWorkspace;
}
return Xtrmm_batch_core<TYPE, TYPE*, true>(
handle,
side, uplo, trans, diag,
m, n,
alpha,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
(TYPE*)B, B_row_off, B_col_off, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_trmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int lda, long strideA,
TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda, strideA,
B, 0, 0, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
extern "C"
int kblasXtrmm_batch_strided(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int lda, long strideA,
TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda, strideA,
B, 0, 0, ldb, strideB,
batchCount);
}
| d4e07ecd91c5e030e43e8b06254235ab0794629a.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/batch_triangular/Xtrmm_batch.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ali Charara
* @date 2017-11-13
**/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include <typeinfo>
#include "kblas.h"
#include "kblas_struct.h"
#include "operators.h"
#include "defs.h"
#include "kblas_common.h"
#include "batch_common.ch"
//==============================================================================================
#include "Xblas_core.ch"
#include "Xhelper_funcs.ch"
#include "Xtrmm_batch_drivers.cuh"
//==============================================================================================
//Non-Strided form
int Xtrmm_batch_offset( kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int A_row_off, int A_col_off, int lda,
TYPE** B, int B_row_off, int B_col_off, int ldb,
int batchCount){
KBlasWorkspaceState ws_needed;
trmm_batch_wsquery_core<false>( batchCount,
side, m, n,
(kblasWorkspaceState_t)&ws_needed);
if( !ws_needed.isSufficient( &(handle->work_space.allocated_ws_state) ) ){
return KBLAS_InsufficientWorkspace;
}
return Xtrmm_batch_core<TYPE, TYPE**, false>(
handle,
side, uplo, trans, diag,
m, n,
alpha,
(TYPE**)A, A_row_off, A_col_off, lda, (long)0,
(TYPE**)B, B_row_off, B_col_off, ldb, (long)0,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_trmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int lda,
TYPE** B, int ldb,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda,
B, 0, 0, ldb,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
extern "C"
int kblasXtrmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE** A, int lda,
TYPE** B, int ldb,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda,
B, 0, 0, ldb,
batchCount);
}
//==============================================================================================
//Strided form
int Xtrmm_batch_offset( kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int A_row_off, int A_col_off, int lda, long strideA,
TYPE* B, int B_row_off, int B_col_off, int ldb, long strideB,
int batchCount){
KBlasWorkspaceState ws_needed;
trmm_batch_wsquery_core<true>(batchCount,
side, m, n,
(kblasWorkspaceState_t)&ws_needed);
bool suffWorkspace = (ws_needed.d_ptrs_bytes <= handle->work_space.allocated_ws_state.d_ptrs_bytes);
if(!suffWorkspace){
return KBLAS_InsufficientWorkspace;
}
return Xtrmm_batch_core<TYPE, TYPE*, true>(
handle,
side, uplo, trans, diag,
m, n,
alpha,
(TYPE*)A, A_row_off, A_col_off, lda, strideA,
(TYPE*)B, B_row_off, B_col_off, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to array of device pointers to device buffers
int kblas_trmm_batch(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int lda, long strideA,
TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda, strideA,
B, 0, 0, ldb, strideB,
batchCount);
}
// workspace needed: device pointers
// A, B: host pointer to device buffers
extern "C"
int kblasXtrmm_batch_strided(kblasHandle_t handle,
char side, char uplo, char trans, char diag,
const int m, const int n,
const TYPE alpha,
const TYPE* A, int lda, long strideA,
TYPE* B, int ldb, long strideB,
int batchCount)
{
return Xtrmm_batch_offset(handle,
side, uplo, trans, diag,
m, n,
alpha,
A, 0, 0, lda, strideA,
B, 0, 0, ldb, strideB,
batchCount);
}
|
e41d47227e014d4f08d5415c6d3016d15d7f7ec1.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
using namespace at;
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
// Cuda tensor accessor definitions
// restrict pointer traits prioritize speed over memory consumption
#define TensorAcc3R PackedTensorAccessor<scalar_t,3,RestrictPtrTraits,int32_t>
#define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t>
#define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t>
#define TensorAcc6R PackedTensorAccessor<scalar_t,6,RestrictPtrTraits,int32_t>
#define WITHIN_BOUNDS(x, H) (x >= 0 && x < H)
#define THREADS_FORWARD 32 //should be multiple of 32
namespace {
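// Sketch of what the kernel below computes (a "max-plus" convolution): for
// every output cell and kernel tap (i, j) it takes the maximum of input +
// weight over the input channels, records the winning channel in output2 and
// writes the sum of the per-tap maxima to output1. Each block handles one
// (batch, output-channel, row) triple; threads stride over the output columns.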
template <typename scalar_t>
__global__ void max_convolution2d_cuda_forward_kernel(
const TensorAcc4R rInput,
const TensorAcc4R rWeight,
TensorAcc4R output1,
TensorAcc6R output2,
int padH, int padW, int oW) {
const int iC = rInput.size(1);
const int iH = rInput.size(2);
const int iW = rInput.size(3);
const int kH = rWeight.size(2);
const int kW = rWeight.size(3);
  // independent, large dimensions to be parallelized: oC, batch_size, oH, oW
const int n = blockIdx.x;
const int oc = blockIdx.y;
const int h = blockIdx.z;
const int thread = threadIdx.x;
for (int w=thread; w<oW; w += THREADS_FORWARD){
scalar_t max_p;
scalar_t p;
torch::Tensor interim_max = torch::zeros({kH,kW});
torch::Tensor interim_argmax = torch::zeros({kH,kW});
scalar_t interim_sum;
interim_sum = 0;
for (int i=0; i<kH; ++i){
int ii = h * kH + i - padH;
if WITHIN_BOUNDS(ii, iH){
for (int j=0; j<kW; ++j){
int ij = w * kW + j -padW;
if WITHIN_BOUNDS(ij, iW){
            max_p = - std::numeric_limits<float>::infinity(); // TODO Replace this!!!
for (int c=0; c<iC; ++c){
scalar_t inp = rInput[n][c][ii][ij];
scalar_t wei = rWeight[oc][c][i][j];
p = inp + wei;
if (p > max_p){
max_p = p;
interim_max[i][j] = p;
interim_argmax[i][j] = c;
}
}
}
}
}
}
output2[n][oc][h][w] = interim_argmax.packed_accessor<scalar_t,2,RestrictPtrTraits,int32_t>();
auto interim_max_acc = interim_max.packed_accessor<scalar_t,2,RestrictPtrTraits,int32_t>();
for (int i=0; i<kH; ++i){
for (int j=0; j<kW; ++j){
interim_sum += interim_max_acc[i][j];
}
}
output1[n][oc][h][w] = interim_sum;
}
// accumulate
__syncthreads();
}
std::tuple<torch::Tensor, torch::Tensor> max_convolution2d_cuda_forward(
torch::Tensor input,
torch::Tensor weight,
int padH, int padW) {
const int batch_size = input.size(0);
const int iH = input.size(2);
const int iW = input.size(3);
const int oC = weight.size(0);
const int kH = weight.size(2);
const int kW = weight.size(3);
const int oH = (iH + 2 * padH) / kH;
const int oW = (iW + 2 * padW) / kW;
auto output1 = torch::zeros({batch_size, oC, oH, oW}, input.options());
auto output2 = torch::zeros({batch_size, oC, oH, oW, kH, kW}, input.options());
auto rInput = input.contiguous();
auto rWeight = weight.contiguous();
const int threads = THREADS_FORWARD;
const dim3 blocks(batch_size, oC, oH);
AT_DISPATCH_FLOATING_TYPES(input.type(), "max_convolution2d_cuda_forward", ([&] {
TensorAcc4R rInput_acc = rInput.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R rWeight_acc = rWeight.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R output1_acc = output1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc6R output2_acc = output2.packed_accessor<scalar_t,6,RestrictPtrTraits,int32_t>();
hipLaunchKernelGGL(( max_convolution2d_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
rInput_acc, rWeight_acc, output1_acc, output2_acc, padH, padW, oW);
}));
return std::make_pair (output1, output2);
}
| e41d47227e014d4f08d5415c6d3016d15d7f7ec1.cu | #include <torch/extension.h>
#include <ATen/ATen.h>
using namespace at;
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
// Cuda tensor accessor definitions
// restrict pointer traits prioritize speed over memory consumption
#define TensorAcc3R PackedTensorAccessor<scalar_t,3,RestrictPtrTraits,int32_t>
#define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t>
#define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t>
#define TensorAcc6R PackedTensorAccessor<scalar_t,6,RestrictPtrTraits,int32_t>
#define WITHIN_BOUNDS(x, H) (x >= 0 && x < H)
#define THREADS_FORWARD 32 //should be multiple of 32
namespace {
template <typename scalar_t>
__global__ void max_convolution2d_cuda_forward_kernel(
const TensorAcc4R rInput,
const TensorAcc4R rWeight,
TensorAcc4R output1,
TensorAcc6R output2,
int padH, int padW, int oW) {
const int iC = rInput.size(1);
const int iH = rInput.size(2);
const int iW = rInput.size(3);
const int kH = rWeight.size(2);
const int kW = rWeight.size(3);
  // independent, large dimensions to be parallelized: oC, batch_size, oH, oW
const int n = blockIdx.x;
const int oc = blockIdx.y;
const int h = blockIdx.z;
const int thread = threadIdx.x;
for (int w=thread; w<oW; w += THREADS_FORWARD){
scalar_t max_p;
scalar_t p;
torch::Tensor interim_max = torch::zeros({kH,kW});
torch::Tensor interim_argmax = torch::zeros({kH,kW});
scalar_t interim_sum;
interim_sum = 0;
for (int i=0; i<kH; ++i){
int ii = h * kH + i - padH;
if WITHIN_BOUNDS(ii, iH){
for (int j=0; j<kW; ++j){
int ij = w * kW + j -padW;
if WITHIN_BOUNDS(ij, iW){
            max_p = - std::numeric_limits<float>::infinity(); // TODO Replace this!!!
for (int c=0; c<iC; ++c){
scalar_t inp = rInput[n][c][ii][ij];
scalar_t wei = rWeight[oc][c][i][j];
p = inp + wei;
if (p > max_p){
max_p = p;
interim_max[i][j] = p;
interim_argmax[i][j] = c;
}
}
}
}
}
}
output2[n][oc][h][w] = interim_argmax.packed_accessor<scalar_t,2,RestrictPtrTraits,int32_t>();
auto interim_max_acc = interim_max.packed_accessor<scalar_t,2,RestrictPtrTraits,int32_t>();
for (int i=0; i<kH; ++i){
for (int j=0; j<kW; ++j){
interim_sum += interim_max_acc[i][j];
}
}
output1[n][oc][h][w] = interim_sum;
}
// accumulate
__syncthreads();
}
std::tuple<torch::Tensor, torch::Tensor> max_convolution2d_cuda_forward(
torch::Tensor input,
torch::Tensor weight,
int padH, int padW) {
const int batch_size = input.size(0);
const int iH = input.size(2);
const int iW = input.size(3);
const int oC = weight.size(0);
const int kH = weight.size(2);
const int kW = weight.size(3);
const int oH = (iH + 2 * padH) / kH;
const int oW = (iW + 2 * padW) / kW;
auto output1 = torch::zeros({batch_size, oC, oH, oW}, input.options());
auto output2 = torch::zeros({batch_size, oC, oH, oW, kH, kW}, input.options());
auto rInput = input.contiguous();
auto rWeight = weight.contiguous();
const int threads = THREADS_FORWARD;
const dim3 blocks(batch_size, oC, oH);
AT_DISPATCH_FLOATING_TYPES(input.type(), "max_convolution2d_cuda_forward", ([&] {
TensorAcc4R rInput_acc = rInput.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R rWeight_acc = rWeight.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R output1_acc = output1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc6R output2_acc = output2.packed_accessor<scalar_t,6,RestrictPtrTraits,int32_t>();
max_convolution2d_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
rInput_acc, rWeight_acc, output1_acc, output2_acc, padH, padW, oW);
}));
return std::make_pair (output1, output2);
}
|
db6c6ac1be66af7e04b584f495b9eb1fd7c3c6e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Unittests for GPU sum functions
* @file TestGpuSum.cu
* @author Adam Koleszar ([email protected])
*/
#include "FloatType.h"
#include "CuTest.h"
#include "GpuSum.h"
#include "ArrayUtils.h"
#include "TestUtils.h"
/**
* @brief Unittest for #gpu_cond_copy
*
* @param tc test case
* @test
* - Allocate arrays and fill them with random values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuCondCopy(CuTest *tc)
{
printBanner("Test gpu_cond_copy");
unsigned long seed = time(NULL);
N = 265;
M = 222;
int cond = 1;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
AIH = createHostArrayInt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = getRandom(&seed);
AIH[i] = (i<N) ? cond : 0;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
AID = createGpuArrayInt(M*N, ARRAY_COPY, 0, AIH);
hipLaunchKernelGGL(( gpu_cond_copy) , dim3((N*M-1)/THREADS+1), dim3(THREADS) , 0, 0, BFD, AFD, AID, 1, N*M);
hipMemcpy(BFH, BFD, SIZEFLT(M*N), hipMemcpyDeviceToHost);
int b = 1;
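    // only the first N entries carry the condition flag, so those must be copied verbatim and the remaining entries must stay zero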
for (i=0; i<N*M; ++i)
{
b &= (BFH[i] == AFH[i] && i<N) || (BFH[i] == 0 && i>=N);
}
CuAssertIntEquals(tc, cond, b);
}
///Clean up after test case
void cleanupTestGpuCondCopy()
{
hipFree(AFD); hipFree(BFD); hipFree(AID);
free(AFH); free(BFH); free(AIH);
}
/**
* @brief Unittest for #gpu_sqsub
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSquareSubstract(CuTest *tc)
{
printBanner("Test gpu_sqsub");
N = 211;
M = 259;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
CFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = 5.0;
BFH[i] = 2.0;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, BFH);
CFD = createGpuArrayFlt(M*N);
hipLaunchKernelGGL(( gpu_sqsub) , dim3((N*M-1)/THREADS+1), dim3(THREADS) , 0, 0, AFD, BFD, CFD, N*M);
hipMemcpy(CFH, CFD, SIZEFLT(M*N), hipMemcpyDeviceToHost);
int b = 1;
for (i=0; i<N*M; ++i)
{
b &= CFH[i] == 9.0;
}
CuAssertIntEquals(tc, 1, b);
}
///Clean up after test case
void cleanupTestGpuSquareSubsctract()
{
hipFree(AFD); hipFree(BFD); hipFree(CFD);
free(AFH); free(BFH); free(CFH);
}
/**
* @brief Unittest for #gpu_sum
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSum(CuTest *tc)
{
printBanner("Test gpu_sum");
dim3 grid_dim;
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2307;
M = 255;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
int remaining = N*M;
int shared_size = THREADS * sizeof(FLOAT_TYPE);
int req_blocks = 0;
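    // each pass launches one block per 2*THREADS elements and swaps the input/output buffers until a single partial sum remains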
while (remaining > 1)
{
req_blocks = (remaining - 1) / THREADS / 2 + 1;
grid_dim.x = static_cast<int>(ceil(sqrt(req_blocks)));
grid_dim.y = (req_blocks - 1) / grid_dim.x + 1;
hipLaunchKernelGGL(( gpu_sum) , dim3(grid_dim), dim3(THREADS), shared_size , 0, AFD, BFD, remaining);
//swap
FLOAT_TYPE *temp = AFD;
AFD = BFD;
BFD = temp;
remaining = req_blocks;
hipMemcpy(&result, AFD, sizeof(FLOAT_TYPE), hipMemcpyDeviceToHost);
}
hipMemcpy(&result, AFD, sizeof(FLOAT_TYPE), hipMemcpyDeviceToHost);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
/**
* @brief Unittest for #gpu_sum256
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSum256(CuTest *tc)
{
printBanner("Test gpu_sum256");
dim3 grid_dim;
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2307;
M = 255;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
int remaining = N*M;
int shared_size = THREADS * sizeof(FLOAT_TYPE);
int req_blocks = 0;
while (remaining > 1)
{
req_blocks = (remaining - 1) / THREADS / 2 + 1;
grid_dim.x = static_cast<int>(ceil(sqrt(req_blocks)));
grid_dim.y = (req_blocks - 1) / grid_dim.x + 1;
hipLaunchKernelGGL(( gpu_sum256) , dim3(grid_dim), dim3(THREADS), shared_size , 0, AFD, BFD, remaining);
//swap
FLOAT_TYPE *temp = AFD;
AFD = BFD;
BFD = temp;
remaining = req_blocks;
hipMemcpy(&result, AFD, sizeof(FLOAT_TYPE), hipMemcpyDeviceToHost);
}
hipMemcpy(&result, AFD, sizeof(FLOAT_TYPE), hipMemcpyDeviceToHost);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
/**
* @brief Unittest for #gpu_sum_h
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpusumHost(CuTest *tc)
{
printBanner("Test gpu_sum_h");
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2907;
M = 242;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
result = gpu_sum_h(AFD, BFD, N*M);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
///Clean up after test case
void cleanTestGpuSum()
{
hipFree(AFD); hipFree(BFD);
free(AFH); free(BFH);
}
CuSuite* gpuSumGetSuite()
{
CuSuite *suite = CuSuiteNew();
SUITE_ADD_TCLN(suite, testGpuCondCopy, cleanupTestGpuCondCopy);
SUITE_ADD_TCLN(suite, testGpuSquareSubstract, cleanupTestGpuSquareSubsctract);
SUITE_ADD_TCLN(suite, testGpuSum, cleanTestGpuSum);
SUITE_ADD_TCLN(suite, testGpuSum256, cleanTestGpuSum);
SUITE_ADD_TCLN(suite, testGpusumHost, cleanTestGpuSum);
return suite;
} | db6c6ac1be66af7e04b584f495b9eb1fd7c3c6e5.cu | /**
* Unittests for GPU sum functions
* @file TestGpuSum.cu
* @author Adam Koleszar ([email protected])
*/
#include "FloatType.h"
#include "CuTest.h"
#include "GpuSum.h"
#include "ArrayUtils.h"
#include "TestUtils.h"
/**
* @brief Unittest for #gpu_cond_copy
*
* @param tc test case
* @test
* - Allocate arrays and fill them with random values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuCondCopy(CuTest *tc)
{
printBanner("Test gpu_cond_copy");
unsigned long seed = time(NULL);
N = 265;
M = 222;
int cond = 1;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
AIH = createHostArrayInt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = getRandom(&seed);
AIH[i] = (i<N) ? cond : 0;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
AID = createGpuArrayInt(M*N, ARRAY_COPY, 0, AIH);
gpu_cond_copy <<< (N*M-1)/THREADS+1, THREADS >>> (BFD, AFD, AID, 1, N*M);
cudaMemcpy(BFH, BFD, SIZEFLT(M*N), cudaMemcpyDeviceToHost);
int b = 1;
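    // only the first N entries carry the condition flag, so those must be copied verbatim and the remaining entries must stay zero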
for (i=0; i<N*M; ++i)
{
b &= (BFH[i] == AFH[i] && i<N) || (BFH[i] == 0 && i>=N);
}
CuAssertIntEquals(tc, cond, b);
}
///Clean up after test case
void cleanupTestGpuCondCopy()
{
cudaFree(AFD); cudaFree(BFD); cudaFree(AID);
free(AFH); free(BFH); free(AIH);
}
/**
* @brief Unittest for #gpu_sqsub
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSquareSubstract(CuTest *tc)
{
printBanner("Test gpu_sqsub");
N = 211;
M = 259;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
CFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = 5.0;
BFH[i] = 2.0;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, BFH);
CFD = createGpuArrayFlt(M*N);
gpu_sqsub <<< (N*M-1)/THREADS+1, THREADS >>> (AFD, BFD, CFD, N*M);
cudaMemcpy(CFH, CFD, SIZEFLT(M*N), cudaMemcpyDeviceToHost);
int b = 1;
for (i=0; i<N*M; ++i)
{
b &= CFH[i] == 9.0;
}
CuAssertIntEquals(tc, 1, b);
}
///Clean up after test case
void cleanupTestGpuSquareSubsctract()
{
cudaFree(AFD); cudaFree(BFD); cudaFree(CFD);
free(AFH); free(BFH); free(CFH);
}
/**
* @brief Unittest for #gpu_sum
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSum(CuTest *tc)
{
printBanner("Test gpu_sum");
dim3 grid_dim;
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2307;
M = 255;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
int remaining = N*M;
int shared_size = THREADS * sizeof(FLOAT_TYPE);
int req_blocks = 0;
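    // each pass launches one block per 2*THREADS elements and swaps the input/output buffers until a single partial sum remains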
while (remaining > 1)
{
req_blocks = (remaining - 1) / THREADS / 2 + 1;
grid_dim.x = static_cast<int>(ceil(sqrt(req_blocks)));
grid_dim.y = (req_blocks - 1) / grid_dim.x + 1;
gpu_sum <<<grid_dim, THREADS, shared_size >>>(AFD, BFD, remaining);
//swap
FLOAT_TYPE *temp = AFD;
AFD = BFD;
BFD = temp;
remaining = req_blocks;
cudaMemcpy(&result, AFD, sizeof(FLOAT_TYPE), cudaMemcpyDeviceToHost);
}
cudaMemcpy(&result, AFD, sizeof(FLOAT_TYPE), cudaMemcpyDeviceToHost);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
/**
* @brief Unittest for #gpu_sum256
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpuSum256(CuTest *tc)
{
printBanner("Test gpu_sum256");
dim3 grid_dim;
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2307;
M = 255;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
int remaining = N*M;
int shared_size = THREADS * sizeof(FLOAT_TYPE);
int req_blocks = 0;
while (remaining > 1)
{
req_blocks = (remaining - 1) / THREADS / 2 + 1;
grid_dim.x = static_cast<int>(ceil(sqrt(req_blocks)));
grid_dim.y = (req_blocks - 1) / grid_dim.x + 1;
gpu_sum256 <<<grid_dim, THREADS, shared_size >>>(AFD, BFD, remaining);
//swap
FLOAT_TYPE *temp = AFD;
AFD = BFD;
BFD = temp;
remaining = req_blocks;
cudaMemcpy(&result, AFD, sizeof(FLOAT_TYPE), cudaMemcpyDeviceToHost);
}
cudaMemcpy(&result, AFD, sizeof(FLOAT_TYPE), cudaMemcpyDeviceToHost);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
/**
* @brief Unittest for #gpu_sum_h
*
* @param tc test case
* @test
* - Allocate arrays and fill them with fixed values
* - call function
* - check the sum of the arrays against predefined values
*/
void testGpusumHost(CuTest *tc)
{
printBanner("Test gpu_sum_h");
FLOAT_TYPE result = 0.0;
FLOAT_TYPE num = 1.0;
N = 2907;
M = 242;
AFH = createHostArrayFlt(M*N);
BFH = createHostArrayFlt(M*N);
int i;
for (i=0; i<N*M; ++i)
{
AFH[i] = num;
}
AFD = createGpuArrayFlt(M*N, ARRAY_COPY, 0, AFH);
BFD = createGpuArrayFlt(M*N);
result = gpu_sum_h(AFD, BFD, N*M);
CuAssertDblEquals(tc, N*M*num, result, 0.0001);
}
///Clean up after test case
void cleanTestGpuSum()
{
cudaFree(AFD); cudaFree(BFD);
free(AFH); free(BFH);
}
CuSuite* gpuSumGetSuite()
{
CuSuite *suite = CuSuiteNew();
SUITE_ADD_TCLN(suite, testGpuCondCopy, cleanupTestGpuCondCopy);
SUITE_ADD_TCLN(suite, testGpuSquareSubstract, cleanupTestGpuSquareSubsctract);
SUITE_ADD_TCLN(suite, testGpuSum, cleanTestGpuSum);
SUITE_ADD_TCLN(suite, testGpuSum256, cleanTestGpuSum);
SUITE_ADD_TCLN(suite, testGpusumHost, cleanTestGpuSum);
return suite;
} |
5b3ebc7418c914cce684654951cb8195a41518b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
    //out-of-place transpose of A and B
if( (distA<n*n) && (distB<n*n) ){
d_matAT [distB*n + distA] = d_matA[distA*n + distB];
d_matBT [distB*n + distA] = d_matB[distA*n + distB];
}
}
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
if (distA*n+distB <= (n*n - 1)){
        //multiplication
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matBT[distB+k*n];
}
        //sum
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matAT[distA*n+distB];
}
}
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
    //sum
if (distA*n+distB <= (n*n)){
d_matC[distA*n+distB] += d_matA[distA*n+distB] + d_matB[distA*n+distB];
}
}
__global__ void kernel_transpuesta(double *m, int N){
int tid = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
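    // decode the linear thread id into (i, j) coordinates of the strictly lower triangle, so every off-diagonal pair is swapped exactly once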
int i = int((1 + sqrtf(1 + 8*tid)) / 2);
    int j = tid - (i*(i-1)/2); double aux; // aux must be double so the swap does not truncate the matrix values
if ( (i<N) && (j<N) ){
aux = m[i*N + j] ;
m[i*N + j] = m[j*N + i];
m[j*N + i] = aux;
}
}
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
    //multiplication
if (distA*n+distB <= (n*n)){
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB+n*k];
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
        printf("Missing arguments: N, CUDABLK\n");
return 0;
}
    //variable declarations
hipError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
    //initialize variables for the CPU
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
    //initialize variables for the GPU
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matAT, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matBT, numBytes);
hipMalloc((void **) &d_matC, numBytes);
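    // blocks per grid dimension (N/CUDA_BLK); this assumes N is a multiple of CUDA_BLK so the square grid exactly covers the N x N matrices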
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
    dim3 dimBlock(CUDA_BLK,CUDA_BLK); // two-dimensional block of threads (*cb* threads)
    dim3 dimGrid(gridBlock,gridBlock); // two-dimensional grid (*ceil(n/cb)* blocks)
    //--------------------------------CPU version starts ------------------------------------
    //sequential
timetick = dwalltime();
    //multiplication
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
                matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; //accesses matB row-wise, which simulates the transposed matB
}
}
}
    //sum
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
    printf("Time for the equation on the CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
    //--------------------------------GPU out-of-place version starts ------------------------------------
timetick = dwalltime();
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipLaunchKernelGGL(( ecuacion_kernel_outplace_p1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, d_matAT,d_matB,d_matBT, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( ecuacion_kernel_outplace_p2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
hipDeviceSynchronize();
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
    printf("Time for the out-of-place equation on the GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
    //--------------------------------GPU out-of-place version ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
hipFree(d_matAT);
hipFree(d_matBT);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
hipMalloc((void **) &d_matA, numBytes);
hipMalloc((void **) &d_matB, numBytes);
hipMalloc((void **) &d_matC, numBytes);
    //--------------------------------GPU in-place version starts ------------------------------------
timetick = dwalltime();
hipMemcpy(d_matA, matA, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matB, matB, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipMemcpy(d_matC, matC, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
hipLaunchKernelGGL(( kernel_transpuesta), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_sum_Matriz), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_transpuesta), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_transpuesta), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matB, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kernel_mult_sum_matriz), dim3(dimGrid), dim3(dimBlock), 0, 0, d_matA, d_matB,d_matC, N);
hipDeviceSynchronize();
hipMemcpy(matC, d_matC, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
    printf("Time for the in-place equation on the GPU: %f\n",dwalltime() - timetick);
error = hipGetLastError();
printf("error: %d\n\n",error);
    //--------------------------------GPU in-place version ends ------------------------------------
hipFree(d_matA);
hipFree(d_matB);
hipFree(d_matC);
/*
    //print the matC matrix
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
| 5b3ebc7418c914cce684654951cb8195a41518b6.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <math.h>
//134217728
double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
__global__ void ecuacion_kernel_outplace_p1(double *d_matA,double *d_matAT,double *d_matB,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
    //out-of-place transpose of A and B
if( (distA<n*n) && (distB<n*n) ){
d_matAT [distB*n + distA] = d_matA[distA*n + distB];
d_matBT [distB*n + distA] = d_matB[distA*n + distB];
}
}
__global__ void ecuacion_kernel_outplace_p2(double *d_matA,double *d_matB,double *d_matC,double *d_matAT,double *d_matBT, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
if (distA*n+distB <= (n*n - 1)){
        //multiplication
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matBT[distB+k*n];
}
        //sum
d_matC[distA*n+distB] += d_matB[distA*n+distB] + d_matAT[distA*n+distB];
}
}
__global__ void kernel_sum_Matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
    //sum
if (distA*n+distB <= (n*n)){
d_matC[distA*n+distB] += d_matA[distA*n+distB] + d_matB[distA*n+distB];
}
}
__global__ void kernel_transpuesta(double *m, int N){
int tid = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
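    // decode the linear thread id into (i, j) coordinates of the strictly lower triangle, so every off-diagonal pair is swapped exactly once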
int i = int((1 + sqrtf(1 + 8*tid)) / 2);
    int j = tid - (i*(i-1)/2); double aux; // aux must be double so the swap does not truncate the matrix values
if ( (i<N) && (j<N) ){
aux = m[i*N + j] ;
m[i*N + j] = m[j*N + i];
m[j*N + i] = aux;
}
}
__global__ void kernel_mult_sum_matriz (double *d_matA,double *d_matB,double *d_matC, unsigned int n){
int distA = blockIdx.y * blockDim.y + threadIdx.y; //i
int distB = blockIdx.x * blockDim.x + threadIdx.x; //j
int k;
    //multiplication
if (distA*n+distB <= (n*n)){
for(k = 0; k < n ;k++){
d_matC[distA*n+distB] += d_matA[distA*n+k] * d_matB[distB+n*k];
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
        printf("Missing arguments: N, CUDABLK\n");
return 0;
}
    //variable declarations
cudaError_t error;
unsigned int N = atoi (argv[1]);
unsigned long CUDA_BLK = atoi (argv[2]), gridBlock;
unsigned long numBytes = sizeof(double)*N*N;
double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,*d_matAT,*d_matBT,timetick;
unsigned int i,j,k;
    //initialize variables for the CPU
matA = (double *)malloc(numBytes);
matB = (double *)malloc(numBytes);
matC = (double *)malloc(numBytes);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
    //initialize variables for the GPU
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matAT, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matBT, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
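    // blocks per grid dimension (N/CUDA_BLK); this assumes N is a multiple of CUDA_BLK so the square grid exactly covers the N x N matrices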
gridBlock = (unsigned int)sqrt(N*N/CUDA_BLK/CUDA_BLK);
    dim3 dimBlock(CUDA_BLK,CUDA_BLK); // two-dimensional block of threads (*cb* threads)
    dim3 dimGrid(gridBlock,gridBlock); // two-dimensional grid (*ceil(n/cb)* blocks)
    //--------------------------------CPU version starts ------------------------------------
    //sequential
timetick = dwalltime();
    //multiplication
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
for(k = 0; k < N ;k++){
                matC[i*N+j] += matA[i*N+k] * matB[j*N+k]; //accesses matB row-wise, which simulates the transposed matB
}
}
}
    //sum
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
matC[i*N+j] += matB[i*N+j] + matA[i+j*N];
}
}
    printf("Time for the equation on the CPU: %f\n\n",dwalltime() - timetick);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
    //--------------------------------GPU out-of-place version starts ------------------------------------
timetick = dwalltime();
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
ecuacion_kernel_outplace_p1<<<dimGrid, dimBlock>>>(d_matA, d_matAT,d_matB,d_matBT, N);
cudaThreadSynchronize();
ecuacion_kernel_outplace_p2<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC,d_matAT,d_matBT, N);
cudaThreadSynchronize();
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
    printf("Time for the out-of-place equation on the GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
/*
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
    //--------------------------------GPU out-of-place version ends ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
cudaFree(d_matAT);
cudaFree(d_matBT);
for (i = 0; i < N*N; i++){
matA[i] = i;
matB[i] = i;
matC[i] = 0;
}
cudaMalloc((void **) &d_matA, numBytes);
cudaMalloc((void **) &d_matB, numBytes);
cudaMalloc((void **) &d_matC, numBytes);
    //--------------------------------GPU in-place version starts ------------------------------------
timetick = dwalltime();
cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_sum_Matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matA, N);
cudaThreadSynchronize();
kernel_transpuesta<<<dimGrid, dimBlock>>>(d_matB, N);
cudaThreadSynchronize();
kernel_mult_sum_matriz<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N);
cudaThreadSynchronize();
cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
    printf("Time for the in-place equation on the GPU: %f\n",dwalltime() - timetick);
error = cudaGetLastError();
printf("error: %d\n\n",error);
    //--------------------------------GPU in-place version ends ------------------------------------
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
/*
    //print the matC matrix
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
printf("%f|",matC[i*N+j]);
}
printf("\n");
}
printf("\n");
*/
free(matA);
free(matB);
free(matC);
return 0;
}
|
e3e66677cb4bfb70860327397c2c801fe5a86f4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,hipReadModeElementType> tex_vec;
texture<int,1,hipReadModeElementType> tex_cols;
texture<float,1,hipReadModeElementType> tex_val;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
hipHostMalloc(newA_ptr, paddedSize * sizeof(float));
hipHostMalloc(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(volatile float* val,
const int* __restrict__ cols,
const int * rowDelimiters,
volatile float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
// __shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
/* if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
*/
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = tex1Dfetch(tex_cols,j);
mySum += val[j]*vec[col];//tex1Dfetch(tex_val,j) *vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
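        // warp-synchronous tree reduction over this warp's 32 partial sums; it relies on the volatile shared array rather than explicit synchronization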
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 10; // 10% of entries will be non-zero
float maxval = 200.0;
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
hipBindTexture(0,tex_cols,d_spmv_cols,spmv_nItems * sizeof(int));
hipBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
| e3e66677cb4bfb70860327397c2c801fe5a86f4f.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define spmv_NBLOCKS 12*8*21 //22
#define spmv_BLOCK_SIZE 256
#define WARP_SIZE 32
texture<float,1,cudaReadModeElementType> tex_vec;
texture<int,1,cudaReadModeElementType> tex_cols;
texture<float,1,cudaReadModeElementType> tex_val;
static const double MAX_RELATIVE_ERROR = .02;
static const int PAD_FACTOR = 16;
void fill(float *A, const int n, const float maxi)
{
for (int j = 0; j < n; j++)
{
A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
}
}
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
int nnzAssigned = 0;
// Figure out the probability that a nonzero should be assigned to a given
// spot in the matrix
double prob = (double)n / ((double)dim * (double)dim);
// Seed random number generator
srand48(2013);
// Randomly decide whether entry i,j gets a value, but ensure n values
// are assigned
bool fillRemaining = false;
for (int i = 0; i < dim; i++)
{
rowDelimiters[i] = nnzAssigned;
for (int j = 0; j < dim; j++)
{
int numEntriesLeft = (dim * dim) - ((i * dim) + j);
int needToAssign = n - nnzAssigned;
if (numEntriesLeft <= needToAssign) {
fillRemaining = true;
}
if ((nnzAssigned < n && drand48() <= prob) || fillRemaining)
{
// Assign (i,j) a value
cols[nnzAssigned] = j;
nnzAssigned++;
}
}
}
// Observe the convention to put the number of non zeroes at the end of the
// row delimiters array
rowDelimiters[dim] = n;
assert(nnzAssigned == n);
}
void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters,
float **newA_ptr, int **newcols_ptr, int *newIndices,
int *newSize)
{
// determine total padded size and new row indices
int paddedSize = 0;
int rowSize;
for (int i=0; i<dim; i++)
{
newIndices[i] = paddedSize;
rowSize = rowDelimiters[i+1] - rowDelimiters[i];
if (rowSize % PAD_FACTOR != 0)
{
rowSize += PAD_FACTOR - rowSize % PAD_FACTOR;
}
paddedSize += rowSize;
}
*newSize = paddedSize;
newIndices[dim] = paddedSize;
cudaMallocHost(newA_ptr, paddedSize * sizeof(float));
cudaMallocHost(newcols_ptr, paddedSize * sizeof(int));
float *newA = *newA_ptr;
int *newcols = *newcols_ptr;
memset(newA, 0, paddedSize * sizeof(float));
// fill newA and newcols
for (int i=0; i<dim; i++)
{
for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1];
j++, k++)
{
newA[k] = A[j];
newcols[k] = cols[j];
}
}
}
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters,
const float *vec, int dim, float *out)
{
for (int i=0; i<dim; i++)
{
float t = 0;
for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
{
int col = cols[j];
t += val[j] * vec[col];//tex1Dfetch(tex_vec,col);
}
out[i] = t;
}
}
void spmv_verifyResults(const float *cpuResults, const float *gpuResults,
const int size)
{
bool passed = true;
for (int i = 0; i < size; i++)
{
if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i]
> MAX_RELATIVE_ERROR)
{
cout << "Failed! Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
" dev: " << gpuResults[i] << endl;
return;
}
}
cout << "spmv passed" << endl;
}
__global__ void
spmv_kernel(volatile float* val,
const int* __restrict__ cols,
const int * rowDelimiters,
volatile float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
// __shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1];
__shared__ volatile float partialSums[spmv_BLOCK_SIZE];
/* if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1)
rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x];
__syncthreads();
*/
if (myRow < dim)
{
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = tex1Dfetch(tex_cols,j);
mySum += val[j]*vec[col];//tex1Dfetch(tex_val,j) *vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
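        // warp-synchronous tree reduction over this warp's 32 partial sums; it relies on the volatile shared array rather than explicit synchronization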
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE);
    spmv_nItems = spmv_numRows * spmv_numRows / 10; // 10% of entries will be non-zero
float maxval = 200.0;
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ;
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
cudaBindTexture(0,tex_cols,d_spmv_cols,spmv_nItems * sizeof(int));
cudaBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Setup thread configuration
int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE));
spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
return 0;
}
|
6599631ca60ee50433c85199e3e7d34b08bc7e42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "common.h"
template <typename Dtype, typename Acctype>
__global__ void
#if __CUDA_ARCH__ >= 320
__launch_bounds__(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Acctype accum_scale = Acctype(0);
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
++head;
}
}
}
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
const Dtype* scale, const Dtype negative_beta, Dtype* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype, typename Acctype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Acctype accum_ratio = Acctype(0);
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
++head;
}
}
}
#include "generic/SpatialCrossMapLRN.cu"
#include "THHGenerateFloatTypes.h"
| 6599631ca60ee50433c85199e3e7d34b08bc7e42.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "common.h"
template <typename Dtype, typename Acctype>
__global__ void
#if __CUDA_ARCH__ >= 320
__launch_bounds__(CUDA_NUM_THREADS)
#endif
LRNFillScale(const int nthreads, const Dtype* const in,
const int num, const int channels, const int height,
const int width, const int size, const Dtype alpha_over_size,
const Dtype k, Dtype* const scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const in_off = in + offset;
Dtype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
Acctype accum_scale = Acctype(0);
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = ScalarConvert<Acctype, Dtype>::to(k + accum_scale * alpha_over_size);
++head;
}
}
}
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
const Dtype* scale, const Dtype negative_beta, Dtype* out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(scale[index], negative_beta);
}
}
template <typename Dtype, typename Acctype>
__global__ void LRNComputeDiff(const int nthreads,
const Dtype* const bottom_data, const Dtype* const top_data,
const Dtype* const scale, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const Dtype negative_beta,
const Dtype cache_ratio, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Dtype* const bottom_off = bottom_data + offset;
const Dtype* const top_off = top_data + offset;
const Dtype* const scale_off = scale + offset;
const Dtype* const top_diff_off = top_diff + offset;
Dtype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
Acctype accum_ratio = Acctype(0);
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
ScalarConvert<Acctype, Dtype>::to(top_diff_off[(head - post_pad) * step]
* pow(scale_off[(head - post_pad) * step], negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio);
++head;
}
}
}
#include "generic/SpatialCrossMapLRN.cu"
#include "THCGenerateFloatTypes.h"
|
1d594acccff736a561b867d31c6c91625167c7b6.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/matrix_mul_layer.hpp"
#include "caffe/util/device_alternate.hpp"
namespace caffe {
template<typename Dtype>
void MatrixMulLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count0 = bottom[0]->count();
int count1 = bottom[1]->count();
int offset_data_1 = 0, offset_data_0 = 0;
//the first bottom
const Dtype* bottom_data_0 = bottom[0]->gpu_data();
//the second bottom
const Dtype* bottom_data_1 = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if(use_streams_){
default_stream = new hipStream_t;
hipblasGetStream(Caffe::cublas_handle(), default_stream);
if(streams_need_init_){
stream_ = new hipStream_t[max_streams_];
for(int i = 0; i < max_streams_; i++){
//create a new stream
CUDA_CHECK(hipStreamCreate(&stream_[i]));
}
streams_need_init_ = false;
}
}
for(int i=0; i<maxch; i++){
if(use_streams_){
// set CUBLAS to use stream
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
//do matrix multiplication
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
else
caffe_gpu_gemm(CblasTrans, CblasNoTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasNoTrans, CblasTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
else
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
}
top_data+=M*N;
offset_data_0 +=M*K; offset_data_0 = offset_data_0 % count0;
offset_data_1 +=K*N; offset_data_1 = offset_data_1 % count1;
}
if(use_streams_){
// set default stream
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), *default_stream));
// Synch streams
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(hipStreamSynchronize(stream_[i]));
// CUDA_CHECK(hipStreamDestroy(stream_[i]));
}
}
}
template<typename Dtype>
void MatrixMulLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int count0 = bottom[0]->count();
int count1 = bottom[1]->count();
//all the bottoms
if(propagate_down[0]){
int offset_data_1 = 0, offset_diff_0 = 0;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff_0 = bottom[0]->mutable_gpu_diff();
const Dtype* bottom_data_1 = bottom[1]->gpu_data();
caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff_0);
bool can_use_stream = use_streams_ && ch_0 == maxch;
for(int i = 0; i < maxch; i++){
if(can_use_stream){
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, K, M, N, Dtype(1.0),
bottom_data_1 + offset_data_1, top_diff, Dtype(1.0), bottom_diff_0 + offset_diff_0);
else
caffe_gpu_gemm(CblasNoTrans, CblasTrans, K, M, N, Dtype(1.0),
bottom_data_1 + offset_data_1, top_diff, Dtype(1.0), bottom_diff_0 + offset_diff_0);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M, K, N, Dtype(1.0), top_diff,
bottom_data_1 + offset_data_1, Dtype(1.0), bottom_diff_0 + offset_diff_0);
else
caffe_gpu_gemm(CblasNoTrans, CblasTrans, M, K, N, Dtype(1.0), top_diff,
bottom_data_1 + offset_data_1, Dtype(1.0), bottom_diff_0 + offset_diff_0);
}
top_diff += M*N;
offset_diff_0 += M*K; offset_diff_0 = offset_diff_0 % count0;
offset_data_1 += K*N; offset_data_1 = offset_data_1 % count1;
}
if(can_use_stream){
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), *default_stream));
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(hipStreamSynchronize(stream_[i]));
}
}
}
if(propagate_down[1]){
int offset_data_0 = 0, offset_diff_1 = 0;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff_1 = bottom[1]->mutable_gpu_diff();
const Dtype* bottom_data_0 = bottom[0]->gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom_diff_1);
bool can_use_stream = use_streams_ && ch_1 == maxch;
for(int i = 0; i < maxch; i++){
if(can_use_stream){
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, N, K, M, Dtype(1.0),
top_diff, bottom_data_0 + offset_data_0, Dtype(1.0), bottom_diff_1 + offset_diff_1);
else
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, K, N, M, Dtype(1.0), bottom_data_0 + offset_data_0,
top_diff, Dtype(1.0), bottom_diff_1 + offset_diff_1);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasNoTrans, N, K, M, Dtype(1.0),
top_diff, bottom_data_0 + offset_data_0, Dtype(1.0), bottom_diff_1 + offset_diff_1);
else
caffe_gpu_gemm(CblasTrans, CblasNoTrans, K, N, M, Dtype(1.0), bottom_data_0 + offset_data_0,
top_diff, Dtype(1.0), bottom_diff_1 + offset_diff_1);
}
top_diff += M*N;
offset_data_0 += M*K; offset_data_0 = offset_data_0 % count0;
offset_diff_1 += K*N; offset_diff_1 = offset_diff_1 % count1;
}
if(can_use_stream){
CUBLAS_CHECK(hipblasSetStream(Caffe::cublas_handle(), *default_stream));
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(hipStreamSynchronize(stream_[i]));
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MatrixMulLayer);
}
| 1d594acccff736a561b867d31c6c91625167c7b6.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/matrix_mul_layer.hpp"
#include "caffe/util/device_alternate.hpp"
namespace caffe {
template<typename Dtype>
void MatrixMulLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count0 = bottom[0]->count();
int count1 = bottom[1]->count();
int offset_data_1 = 0, offset_data_0 = 0;
//the first bottom
const Dtype* bottom_data_0 = bottom[0]->gpu_data();
//the second bottom
const Dtype* bottom_data_1 = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
if(use_streams_){
default_stream = new cudaStream_t;
cublasGetStream(Caffe::cublas_handle(), default_stream);
if(streams_need_init_){
stream_ = new cudaStream_t[max_streams_];
for(int i = 0; i < max_streams_; i++){
//create a new stream
CUDA_CHECK(cudaStreamCreate(&stream_[i]));
}
streams_need_init_ = false;
}
}
for(int i=0; i<maxch; i++){
if(use_streams_){
// set CUBLAS to use stream
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
//do matrix multiplication
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
else
caffe_gpu_gemm(CblasTrans, CblasNoTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasNoTrans, CblasTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
else
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M, N, K, Dtype(1.0), bottom_data_0 + offset_data_0,
bottom_data_1 + offset_data_1, Dtype(0.0), top_data);
}
top_data+=M*N;
offset_data_0 +=M*K; offset_data_0 = offset_data_0 % count0;
offset_data_1 +=K*N; offset_data_1 = offset_data_1 % count1;
}
if(use_streams_){
// set default stream
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), *default_stream));
// Synch streams
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(cudaStreamSynchronize(stream_[i]));
// CUDA_CHECK(cudaStreamDestroy(stream_[i]));
}
}
}
template<typename Dtype>
void MatrixMulLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int count0 = bottom[0]->count();
int count1 = bottom[1]->count();
//all the bottoms
if(propagate_down[0]){
int offset_data_1 = 0, offset_diff_0 = 0;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff_0 = bottom[0]->mutable_gpu_diff();
const Dtype* bottom_data_1 = bottom[1]->gpu_data();
caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff_0);
bool can_use_stream = use_streams_ && ch_0 == maxch;
for(int i = 0; i < maxch; i++){
if(can_use_stream){
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, K, M, N, Dtype(1.0),
bottom_data_1 + offset_data_1, top_diff, Dtype(1.0), bottom_diff_0 + offset_diff_0);
else
caffe_gpu_gemm(CblasNoTrans, CblasTrans, K, M, N, Dtype(1.0),
bottom_data_1 + offset_data_1, top_diff, Dtype(1.0), bottom_diff_0 + offset_diff_0);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M, K, N, Dtype(1.0), top_diff,
bottom_data_1 + offset_data_1, Dtype(1.0), bottom_diff_0 + offset_diff_0);
else
caffe_gpu_gemm(CblasNoTrans, CblasTrans, M, K, N, Dtype(1.0), top_diff,
bottom_data_1 + offset_data_1, Dtype(1.0), bottom_diff_0 + offset_diff_0);
}
top_diff += M*N;
offset_diff_0 += M*K; offset_diff_0 = offset_diff_0 % count0;
offset_data_1 += K*N; offset_data_1 = offset_data_1 % count1;
}
if(can_use_stream){
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), *default_stream));
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(cudaStreamSynchronize(stream_[i]));
}
}
}
if(propagate_down[1]){
int offset_data_0 = 0, offset_diff_1 = 0;
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff_1 = bottom[1]->mutable_gpu_diff();
const Dtype* bottom_data_0 = bottom[0]->gpu_data();
caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom_diff_1);
bool can_use_stream = use_streams_ && ch_1 == maxch;
for(int i = 0; i < maxch; i++){
if(can_use_stream){
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), stream_[i % max_streams_]));
}
if(transpose_0){
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasTrans, N, K, M, Dtype(1.0),
top_diff, bottom_data_0 + offset_data_0, Dtype(1.0), bottom_diff_1 + offset_diff_1);
else
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, K, N, M, Dtype(1.0), bottom_data_0 + offset_data_0,
top_diff, Dtype(1.0), bottom_diff_1 + offset_diff_1);
}
else{
if(transpose_1)
caffe_gpu_gemm(CblasTrans, CblasNoTrans, N, K, M, Dtype(1.0),
top_diff, bottom_data_0 + offset_data_0, Dtype(1.0), bottom_diff_1 + offset_diff_1);
else
caffe_gpu_gemm(CblasTrans, CblasNoTrans, K, N, M, Dtype(1.0), bottom_data_0 + offset_data_0,
top_diff, Dtype(1.0), bottom_diff_1 + offset_diff_1);
}
top_diff += M*N;
offset_data_0 += M*K; offset_data_0 = offset_data_0 % count0;
offset_diff_1 += K*N; offset_diff_1 = offset_diff_1 % count1;
}
if(can_use_stream){
CUBLAS_CHECK(cublasSetStream(Caffe::cublas_handle(), *default_stream));
for(int i = 0; i < max_streams_; i++){
CUDA_CHECK(cudaStreamSynchronize(stream_[i]));
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MatrixMulLayer);
}
|
2cba7a0ec3ea2c72f21fd0b67bf0ce52fa4ecd0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DamierHSBAFloat.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |* Constructor *|
\*-------------------------*/
DamierHSBAFloat::DamierHSBAFloat(const Grid& grid, uint w, uint h, float dt, uint n, const DomaineMath& domaineMath) :
Animable_I<float4>(grid, w, h, "Damier_Cuda_HSBA_float4", domaineMath), variateurAnimation(Interval<float>(0, 2 * PI), dt)
{
// Inputs
this->n = n;
// Tools
 this->t = 0; // protected in Animable
}
DamierHSBAFloat::~DamierHSBAFloat()
{
 // nothing
}
/*-------------------------*\
 |* Method *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
*/
void DamierHSBAFloat::process(float4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
hipLaunchKernelGGL(( damierHSBAFloat), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,domaineMath,n,t);
}
/**
* Override
 * Called periodically by the API
*/
void DamierHSBAFloat::animationStep()
{
this->t = variateurAnimation.varierAndGet(); // in [0,2pi]
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 2cba7a0ec3ea2c72f21fd0b67bf0ce52fa4ecd0a.cu | #include "DamierHSBAFloat.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
__global__ void damierHSBAFloat(float4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |* Constructor *|
\*-------------------------*/
DamierHSBAFloat::DamierHSBAFloat(const Grid& grid, uint w, uint h, float dt, uint n, const DomaineMath& domaineMath) :
Animable_I<float4>(grid, w, h, "Damier_Cuda_HSBA_float4", domaineMath), variateurAnimation(Interval<float>(0, 2 * PI), dt)
{
// Inputs
this->n = n;
// Tools
 this->t = 0; // protected in Animable
}
DamierHSBAFloat::~DamierHSBAFloat()
{
 // nothing
}
/*-------------------------*\
 |* Method *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
*/
void DamierHSBAFloat::process(float4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
damierHSBAFloat<<<dg,db>>>(ptrDevPixels,w,h,domaineMath,n,t);
}
/**
* Override
 * Called periodically by the API
*/
void DamierHSBAFloat::animationStep()
{
this->t = variateurAnimation.varierAndGet(); // in [0,2pi]
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
forward_backward.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "include/common/cuda_error_hadling.h"
#include "forward_backward.cuh"
#include <iostream>
using namespace std;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_fb_data_kernel(int *_trees, bool *_active, int *_components, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_trees[idx] = INIT_TREE;
_active[idx] = true;
_components[idx] = INIT_COMPONENT;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ set_degrees(int *_src_ids, int *_dst_ids, int _edges_count, int *_in_deg, int *_out_deg, bool *_active)
{
register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_active[src_id])
atomicAdd(&(_out_deg[dst_id]), 1);
if (_active[dst_id])
atomicAdd(&(_in_deg[src_id]), 1);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ trim_kernel(int _vertices_count, int *_in_deg, int *_out_deg, bool *_active, int *_trees, int *_components, int *_last_component, bool *_changes)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (_active[idx] && ((_in_deg[idx] == 0) || (_out_deg[idx] == 0)))
{
int last_component = atomicAdd(&(_last_component[0]), 1);
_active[idx] = false;
_trees[idx] = INIT_TREE - 1;
_components[idx] = last_component;
_changes[0] = true;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ select_pivot_kernel(int *_trees, int _tree_num, int _vertices_count, int *_pivot)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (_trees[idx] == _tree_num)
_pivot[0] = idx;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_visited(bool *_visited, int *_pivot, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_visited[idx] = false;
}
_visited[_pivot[0]] = true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ bfs_kernel(int *_src_ids, int *_dst_ids, long long _edges_count, bool *_visited, bool *_terminate, int *_trees,
bool *_active)
{
register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if ((_visited[src_id] == true) && (_trees[src_id] == _trees[dst_id]) && (_visited[dst_id] == false))
{
_visited[dst_id] = true;
_terminate[0] = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ process_reach_result(bool *_fwd_result, bool *_bwd_result, int *_components, int *_trees, bool *_active, int _vertices_count,
int _last_tree, int _last_component)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (!_active[idx])
return;
int fwd_res = _fwd_result[idx];
int bwd_res = _bwd_result[idx];
if ((fwd_res == true) && (bwd_res == true))
{
_active[idx] = false;
_components[idx] = _last_component;
_trees[idx] = _last_tree;
}
else if ((fwd_res == false) && (bwd_res == false))
{
_trees[idx] = _last_tree + 1;
}
else if ((fwd_res == true) && (bwd_res == false))
{
_trees[idx] = _last_tree + 2;
}
else if ((fwd_res == false) && (bwd_res == true))
{
_trees[idx] = _last_tree + 3;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// functions
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void bfs(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_pivot, bool *_visited, int *_trees, bool *_active)
{
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
#ifdef __USE_FERMI__
if(grid_edges.x > 65535)
{
grid_edges.y = (grid_edges.x - 1) / 65535 + 1;
grid_edges.x = 65535;
}
#endif
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( init_visited) , dim3(grid_vertices), dim3(threads), 0, 0, _visited, _pivot, _vertices_count) ));
bool *device_terminate;
SAFE_CALL(hipMalloc((void**)&device_terminate, sizeof(bool)));
bool host_terminate = false;
while (host_terminate == false)
{
host_terminate = true;
SAFE_CALL(hipMemcpy(device_terminate, &host_terminate, sizeof(bool), hipMemcpyHostToDevice));
 SAFE_KERNEL_CALL((hipLaunchKernelGGL(( bfs_kernel) , dim3(grid_edges), dim3(threads) , 0, 0, _src_ids, _dst_ids, _edges_count, _visited, device_terminate, _trees, _active) ));
SAFE_CALL(hipMemcpy(&host_terminate, device_terminate, sizeof(bool), hipMemcpyDeviceToHost));
}
SAFE_CALL(hipFree(device_terminate));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// wrappers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void init_fb_data_wrapper(int *_trees, bool *_active, int *_components, int _vertices_count)
{
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( init_fb_data_kernel), dim3(grid_vertices), dim3(threads) , 0, 0, _trees, _active, _components, _vertices_count) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// trim edges on GPU (eliminate SCC with size 1)
void trim_wrapper(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_components,
int *_trees, bool *_active, int &_last_component)
{
int *in_deg;
int *out_deg;
SAFE_CALL(hipMalloc((void**)&in_deg, _vertices_count * sizeof(int)));
SAFE_CALL(hipMalloc((void**)&out_deg, _vertices_count * sizeof(int)));
dim3 threads_edges(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads_edges.x + 1, 1, 1);
#ifdef __USE_FERMI__
if(grid_edges.x > 65535)
{
grid_edges.y = (grid_edges.x - 1) / 65535 + 1;
grid_edges.x = 65535;
}
#endif
dim3 threads_vertices(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads_vertices.x + 1, 1, 1);
int *device_last_component;
bool *device_changes;
SAFE_CALL(hipMalloc((void**)&device_changes, sizeof(bool)));
SAFE_CALL(hipMalloc((void**)&device_last_component, sizeof(int)));
SAFE_CALL(hipMemcpy(device_last_component, &_last_component, sizeof(int), hipMemcpyHostToDevice));
bool host_changes = false;
do
{
// clear data
host_changes = false;
SAFE_CALL(hipMemcpy(device_changes, &host_changes, sizeof(bool), hipMemcpyHostToDevice));
SAFE_CALL(hipMemset(in_deg, 0, _vertices_count * sizeof(int)));
SAFE_CALL(hipMemset(out_deg, 0, _vertices_count * sizeof(int)));
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( set_degrees) , dim3(grid_edges), dim3(threads_edges) , 0, 0, _src_ids, _dst_ids, _edges_count, in_deg, out_deg, _active)) );
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( trim_kernel) , dim3(grid_vertices), dim3(threads_vertices) , 0, 0, _vertices_count, in_deg, out_deg, _active, _trees, _components,
device_last_component, device_changes) ));
SAFE_CALL(hipMemcpy(&host_changes, device_changes, sizeof(bool), hipMemcpyDeviceToHost));
} while (host_changes);
SAFE_CALL(hipMemcpy(&_last_component, device_last_component, sizeof(int), hipMemcpyDeviceToHost));
SAFE_CALL(hipFree(in_deg));
SAFE_CALL(hipFree(out_deg));
SAFE_CALL(hipFree(device_changes));
SAFE_CALL(hipFree(device_last_component));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void forward_backward_wrapper(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_components, int *_trees, int _tree_num,
bool *_active, int _last_component)
{
static int last_component = _last_component;
static int last_tree = _tree_num;
int *device_pivot;
SAFE_CALL(hipMalloc((void**)&device_pivot, sizeof(int)));
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
int host_pivot = ERROR_IN_PIVOT;
SAFE_CALL(hipMemcpy(device_pivot, &host_pivot, sizeof(int), hipMemcpyHostToDevice));
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( select_pivot_kernel) , dim3(grid_vertices), dim3(threads) , 0, 0, _trees, _tree_num, _vertices_count, device_pivot) ));
SAFE_CALL(hipMemcpy(&host_pivot, device_pivot, sizeof(int), hipMemcpyDeviceToHost));
if (host_pivot == ERROR_IN_PIVOT)
return;
bool *fwd_result, *bwd_result;
SAFE_CALL(hipMalloc((void**)&fwd_result, _vertices_count * sizeof(bool)));
SAFE_CALL(hipMalloc((void**)&bwd_result, _vertices_count * sizeof(bool)));
bfs(_src_ids, _dst_ids, _vertices_count, _edges_count, device_pivot, fwd_result, _trees, _active);
bfs(_dst_ids, _src_ids, _vertices_count, _edges_count, device_pivot, bwd_result, _trees, _active);
SAFE_KERNEL_CALL((hipLaunchKernelGGL(( process_reach_result) , dim3(grid_vertices), dim3(threads) , 0, 0, fwd_result, bwd_result, _components, _trees, _active, _vertices_count, last_tree, last_component) ));
last_component++;
last_tree += 4;
SAFE_CALL(hipFree(fwd_result));
SAFE_CALL(hipFree(bwd_result));
SAFE_CALL(hipFree(device_pivot));
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 1, _active, _last_component);
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 2, _active, _last_component);
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 3, _active, _last_component);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
| forward_backward.cu | #include "include/common/cuda_error_hadling.h"
#include "forward_backward.cuh"
#include <iostream>
using namespace std;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_fb_data_kernel(int *_trees, bool *_active, int *_components, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_trees[idx] = INIT_TREE;
_active[idx] = true;
_components[idx] = INIT_COMPONENT;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ set_degrees(int *_src_ids, int *_dst_ids, int _edges_count, int *_in_deg, int *_out_deg, bool *_active)
{
register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if (_active[src_id])
atomicAdd(&(_out_deg[dst_id]), 1);
if (_active[dst_id])
atomicAdd(&(_in_deg[src_id]), 1);
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ trim_kernel(int _vertices_count, int *_in_deg, int *_out_deg, bool *_active, int *_trees, int *_components, int *_last_component, bool *_changes)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (_active[idx] && ((_in_deg[idx] == 0) || (_out_deg[idx] == 0)))
{
int last_component = atomicAdd(&(_last_component[0]), 1);
_active[idx] = false;
_trees[idx] = INIT_TREE - 1;
_components[idx] = last_component;
_changes[0] = true;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ select_pivot_kernel(int *_trees, int _tree_num, int _vertices_count, int *_pivot)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (_trees[idx] == _tree_num)
_pivot[0] = idx;
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ init_visited(bool *_visited, int *_pivot, int _vertices_count)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
_visited[idx] = false;
}
_visited[_pivot[0]] = true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ bfs_kernel(int *_src_ids, int *_dst_ids, long long _edges_count, bool *_visited, bool *_terminate, int *_trees,
bool *_active)
{
register const int idx = (blockIdx.x * blockDim.x + threadIdx.x) + blockIdx.y * blockDim.x * gridDim.x;
if (idx < _edges_count)
{
int src_id = _src_ids[idx];
int dst_id = _dst_ids[idx];
if ((_visited[src_id] == true) && (_trees[src_id] == _trees[dst_id]) && (_visited[dst_id] == false))
{
_visited[dst_id] = true;
_terminate[0] = false;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void __global__ process_reach_result(bool *_fwd_result, bool *_bwd_result, int *_components, int *_trees, bool *_active, int _vertices_count,
int _last_tree, int _last_component)
{
register const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < _vertices_count)
{
if (!_active[idx])
return;
int fwd_res = _fwd_result[idx];
int bwd_res = _bwd_result[idx];
if ((fwd_res == true) && (bwd_res == true))
{
_active[idx] = false;
_components[idx] = _last_component;
_trees[idx] = _last_tree;
}
else if ((fwd_res == false) && (bwd_res == false))
{
_trees[idx] = _last_tree + 1;
}
else if ((fwd_res == true) && (bwd_res == false))
{
_trees[idx] = _last_tree + 2;
}
else if ((fwd_res == false) && (bwd_res == true))
{
_trees[idx] = _last_tree + 3;
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// functions
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void bfs(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_pivot, bool *_visited, int *_trees, bool *_active)
{
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads.x + 1, 1, 1);
#ifdef __USE_FERMI__
if(grid_edges.x > 65535)
{
grid_edges.y = (grid_edges.x - 1) / 65535 + 1;
grid_edges.x = 65535;
}
#endif
SAFE_KERNEL_CALL(( init_visited <<<grid_vertices, threads>>> (_visited, _pivot, _vertices_count) ));
bool *device_terminate;
SAFE_CALL(cudaMalloc((void**)&device_terminate, sizeof(bool)));
bool host_terminate = false;
while (host_terminate == false)
{
host_terminate = true;
SAFE_CALL(cudaMemcpy(device_terminate, &host_terminate, sizeof(bool), cudaMemcpyHostToDevice));
SAFE_KERNEL_CALL((bfs_kernel <<< grid_edges, threads >>> (_src_ids, _dst_ids, _edges_count, _visited, device_terminate, _trees, _active)));
SAFE_CALL(cudaMemcpy(&host_terminate, device_terminate, sizeof(bool), cudaMemcpyDeviceToHost));
}
SAFE_CALL(cudaFree(device_terminate));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// wrappers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void init_fb_data_wrapper(int *_trees, bool *_active, int *_components, int _vertices_count)
{
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
SAFE_KERNEL_CALL(( init_fb_data_kernel<<< grid_vertices, threads >>> (_trees, _active, _components, _vertices_count) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// trim edges on GPU (eliminate SCC with size 1)
void trim_wrapper(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_components,
int *_trees, bool *_active, int &_last_component)
{
int *in_deg;
int *out_deg;
SAFE_CALL(cudaMalloc((void**)&in_deg, _vertices_count * sizeof(int)));
SAFE_CALL(cudaMalloc((void**)&out_deg, _vertices_count * sizeof(int)));
dim3 threads_edges(1024, 1, 1);
dim3 grid_edges((_edges_count - 1) / threads_edges.x + 1, 1, 1);
#ifdef __USE_FERMI__
if(grid_edges.x > 65535)
{
grid_edges.y = (grid_edges.x - 1) / 65535 + 1;
grid_edges.x = 65535;
}
#endif
dim3 threads_vertices(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads_vertices.x + 1, 1, 1);
int *device_last_component;
bool *device_changes;
SAFE_CALL(cudaMalloc((void**)&device_changes, sizeof(bool)));
SAFE_CALL(cudaMalloc((void**)&device_last_component, sizeof(int)));
SAFE_CALL(cudaMemcpy(device_last_component, &_last_component, sizeof(int), cudaMemcpyHostToDevice));
bool host_changes = false;
do
{
// clear data
host_changes = false;
SAFE_CALL(cudaMemcpy(device_changes, &host_changes, sizeof(bool), cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemset(in_deg, 0, _vertices_count * sizeof(int)));
SAFE_CALL(cudaMemset(out_deg, 0, _vertices_count * sizeof(int)));
SAFE_KERNEL_CALL(( set_degrees <<< grid_edges, threads_edges >>> (_src_ids, _dst_ids, _edges_count, in_deg, out_deg, _active)) );
SAFE_KERNEL_CALL(( trim_kernel <<< grid_vertices, threads_vertices >>> (_vertices_count, in_deg, out_deg, _active, _trees, _components,
device_last_component, device_changes) ));
SAFE_CALL(cudaMemcpy(&host_changes, device_changes, sizeof(bool), cudaMemcpyDeviceToHost));
} while (host_changes);
SAFE_CALL(cudaMemcpy(&_last_component, device_last_component, sizeof(int), cudaMemcpyDeviceToHost));
SAFE_CALL(cudaFree(in_deg));
SAFE_CALL(cudaFree(out_deg));
SAFE_CALL(cudaFree(device_changes));
SAFE_CALL(cudaFree(device_last_component));
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
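// Editorial note (added; not part of the original source): this wrapper implements the
// recursive Forward-Backward SCC decomposition. For the given tree id it picks a pivot
// vertex, runs a forward BFS (following edges) and a backward BFS (following reversed
// edges) restricted to that tree, and marks the intersection of the two reachable sets as
// one strongly connected component. The remaining vertices are split into three new trees
// (reached by neither search, forward only, backward only) and the wrapper recurses on each.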
void forward_backward_wrapper(int *_src_ids, int *_dst_ids, int _vertices_count, long long _edges_count, int *_components, int *_trees, int _tree_num,
bool *_active, int _last_component)
{
static int last_component = _last_component;
static int last_tree = _tree_num;
int *device_pivot;
SAFE_CALL(cudaMalloc((void**)&device_pivot, sizeof(int)));
dim3 threads(1024, 1, 1);
dim3 grid_vertices((_vertices_count - 1) / threads.x + 1, 1, 1);
int host_pivot = ERROR_IN_PIVOT;
SAFE_CALL(cudaMemcpy(device_pivot, &host_pivot, sizeof(int), cudaMemcpyHostToDevice));
SAFE_KERNEL_CALL(( select_pivot_kernel <<< grid_vertices, threads >>> (_trees, _tree_num, _vertices_count, device_pivot) ));
SAFE_CALL(cudaMemcpy(&host_pivot, device_pivot, sizeof(int), cudaMemcpyDeviceToHost));
if (host_pivot == ERROR_IN_PIVOT)
return;
bool *fwd_result, *bwd_result;
SAFE_CALL(cudaMalloc((void**)&fwd_result, _vertices_count * sizeof(bool)));
SAFE_CALL(cudaMalloc((void**)&bwd_result, _vertices_count * sizeof(bool)));
bfs(_src_ids, _dst_ids, _vertices_count, _edges_count, device_pivot, fwd_result, _trees, _active);
bfs(_dst_ids, _src_ids, _vertices_count, _edges_count, device_pivot, bwd_result, _trees, _active);
SAFE_KERNEL_CALL(( process_reach_result <<< grid_vertices, threads >>> (fwd_result, bwd_result, _components, _trees, _active, _vertices_count, last_tree, last_component) ));
last_component++;
last_tree += 4;
SAFE_CALL(cudaFree(fwd_result));
SAFE_CALL(cudaFree(bwd_result));
SAFE_CALL(cudaFree(device_pivot));
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 1, _active, _last_component);
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 2, _active, _last_component);
forward_backward_wrapper(_src_ids, _dst_ids, _vertices_count, _edges_count, _components, _trees, last_tree - 3, _active, _last_component);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
bb8a0f6352f9f0baa6bc3c4e19593ca877e3a835.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <aev.h>
#include <thrust/equal.h>
#include <torch/extension.h>
#include <hipcub/hipcub.hpp>
#include <vector>
#include <ATen/Context.h>
#include <THH/THH.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <THH/THHThrustAllocator.cuh>
#define PI 3.141592653589793
using torch::Tensor;
// fetch from the following matrix
// [[ 0, 1, 2, 3, 4],
// [ 1, 5, 6, 7, 8],
// [ 2, 6, 9, 10, 11],
// [ 3, 7, 10, 12, 13],
// [ 4, 8, 11, 13, 14]]
constexpr int csubaev_offsets(int i, int j, int n) {
int larger = ::max(i, j);
int smaller = ::min(i, j);
int starting = smaller * (2 * n - smaller + 1) / 2; // n + (n - 1) + ... + (n - smaller + 1)
int offset = larger - smaller;
return starting + offset;
}
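// Editorial note (added; not part of the original source): worked example of the mapping
// above with n = 5 species: csubaev_offsets(2, 3, 5) gives smaller = 2,
// starting = 2 * (2*5 - 2 + 1) / 2 = 9, offset = 3 - 2 = 1, result = 10, which matches
// row 2, column 3 of the matrix shown in the comment.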
template <typename DataT>
struct PairDist {
DataT Rij;
int midx;
short i;
short j;
};
// used to group Rijs by atom id
template <typename DataT>
__host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) {
return lhs.midx == rhs.midx && lhs.i == rhs.i;
}
/// Alignment of memory. Must be a power of two
/// \tparam boundary Boundary to align to (NOTE: must be power of 2)
/// \param value Input value that is to be aligned
/// \return Value aligned to boundary
template <int32_t boundary>
__host__ __device__ __forceinline__ int align(const int& value) {
static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2");
return (value + boundary) & ~(boundary - 1);
}
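// Editorial note (added; not part of the original source): align<> rounds up to the next
// multiple of <boundary>, always adding at least one element of padding because it starts
// from value + boundary rather than value + boundary - 1; e.g. align<4>(10) == 12 and
// align<4>(8) == 12. In this file the result is only used to size shared-memory staging
// buffers, so the extra padding just over-allocates them slightly.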
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
extern __shared__ DataT spos[];
DataT* sx = &spos[0];
DataT* sy = &spos[max_natoms_per_mol];
DataT* sz = &spos[2 * max_natoms_per_mol];
int mol_idx = blockIdx.x;
int tidx = threadIdx.y * blockDim.x + threadIdx.x;
for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) {
sx[i] = pos_t[mol_idx][i][0];
sy[i] = pos_t[mol_idx][i][1];
sz[i] = pos_t[mol_idx][i][2];
}
__syncthreads();
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) {
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = sx[i];
DataT yi = sy[i];
DataT zi = sz[i];
for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) {
SpeciesT type_j = species_t[mol_idx][j];
const DataT xj = sx[j];
const DataT yj = sy[j];
const DataT zj = sz[j];
const DataT delx = xj - xi;
const DataT dely = yj - yi;
const DataT delz = zj - zi;
const DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
}
}
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistanceSingleMolecule(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
constexpr int mol_idx = 0;
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= max_natoms_per_mol || j >= max_natoms_per_mol)
return;
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
SpeciesT type_j = species_t[mol_idx][j];
DataT xj = pos_t[mol_idx][j][0];
DataT yj = pos_t[mol_idx][j][1];
DataT zj = pos_t[mol_idx][j][2];
DataT delx = xj - xi;
DataT dely = yj - yi;
DataT delz = zj - zi;
DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
// every block computes <blocksize> Rij gradients, traversed in column-major order to reduce atomicAdd contention
template <bool is_double_backward, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance_backward_or_doublebackward(
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_coord_or_force, // dcoord for backward, dforce(i.e. ddcoord) for double backward
const PairDist<DataT>* d_radialRij,
IndexT nRadialRij) {
int gidx = threadIdx.x * gridDim.x + blockIdx.x;
if (gidx >= nRadialRij)
return;
PairDist<DataT> d = d_radialRij[gidx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0];
const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1];
const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2];
if (is_double_backward) {
auto& grad_force = grad_coord_or_force;
DataT grad_force_coord_Rij_item = (grad_force[mol_idx][j][0] - grad_force[mol_idx][i][0]) * delx / Rij +
(grad_force[mol_idx][j][1] - grad_force[mol_idx][i][1]) * dely / Rij +
(grad_force[mol_idx][j][2] - grad_force[mol_idx][i][2]) * delz / Rij;
grad_dist[gidx] = grad_force_coord_Rij_item;
} else {
auto& grad_coord = grad_coord_or_force;
DataT grad_dist_coord_x = delx / Rij;
DataT grad_dist_coord_y = dely / Rij;
DataT grad_dist_coord_z = delz / Rij;
DataT grad_radial_dist_item = grad_dist[gidx];
atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z);
atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z);
}
}
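// Editorial note (added; not part of the original source): cuAngularAEVs below assigns one
// warp (TILEX * TILEY = 32 threads) per central atom i. It stages the neighbour deltas,
// distances, cutoffs and species in shared memory, then for every neighbour pair (j, k)
// accumulates the angular term
//   2 * ((1 + cos(theta_ijk - ShfZ)) / 2)^Zeta * exp(-EtaA * ((Rij + Rik)/2 - ShfA)^2) * fc_ij * fc_ik
// into the sub-AEV selected for the (species_j, species_k) pair via csubaev_offsets.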
template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4>
__global__ void cuAngularAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom;
DataT* saev = &smem[groupIdx * angular_length_aligned];
int offset = ncatom_per_tpb * angular_length_aligned;
DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
saev[iaev] = 0;
}
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
sfc[jj] = fc_ij;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik));
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT res = 2 * factor1 * factor2 * fc_ijk;
saev[subaev_offset + ishfr * nShfZ + itheta] += res;
}
}
}
}
}
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
aev_t[mol_idx][i][radial_length + iaev] = saev[iaev];
}
}
template <
bool is_double_backward,
typename SpeciesT,
typename DataT,
typename IndexT = int,
int TILEX = 8,
int TILEY = 4>
__global__ void cuAngularAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_output, // for backward, this is daev, for double backward, this is dforce (i.e. ddcoord)
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_input, // for backward, this is dcoord, for double backward, this is ddaev
const PairDist<DataT>* d_Rij,
const PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 2 catom per block
DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned];
int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
// cutoff
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca);
sfc[jj] = fc_ij;
sfc_grad[jj] = fc_ij_grad;
}
// grad init
DataT sdix_grad = 0;
DataT sdiy_grad = 0;
DataT sdiz_grad = 0;
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
sdjx_grad[jj] = 0;
sdjy_grad[jj] = 0;
sdjz_grad[jj] = 0;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
const DataT tc = 0.95; // theta constant factor
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
DataT grad_fc_ij = sfc_grad[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
DataT grad_theta_vij_x = 0;
DataT grad_theta_vij_y = 0;
DataT grad_theta_vij_z = 0;
DataT grad_theta_vik_x = 0;
DataT grad_theta_vik_y = 0;
DataT grad_theta_vik_z = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk];
theta = acos(tc * vij_vik_dot / (Rij * Rik));
// grad
DataT vij_factor =
tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik));
DataT vik_factor = tc /
(Rik * Rik * Rik *
sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky 80ms improved
grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij);
grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - sdy[kk] * Rij * Rij);
grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij);
grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik);
grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik);
grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik);
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
// TODO necessary?
DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane);
DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane);
DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane);
DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane);
DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane);
DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT grad_fc_ik = sfc_grad[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) *
sin(ShfZ - theta_ijk); // tricky 100ms improved
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2;
DataT grad_vij_x = 2 *
(grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij);
DataT grad_vij_y = 2 *
(grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij);
DataT grad_vij_z = 2 *
(grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij);
DataT grad_vik_x = 2 *
(grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik);
DataT grad_vik_y = 2 *
(grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik);
DataT grad_vik_z = 2 *
(grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik);
if (is_double_backward) {
int atomj_idx = d_Rij[start_idx + jj].j;
int atomk_idx = d_Rij[start_idx + kk].j;
auto& grad_force = grad_output;
auto& grad_grad_aev = grad_input;
grad_vij_x *= (grad_force[mol_idx][atomj_idx][0] - grad_force[mol_idx][i][0]);
grad_vij_y *= (grad_force[mol_idx][atomj_idx][1] - grad_force[mol_idx][i][1]);
grad_vij_z *= (grad_force[mol_idx][atomj_idx][2] - grad_force[mol_idx][i][2]);
grad_vik_x *= (grad_force[mol_idx][atomk_idx][0] - grad_force[mol_idx][i][0]);
grad_vik_y *= (grad_force[mol_idx][atomk_idx][1] - grad_force[mol_idx][i][1]);
grad_vik_z *= (grad_force[mol_idx][atomk_idx][2] - grad_force[mol_idx][i][2]);
atomicAdd(
&grad_grad_aev[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta],
grad_vij_x + grad_vij_y + grad_vij_z + grad_vik_x + grad_vik_y + grad_vik_z);
} else {
DataT grad_output_item = grad_output[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta];
grad_vij_x *= grad_output_item;
grad_vij_y *= grad_output_item;
grad_vij_z *= grad_output_item;
grad_vik_x *= grad_output_item;
grad_vik_y *= grad_output_item;
grad_vik_z *= grad_output_item;
sdix_grad += (-grad_vij_x - grad_vik_x);
sdiy_grad += (-grad_vij_y - grad_vik_y);
sdiz_grad += (-grad_vij_z - grad_vik_z);
for (int offset = 16; offset > 0; offset /= 2) {
grad_vij_x += __shfl_down_sync(0xFFFFFFFF, grad_vij_x, offset);
grad_vij_y += __shfl_down_sync(0xFFFFFFFF, grad_vij_y, offset);
grad_vij_z += __shfl_down_sync(0xFFFFFFFF, grad_vij_z, offset);
grad_vik_x += __shfl_down_sync(0xFFFFFFFF, grad_vik_x, offset);
grad_vik_y += __shfl_down_sync(0xFFFFFFFF, grad_vik_y, offset);
grad_vik_z += __shfl_down_sync(0xFFFFFFFF, grad_vik_z, offset);
}
if (laneIdx == 0) {
sdjx_grad[jj] += grad_vij_x;
sdjy_grad[jj] += grad_vij_y;
sdjz_grad[jj] += grad_vij_z;
sdjx_grad[kk] += grad_vik_x;
sdjy_grad[kk] += grad_vik_y;
sdjz_grad[kk] += grad_vik_z;
}
}
}
}
}
}
}
if (!is_double_backward) {
auto& grad_coord = grad_input;
int atomi_idx = i;
atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad);
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
int atomj_idx = d_Rij[start_idx + jj].j;
atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]);
}
}
}
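// Editorial note (added; not part of the original source): cuRadialAEVs below assigns
// THREADS_PER_RIJ threads to each surviving pair (i, j) and, for every radial shift ShfR,
// accumulates 0.25 * exp(-EtaR * (Rij - ShfR)^2) * fc(Rij), with
// fc(Rij) = 0.5 * cos(pi * Rij / Rcr) + 0.5, into atom i's radial sub-AEV slot selected by
// the species of j.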
template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc;
atomicAdd(&aev_t[mol_idx][i][type_j * radial_sublength + ishfr], GmR);
}
}
// every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times
template <bool is_double_backward, typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_aev, // daev for backward, ddaev for double backward
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
const PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
DataT fc_grad = -0.5 * (PI / Rcr) * sin(PI * Rij / Rcr);
DataT upstream_grad;
if (is_double_backward) {
upstream_grad = grad_dist[idx];
}
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR));
DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR;
DataT jacobian = GmR_grad * fc + GmR * fc_grad;
if (is_double_backward) {
atomicAdd(&grad_aev[mol_idx][i][type_j * radial_sublength + ishfr], upstream_grad * jacobian);
} else {
upstream_grad = grad_aev[mol_idx][i][type_j * radial_sublength + ishfr];
atomicAdd(&grad_dist[idx], upstream_grad * jacobian);
}
}
}
template <typename DataT>
void cubScan(const DataT* d_in, DataT* d_out, int num_items, hipStream_t stream) {
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run exclusive prefix sum
hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
}
template <typename DataT, typename IndexT>
int cubEncode(
const DataT* d_in,
DataT* d_unique_out,
IndexT* d_counts_out,
int num_items,
int* d_num_runs_out,
hipStream_t stream) {
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRunLengthEncode::Encode(
d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run encoding
hipcub::DeviceRunLengthEncode::Encode(
d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);
int num_selected = 0;
hipMemcpyAsync(&num_selected, d_num_runs_out, sizeof(int), hipMemcpyDefault, stream);
hipStreamSynchronize(stream);
return num_selected;
}
template <typename DataT, typename LambdaOpT>
int cubDeviceSelect(
const DataT* d_in,
DataT* d_out,
int num_items,
int* d_num_selected_out,
LambdaOpT select_op,
hipStream_t stream) {
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run selection
hipcub::DeviceSelect::If(
d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream);
int num_selected = 0;
hipMemcpyAsync(&num_selected, d_num_selected_out, sizeof(int), hipMemcpyDefault, stream);
hipStreamSynchronize(stream);
return num_selected;
}
template <typename DataT>
DataT cubMax(const DataT* d_in, int num_items, DataT* d_out, hipStream_t stream) {
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run max-reduction
hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
int maxVal = 0;
hipMemcpyAsync(&maxVal, d_out, sizeof(DataT), hipMemcpyDefault, stream);
hipStreamSynchronize(stream);
return maxVal;
}
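// Editorial note (added; not part of the original source): cuaev_forward below proceeds in
// stages: (1) compute all pairwise distances within each molecule, (2) cub-select the pairs
// with Rij <= Rcr and evaluate the radial AEVs, (3) from those, select the pairs with
// Rij <= Rca, (4) run-length encode them by central atom and exclusive-scan the counts to
// get per-atom start indices, and (5) launch the angular AEV kernel over the grouped
// neighbour lists, one warp per central atom.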
// NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1
Result cuaev_forward(const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams& aev_params) {
TORCH_CHECK(
(species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type");
TORCH_CHECK(
aev_params.EtaR_t.size(0) == 1 || aev_params.EtaA_t.size(0) == 1 || aev_params.Zeta_t.size(0) == 1,
"cuda extension is currently not supported for the specified "
"configuration");
float Rcr = aev_params.Rcr;
float Rca = aev_params.Rca;
const int n_molecules = species_t.size(0);
const int max_natoms_per_mol = species_t.size(1);
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options());
if (species_t.numel() == 0) {
return {
aev_t, Tensor(), Tensor(), Tensor(), 0, 0, 0, Tensor(), Tensor(), Tensor(), 0, 0, 0, coordinates_t, species_t};
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto thrust_allocator = THCThrustAllocator(at::globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(thrust_allocator).on(stream);
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// buffer to store all the pairwise distances (Rij)
auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol;
auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device());
Tensor tensor_Rij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr();
// init all Rij to inf
PairDist<float> init;
init.Rij = std::numeric_limits<float>::infinity();
thrust::fill(policy, d_Rij, d_Rij + total_natom_pairs, init);
// buffer to store all the pairwise distances that are needed for radial AEV
// computation
Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr();
auto buffer_count = allocator.allocate(sizeof(int));
int* d_count_out = (int*)buffer_count.get();
const int block_size = 64;
if (n_molecules == 1) {
int tileWidth = 32;
int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
dim3 block(tileWidth, tileWidth, 1);
dim3 grid(tilesPerRow, tilesPerRow, 1);
hipLaunchKernelGGL(( pairwiseDistanceSingleMolecule), dim3(grid), dim3(block), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
} else {
dim3 block(8, 8, 1);
// Compute pairwise distance (Rij) for all atom pairs in a molecule
// maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
// TODO: the kernel is not optimized for batched huge molecules (max_natoms_per_mol > 1000)
hipLaunchKernelGGL(( pairwiseDistance), dim3(n_molecules), dim3(block), sizeof(float) * max_natoms_per_mol * 3, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
}
// Extract the Rijs that are needed for the radial AEV computation, i.e. all Rij <= Rcr
int nRadialRij = cubDeviceSelect(
d_Rij,
d_radialRij,
total_natom_pairs,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
stream);
int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs<int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
nRadialRij);
// reuse buffer allocated for all Rij
// d_angularRij will store all the Rij required in Angular AEV computation
Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();
// Extract the Rijs that are needed for the angular AEV computation, i.e. all Rij <= Rca
int nAngularRij = cubDeviceSelect(
d_radialRij,
d_angularRij,
nRadialRij,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rca; },
stream);
Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options);
PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr();
Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options);
int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr();
// group by center atom
int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream);
Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options);
int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr();
cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream);
{
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4
int sxyz = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb;
};
int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream);
int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom);
int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
int angular_length_aligned = align<4>(aev_params.angular_length);
hipLaunchKernelGGL(( cuAngularAEVs), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms);
return {aev_t,
tensor_Rij,
tensor_radialRij,
tensor_angularRij,
total_natom_pairs,
nRadialRij,
nAngularRij,
tensor_centralAtom,
tensor_numPairsPerCenterAtom,
tensor_centerAtomStartIdx,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms,
coordinates_t,
species_t};
}
}
Tensor cuaev_backward(const Tensor& grad_output, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto grad_coord = torch::zeros(coordinates_t.sizes(), coordinates_t.options().requires_grad(false)); // [2, 5, 3]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
Tensor grad_radial_dist = torch::zeros(result.nRadialRij, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs_backward_or_doublebackward<false, int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
// For best results, block_size should match the average molecule size (without padding) to reduce atomicAdd contention
nblocks = (result.nRadialRij + block_size - 1) / block_size;
hipLaunchKernelGGL(( pairwiseDistance_backward_or_doublebackward<false>), dim3(nblocks), dim3(block_size), 0, stream,
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
Tensor grad_angular_coord = torch::zeros({result.nAngularRij, 3}, coordinates_t.options().requires_grad(false));
hipLaunchKernelGGL(( cuAngularAEVs_backward_or_doublebackward<false>), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
return grad_coord;
}
Tensor cuaev_double_backward(const Tensor& grad_force, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto grad_grad_aev = torch::zeros(
{coordinates_t.size(0), coordinates_t.size(1), aev_length},
coordinates_t.options().requires_grad(false)); // [2, 5, 384]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
auto grad_force_coord_Rij = torch::zeros({result.nRadialRij}, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij + block_size - 1) / block_size;
hipLaunchKernelGGL(( pairwiseDistance_backward_or_doublebackward<true>), dim3(nblocks), dim3(block_size), 0, stream,
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs_backward_or_doublebackward<true, int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
hipLaunchKernelGGL(( cuAngularAEVs_backward_or_doublebackward<true>), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
return grad_grad_aev;
}
| bb8a0f6352f9f0baa6bc3c4e19593ca877e3a835.cu | #include <aev.h>
#include <thrust/equal.h>
#include <torch/extension.h>
#include <cub/cub.cuh>
#include <vector>
#include <ATen/Context.h>
#include <THC/THC.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <THC/THCThrustAllocator.cuh>
#define PI 3.141592653589793
using torch::Tensor;
// fetch from the following matrix
// [[ 0, 1, 2, 3, 4],
// [ 1, 5, 6, 7, 8],
// [ 2, 6, 9, 10, 11],
// [ 3, 7, 10, 12, 13],
// [ 4, 8, 11, 13, 14]]
constexpr int csubaev_offsets(int i, int j, int n) {
int larger = std::max(i, j);
int smaller = std::min(i, j);
int starting = smaller * (2 * n - smaller + 1) / 2; // n + (n - 1) + ... + (n - smaller + 1)
int offset = larger - smaller;
return starting + offset;
}
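// e.g. for num_species n = 5: csubaev_offsets(1, 2, 5) == 6 and
// csubaev_offsets(3, 3, 5) == 12, matching the matrix above.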
template <typename DataT>
struct PairDist {
DataT Rij;
int midx;
short i;
short j;
};
// used to group Rijs by atom id
template <typename DataT>
__host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) {
return lhs.midx == rhs.midx && lhs.i == rhs.i;
}
/// Alignment of memory. Must be a power of two
/// \tparam boundary Boundary to align to (NOTE: must be power of 2)
/// \param value Input value that is to be aligned
/// \return Value aligned to boundary
template <int32_t boundary>
__host__ __device__ __forceinline__ int align(const int& value) {
static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2");
return (value + boundary) & ~(boundary - 1);
}
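// Note: this returns the smallest multiple of `boundary` that is strictly greater
// than `value`, e.g. align<4>(5) == 8 and align<4>(8) == 12 (an extra boundary is
// added when `value` is already aligned).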
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
extern __shared__ DataT spos[];
DataT* sx = &spos[0];
DataT* sy = &spos[max_natoms_per_mol];
DataT* sz = &spos[2 * max_natoms_per_mol];
int mol_idx = blockIdx.x;
int tidx = threadIdx.y * blockDim.x + threadIdx.x;
for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) {
sx[i] = pos_t[mol_idx][i][0];
sy[i] = pos_t[mol_idx][i][1];
sz[i] = pos_t[mol_idx][i][2];
}
__syncthreads();
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) {
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = sx[i];
DataT yi = sy[i];
DataT zi = sz[i];
for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) {
SpeciesT type_j = species_t[mol_idx][j];
const DataT xj = sx[j];
const DataT yj = sy[j];
const DataT zj = sz[j];
const DataT delx = xj - xi;
const DataT dely = yj - yi;
const DataT delz = zj - zi;
const DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
}
}
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistanceSingleMolecule(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
constexpr int mol_idx = 0;
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= max_natoms_per_mol || j >= max_natoms_per_mol)
return;
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
SpeciesT type_j = species_t[mol_idx][j];
DataT xj = pos_t[mol_idx][j][0];
DataT yj = pos_t[mol_idx][j][1];
DataT zj = pos_t[mol_idx][j][2];
DataT delx = xj - xi;
DataT dely = yj - yi;
DataT delz = zj - zi;
DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
// every block computes the gradients of block_size Rijs, indexed in column-major order to reduce atomicAdd contention
template <bool is_double_backward, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance_backward_or_doublebackward(
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_coord_or_force, // dcoord for backward, dforce(i.e. ddcoord) for double backward
const PairDist<DataT>* d_radialRij,
IndexT nRadialRij) {
int gidx = threadIdx.x * gridDim.x + blockIdx.x;
if (gidx >= nRadialRij)
return;
PairDist<DataT> d = d_radialRij[gidx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0];
const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1];
const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2];
if (is_double_backward) {
auto& grad_force = grad_coord_or_force;
DataT grad_force_coord_Rij_item = (grad_force[mol_idx][j][0] - grad_force[mol_idx][i][0]) * delx / Rij +
(grad_force[mol_idx][j][1] - grad_force[mol_idx][i][1]) * dely / Rij +
(grad_force[mol_idx][j][2] - grad_force[mol_idx][i][2]) * delz / Rij;
grad_dist[gidx] = grad_force_coord_Rij_item;
} else {
auto& grad_coord = grad_coord_or_force;
DataT grad_dist_coord_x = delx / Rij;
DataT grad_dist_coord_y = dely / Rij;
DataT grad_dist_coord_z = delz / Rij;
DataT grad_radial_dist_item = grad_dist[gidx];
atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z);
atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z);
}
}
template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4>
__global__ void cuAngularAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom;
DataT* saev = &smem[groupIdx * angular_length_aligned];
int offset = ncatom_per_tpb * angular_length_aligned;
DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
saev[iaev] = 0;
}
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
sfc[jj] = fc_ij;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik));
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT res = 2 * factor1 * factor2 * fc_ijk;
saev[subaev_offset + ishfr * nShfZ + itheta] += res;
}
}
}
}
}
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
aev_t[mol_idx][i][radial_length + iaev] = saev[iaev];
}
}
template <
bool is_double_backward,
typename SpeciesT,
typename DataT,
typename IndexT = int,
int TILEX = 8,
int TILEY = 4>
__global__ void cuAngularAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_output, // for backward, this is daev, for double backward, this is dforce (i.e. ddcoord)
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_input, // for backward, this is dcoord, for double backward, this is ddaev
const PairDist<DataT>* d_Rij,
const PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 2 catom per block
DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned];
int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
// cutoff
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca);
sfc[jj] = fc_ij;
sfc_grad[jj] = fc_ij_grad;
}
// grad init
DataT sdix_grad = 0;
DataT sdiy_grad = 0;
DataT sdiz_grad = 0;
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
sdjx_grad[jj] = 0;
sdjy_grad[jj] = 0;
sdjz_grad[jj] = 0;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
const DataT tc = 0.95; // theta constant factor
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
DataT grad_fc_ij = sfc_grad[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
DataT grad_theta_vij_x = 0;
DataT grad_theta_vij_y = 0;
DataT grad_theta_vij_z = 0;
DataT grad_theta_vik_x = 0;
DataT grad_theta_vik_y = 0;
DataT grad_theta_vik_z = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk];
theta = acos(tc * vij_vik_dot / (Rij * Rik));
// grad
DataT vij_factor =
tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik));
DataT vik_factor = tc /
(Rik * Rik * Rik *
sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky rearrangement, ~80 ms improvement
grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij);
grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - sdy[kk] * Rij * Rij);
grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij);
grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik);
grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik);
grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik);
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
// TODO necessary?
DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane);
DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane);
DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane);
DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane);
DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane);
DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT grad_fc_ik = sfc_grad[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) *
sin(ShfZ - theta_ijk); // tricky rearrangement, ~100 ms improvement
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2;
DataT grad_vij_x = 2 *
(grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij);
DataT grad_vij_y = 2 *
(grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij);
DataT grad_vij_z = 2 *
(grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij);
DataT grad_vik_x = 2 *
(grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik);
DataT grad_vik_y = 2 *
(grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik);
DataT grad_vik_z = 2 *
(grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik);
if (is_double_backward) {
int atomj_idx = d_Rij[start_idx + jj].j;
int atomk_idx = d_Rij[start_idx + kk].j;
auto& grad_force = grad_output;
auto& grad_grad_aev = grad_input;
grad_vij_x *= (grad_force[mol_idx][atomj_idx][0] - grad_force[mol_idx][i][0]);
grad_vij_y *= (grad_force[mol_idx][atomj_idx][1] - grad_force[mol_idx][i][1]);
grad_vij_z *= (grad_force[mol_idx][atomj_idx][2] - grad_force[mol_idx][i][2]);
grad_vik_x *= (grad_force[mol_idx][atomk_idx][0] - grad_force[mol_idx][i][0]);
grad_vik_y *= (grad_force[mol_idx][atomk_idx][1] - grad_force[mol_idx][i][1]);
grad_vik_z *= (grad_force[mol_idx][atomk_idx][2] - grad_force[mol_idx][i][2]);
atomicAdd(
&grad_grad_aev[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta],
grad_vij_x + grad_vij_y + grad_vij_z + grad_vik_x + grad_vik_y + grad_vik_z);
} else {
DataT grad_output_item = grad_output[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta];
grad_vij_x *= grad_output_item;
grad_vij_y *= grad_output_item;
grad_vij_z *= grad_output_item;
grad_vik_x *= grad_output_item;
grad_vik_y *= grad_output_item;
grad_vik_z *= grad_output_item;
sdix_grad += (-grad_vij_x - grad_vik_x);
sdiy_grad += (-grad_vij_y - grad_vik_y);
sdiz_grad += (-grad_vij_z - grad_vik_z);
for (int offset = 16; offset > 0; offset /= 2) {
grad_vij_x += __shfl_down_sync(0xFFFFFFFF, grad_vij_x, offset);
grad_vij_y += __shfl_down_sync(0xFFFFFFFF, grad_vij_y, offset);
grad_vij_z += __shfl_down_sync(0xFFFFFFFF, grad_vij_z, offset);
grad_vik_x += __shfl_down_sync(0xFFFFFFFF, grad_vik_x, offset);
grad_vik_y += __shfl_down_sync(0xFFFFFFFF, grad_vik_y, offset);
grad_vik_z += __shfl_down_sync(0xFFFFFFFF, grad_vik_z, offset);
}
if (laneIdx == 0) {
sdjx_grad[jj] += grad_vij_x;
sdjy_grad[jj] += grad_vij_y;
sdjz_grad[jj] += grad_vij_z;
sdjx_grad[kk] += grad_vik_x;
sdjy_grad[kk] += grad_vik_y;
sdjz_grad[kk] += grad_vik_z;
}
}
}
}
}
}
}
if (!is_double_backward) {
auto& grad_coord = grad_input;
int atomi_idx = i;
atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad);
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
int atomj_idx = d_Rij[start_idx + jj].j;
atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]);
}
}
}
template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc;
atomicAdd(&aev_t[mol_idx][i][type_j * radial_sublength + ishfr], GmR);
}
}
// every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times
template <bool is_double_backward, typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_aev, // daev for backward, ddaev for double backward
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
const PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
DataT fc_grad = -0.5 * (PI / Rcr) * sin(PI * Rij / Rcr);
DataT upstream_grad;
if (is_double_backward) {
upstream_grad = grad_dist[idx];
}
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR));
DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR;
DataT jacobian = GmR_grad * fc + GmR * fc_grad;
if (is_double_backward) {
atomicAdd(&grad_aev[mol_idx][i][type_j * radial_sublength + ishfr], upstream_grad * jacobian);
} else {
upstream_grad = grad_aev[mol_idx][i][type_j * radial_sublength + ishfr];
atomicAdd(&grad_dist[idx], upstream_grad * jacobian);
}
}
}
template <typename DataT>
void cubScan(const DataT* d_in, DataT* d_out, int num_items, cudaStream_t stream) {
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run exclusive prefix sum
cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
}
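// e.g. an exclusive prefix sum of per-atom pair counts {3, 1, 2} yields start
// indices {0, 3, 4}; this is how the per-center-atom start indices into the
// angular pair list are built below.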
template <typename DataT, typename IndexT>
int cubEncode(
const DataT* d_in,
DataT* d_unique_out,
IndexT* d_counts_out,
int num_items,
int* d_num_runs_out,
cudaStream_t stream) {
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceRunLengthEncode::Encode(
d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run encoding
cub::DeviceRunLengthEncode::Encode(
d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items, stream);
int num_selected = 0;
cudaMemcpyAsync(&num_selected, d_num_runs_out, sizeof(int), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
return num_selected;
}
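// Illustrative example: with the PairDist equality above (same molecule and same
// central atom i), an input such as {(i=0),(i=0),(i=0),(i=2),(i=2)} produces
// d_unique_out = {(i=0),(i=2)}, d_counts_out = {3, 2} and returns 2.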
template <typename DataT, typename LambdaOpT>
int cubDeviceSelect(
const DataT* d_in,
DataT* d_out,
int num_items,
int* d_num_selected_out,
LambdaOpT select_op,
cudaStream_t stream) {
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run selection
cub::DeviceSelect::If(
d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream);
int num_selected = 0;
cudaMemcpyAsync(&num_selected, d_num_selected_out, sizeof(int), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
return num_selected;
}
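// Sketch of typical usage in this file (Rc and buffer names are hypothetical):
//   int n_kept = cubDeviceSelect(
//       d_pairs, d_pairs_within_cutoff, n_pairs, d_count,
//       [=] __device__(const PairDist<float> d) { return d.Rij <= Rc; },
//       stream);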
template <typename DataT>
DataT cubMax(const DataT* d_in, int num_items, DataT* d_out, cudaStream_t stream) {
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
// Allocate temporary storage
auto buffer_tmp = allocator.allocate(temp_storage_bytes);
d_temp_storage = buffer_tmp.get();
// Run max-reduction
cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream);
DataT maxVal = 0;
cudaMemcpyAsync(&maxVal, d_out, sizeof(DataT), cudaMemcpyDefault, stream);
cudaStreamSynchronize(stream);
return maxVal;
}
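// e.g. cubMax on {3, 7, 2} returns 7; below it sizes the per-atom neighbor
// buffers kept in shared memory.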
// NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1
Result cuaev_forward(const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams& aev_params) {
TORCH_CHECK(
(species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type");
TORCH_CHECK(
aev_params.EtaR_t.size(0) == 1 || aev_params.EtaA_t.size(0) == 1 || aev_params.Zeta_t.size(0) == 1,
"cuda extension is currently not supported for the specified "
"configuration");
float Rcr = aev_params.Rcr;
float Rca = aev_params.Rca;
const int n_molecules = species_t.size(0);
const int max_natoms_per_mol = species_t.size(1);
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options());
if (species_t.numel() == 0) {
return {
aev_t, Tensor(), Tensor(), Tensor(), 0, 0, 0, Tensor(), Tensor(), Tensor(), 0, 0, 0, coordinates_t, species_t};
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto thrust_allocator = THCThrustAllocator(at::globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(thrust_allocator).on(stream);
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// buffer to store all the pairwise distances (Rij)
auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol;
auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device());
Tensor tensor_Rij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr();
// init all Rij to inf
PairDist<float> init;
init.Rij = std::numeric_limits<float>::infinity();
thrust::fill(policy, d_Rij, d_Rij + total_natom_pairs, init);
// buffer to store all the pairwise distances that are needed for the radial AEV
// computation
Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr();
auto buffer_count = allocator.allocate(sizeof(int));
int* d_count_out = (int*)buffer_count.get();
const int block_size = 64;
if (n_molecules == 1) {
int tileWidth = 32;
int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
dim3 block(tileWidth, tileWidth, 1);
dim3 grid(tilesPerRow, tilesPerRow, 1);
pairwiseDistanceSingleMolecule<<<grid, block, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
} else {
dim3 block(8, 8, 1);
// Compute pairwise distance (Rij) for all atom pairs in a molecule
// maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
// TODO: the kernel is not optimized for batched huge molecules (max_natoms_per_mol > 1000)
pairwiseDistance<<<n_molecules, block, sizeof(float) * max_natoms_per_mol * 3, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
}
// Extract the Rijs that are needed for the radial AEV computation, i.e. all Rij <= Rcr
int nRadialRij = cubDeviceSelect(
d_Rij,
d_radialRij,
total_natom_pairs,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
stream);
int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs<int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
nRadialRij);
// reuse buffer allocated for all Rij
// d_angularRij will store all the Rij required in Angular AEV computation
Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();
// Extract the Rijs that are needed for the angular AEV computation, i.e. all Rij <= Rca
int nAngularRij = cubDeviceSelect(
d_radialRij,
d_angularRij,
nRadialRij,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rca; },
stream);
Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options);
PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr();
Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options);
int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr();
// group by center atom
int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream);
Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options);
int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr();
cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream);
{
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4
int sxyz = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb;
};
int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream);
int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom);
int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
int angular_length_aligned = align<4>(aev_params.angular_length);
cuAngularAEVs<<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms);
return {aev_t,
tensor_Rij,
tensor_radialRij,
tensor_angularRij,
total_natom_pairs,
nRadialRij,
nAngularRij,
tensor_centralAtom,
tensor_numPairsPerCenterAtom,
tensor_centerAtomStartIdx,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms,
coordinates_t,
species_t};
}
}
Tensor cuaev_backward(const Tensor& grad_output, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto grad_coord = torch::zeros(coordinates_t.sizes(), coordinates_t.options().requires_grad(false)); // [2, 5, 3]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
Tensor grad_radial_dist = torch::zeros(result.nRadialRij, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs_backward_or_doublebackward<false, int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
// For best results, block_size should match the average molecule size (without padding) to reduce atomicAdd contention
nblocks = (result.nRadialRij + block_size - 1) / block_size;
pairwiseDistance_backward_or_doublebackward<false><<<nblocks, block_size, 0, stream>>>(
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
Tensor grad_angular_coord = torch::zeros({result.nAngularRij, 3}, coordinates_t.options().requires_grad(false));
cuAngularAEVs_backward_or_doublebackward<false><<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
return grad_coord;
}
Tensor cuaev_double_backward(const Tensor& grad_force, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto grad_grad_aev = torch::zeros(
{coordinates_t.size(0), coordinates_t.size(1), aev_length},
coordinates_t.options().requires_grad(false)); // [2, 5, 384]
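// Double backward (informal): the upstream gradient with respect to the force
// (i.e. the gradient of grad_coord) is pushed back through the same
// pairwise-distance, radial and angular Jacobians to accumulate grad_grad_aev.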
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
auto grad_force_coord_Rij = torch::zeros({result.nRadialRij}, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij + block_size - 1) / block_size;
pairwiseDistance_backward_or_doublebackward<true><<<nblocks, block_size, 0, stream>>>(
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs_backward_or_doublebackward<true, int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
cuAngularAEVs_backward_or_doublebackward<true><<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
return grad_grad_aev;
}
|
8fc032efdced4949cdee72295dbd8b8ddd57b363.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************//**
* \file calculateForce.cu
* \author Anush Krishnan ([email protected]),
* \author Christopher Minar ([email protected])
* \based of original cuIBM
*/
#include "calculateForce.h"
namespace kernels
{
__global__
void calcForceFadlun(double *force, double *L, double *Lnew, double *Nold, double *N, double *u, double *uold, int *tags, double dt, int nx, int ny)
{
if (threadIdx.x + (blockDim.x * blockIdx.x) >= (ny-1)*nx + (nx-1)*ny)
return;
int i = threadIdx.x + (blockDim.x * blockIdx.x);
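// The expression below is, informally, the residual of the discretized momentum
// equation at tagged (immersed-boundary) nodes:
//   f = (u^{l+1} - u^l)/dt - 0.5*(L^{l+1} + L^l) + 1.5*N^l - 0.5*N^{l-1}
// (Crank-Nicolson for the diffusive term L, Adams-Bashforth for the convective
// term N); the force is zero wherever tags[i] == -1.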
force[i] = (tags[i]!=-1) * (
u[i]/dt - 0.5*Lnew[i] - //u^l+1
uold[i]/dt + 1.5*N[i] - 0.5*L[i] - //u^l
0.5*Nold[i] //u^l-1
);
}
/**
* \brief Calculates drag using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FxX raw pointer to the vector storing the drag in the x-direction
* \param u raw pointer to the vector storing the x-velocity field
* \param p raw pointer to the vector storing the pressure field
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
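// Discrete surface integral evaluated below, written out as a reading of the kernel body
// (one thread per row of cells along the control surface):
//   FxX[row] = -[ (p_E - p_W)*dy + (u_E^2 - u_W^2)*dy - nu*(du/dx|_E - du/dx|_W)*dy ]
// i.e. pressure, momentum-flux and viscous-stress differences across the east (right) and
// west (left) faces, each weighted by the local face height dy.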
__global__
void dragLeftRight(double *FxX, double *u, double *p, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncy)
return;
int Ip = (J+idx)*nx + I,
Iu = (J+idx)*(nx-1) + I;
FxX[idx] = -(
// multiply the pressure with the surface area to get p dy
//(p[e]-p[w])*dy
(
p[Ip+ncx] - p[Ip]
)*dy[J+idx]
+
// ur^2 - ul^2 * dy
(
(u[Iu+ncx]+u[Iu+ncx-1])*(u[Iu+ncx]+u[Iu+ncx-1])/4
- (u[Iu-1]+u[Iu])*(u[Iu-1]+u[Iu])/4
)*dy[J+idx]
-
// du/dx * dy
// approximate using dudx of the inside cell of the cv instead of the lr average
nu*
(
(u[Iu+ncx] - u[Iu+ncx-1])/dx[I+ncx]
- (u[Iu] - u[Iu-1])/dx[I]
)*dy[J+idx]
);
}
/**
* \brief Calculate drag using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FxY raw pointer to the vector storing the drag in the y-direction
 * \param u raw pointer to the vector storing the velocity flux field
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragBottomTop(double *FxY, double *u, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncx)
return;
int Iu = J*(nx-1) + (I-1+idx),
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FxY[idx] = -(
// multiply by dS
(
0.25 * ( u[Iu+ncy*(nx-1)] + u[Iu+(ncy-1)*(nx-1)] )
* ( u[Iv+ncy*nx] + u[Iv+ncy*nx-1] )
-
0.25 * ( u[Iu] + u[Iu-(nx-1)] )
* ( u[Iv] + u[Iv-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(u[Iu+ncy*(nx-1)] - u[Iu+(ncy-1)*(nx-1)])/2.0/(dy[J+ncy]+dy[J+ncy-1]) +
(u[Iv+ncy*nx] - u[Iv+ncy*nx-1]) /2.0/(dx[I+idx]+dx[I+idx-1])
)
-
(
(u[Iu] - u[Iu-(nx-1)])/2.0/(dy[J]+dy[J-1]) +
(u[Iv] - u[Iv-1]) /2.0/(dx[I+idx]+dx[I+idx-1])
)
)
)*0.5*(dx[I+idx]+dx[I+idx-1]);
}
/**
* \brief Calculate drag using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FxU raw pointer to the vector storing the unsteady drag components
 * \param u raw pointer to the vector storing the velocity fluxes at the current time-step
 * \param uold raw pointer to the vector storing the velocity fluxes at the previous time-step
 * \param tagsIn raw pointer to the vector of tags for the velocity nodes (-1 at untagged fluid nodes)
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param dt time increment
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragUnsteady(double *FxU, double *u, double *uold, int *tagsIn, double *dx, double *dy, double dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= (ncx+1)*ncy)
return;
int i = idx%(ncx+1),
j = idx/(ncx+1);
int Iu = (J+j)*(nx-1) + (I-1+i);
FxU[idx] = - (tagsIn[Iu] == -1) * ((u[Iu]*dy[J+j] - uold[Iu]*dy[J+j])/dt * 0.5*(dx[I+i]+dx[I-1+i]));
}
/**
* \brief Calculate lift using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FyX raw pointer to the vector storing the lift components in the x-direction
 * \param u raw pointer to the vector storing the velocity fluxes
 * \param nu viscosity
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftLeftRight(double *FyX, double *u, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncy)
return;
int Iu = (J+idx)*(nx-1) + (I-1),
Iv = (nx-1)*ny + (J-1+idx)*nx + I;
FyX[idx] = -(
// multiply by dS
(
0.25 * ( u[Iu+ncx] + u[Iu+ncx-(nx-1)] )
* ( u[Iv+ncx] + u[Iv+ncx-1] )
-
0.25 * ( u[Iu] + u[Iu-(nx-1)] )
* ( u[Iv] + u[Iv-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(u[Iu+ncx] - u[Iu+ncx-(nx-1)])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(u[Iv+ncx] - u[Iv+ncx-1])/2.0/(dx[I+ncx]+dx[I+ncx-1])
)
-
(
(u[Iu] - u[Iu-(nx-1)])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(u[Iv] - u[Iv-1])/2.0/(dx[I]+dx[I-1])
)
)
)*0.5*(dy[J+idx]+dy[J-1+idx]);
}
/**
* \brief Calculate lift using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FyY raw pointer to the vector storing the lift components in the y-direction
 * \param u raw pointer to the vector storing the velocity fluxes
 * \param p raw pointer to the vector storing the pressure field
 * \param nu viscosity
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftBottomTop(double *FyY, double *u, double *p, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncx)
return;
int Ip = J*nx + I+idx,
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FyY[idx] = -(
// multiply the pressure with the surface area to get p dx
(p[Ip+ncy*nx]-p[Ip-nx])*dx[I+idx]
+
// divide q^2 by dx, so that just v^2 dx is obtained
(
0.25*(u[Iv+(ncy+1)*nx] + u[Iv+ncy*nx])*(u[Iv+(ncy+1)*nx] + u[Iv+ncy*nx])
- 0.25*(u[Iv] + u[Iv-nx])*(u[Iv] + u[Iv-nx])
)*dx[I+idx]
-
// no multiplication or division since dv/dy dx = dq/dy
nu*
(
(u[Iv+(ncy+1)*nx] - u[Iv+ncy*nx])*dx[I+idx]/dy[J+ncy]
- (u[Iv] - u[Iv-nx])*dx[I]/dy[J-1]
)
);
}
/**
* \brief Calculate lift using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FyU raw pointer to the vector storing the unsteady lift components
 * \param u raw pointer to the vector storing the velocity fluxes at the current time-step
 * \param uold raw pointer to the vector storing the velocity fluxes at the previous time-step
 * \param tagsIn raw pointer to the vector of tags for the velocity nodes (-1 at untagged fluid nodes)
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param dt time increment
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftUnsteady(double *FyU, double *u, double *uold, int *tagsIn, double *dx, double *dy, double dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if( idx >= ncx*(ncy+1) )
return;
int i = idx%ncx,
j = idx/ncx;
int Iv = (J-1+j)*nx + (I+i) + (nx-1)*ny;
FyU[idx] = -(tagsIn[Iv] == -1) * ((u[Iv]*dx[I+i] - uold[Iv]*dx[I+i])/dt * 0.5*(dy[J+j]+dy[J-1+j]));
}
/**
* \brief To be documented
*/
}
| 8fc032efdced4949cdee72295dbd8b8ddd57b363.cu | /***************************************************************************//**
* \file calculateForce.cu
* \author Anush Krishnan ([email protected]),
* \author Christopher Minar ([email protected])
* \based of original cuIBM
*/
#include "calculateForce.h"
namespace kernels
{
__global__
void calcForceFadlun(double *force, double *L, double *Lnew, double *Nold, double *N, double *u, double *uold, int *tags, double dt, int nx, int ny)
{
if (threadIdx.x + (blockDim.x * blockIdx.x) >= (ny-1)*nx + (nx-1)*ny)
return;
int i = threadIdx.x + (blockDim.x * blockIdx.x);
force[i] = (tags[i]!=-1) * (
u[i]/dt - 0.5*Lnew[i] - //u^l+1
uold[i]/dt + 1.5*N[i] - 0.5*L[i] - //u^l
0.5*Nold[i] //u^l-1
);
}
/**
* \brief Calculates drag using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FxX raw pointer to the vector storing the drag in the x-direction
 * \param u raw pointer to the vector storing the velocity flux field
 * \param p raw pointer to the vector storing the pressure field
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragLeftRight(double *FxX, double *u, double *p, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncy)
return;
int Ip = (J+idx)*nx + I,
Iu = (J+idx)*(nx-1) + I;
FxX[idx] = -(
// multiply the pressure with the surface area to get p dy
//(p[e]-p[w])*dy
(
p[Ip+ncx] - p[Ip]
)*dy[J+idx]
+
// ur^2 - ul^2 * dy
(
(u[Iu+ncx]+u[Iu+ncx-1])*(u[Iu+ncx]+u[Iu+ncx-1])/4
- (u[Iu-1]+u[Iu])*(u[Iu-1]+u[Iu])/4
)*dy[J+idx]
-
// du/dx * dy
// approximate using dudx of the inside cell of the cv instead of the lr average
nu*
(
(u[Iu+ncx] - u[Iu+ncx-1])/dx[I+ncx]
- (u[Iu] - u[Iu-1])/dx[I]
)*dy[J+idx]
);
}
/**
* \brief Calculate drag using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FxY raw pointer to the vector storing the drag in the y-direction
 * \param u raw pointer to the vector storing the velocity flux field
* \param nu viscosity
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direction
* \param I x-index of the bottom-left corner cell of the control surface
* \param J y-index of the top-right corner cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param ncy number of cells in the y-direction in the control volume
*/
__global__
void dragBottomTop(double *FxY, double *u, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncx)
return;
int Iu = J*(nx-1) + (I-1+idx),
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FxY[idx] = -(
// multiply by dS
(
0.25 * ( u[Iu+ncy*(nx-1)] + u[Iu+(ncy-1)*(nx-1)] )
* ( u[Iv+ncy*nx] + u[Iv+ncy*nx-1] )
-
0.25 * ( u[Iu] + u[Iu-(nx-1)] )
* ( u[Iv] + u[Iv-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(u[Iu+ncy*(nx-1)] - u[Iu+(ncy-1)*(nx-1)])/2.0/(dy[J+ncy]+dy[J+ncy-1]) +
(u[Iv+ncy*nx] - u[Iv+ncy*nx-1]) /2.0/(dx[I+idx]+dx[I+idx-1])
)
-
(
(u[Iu] - u[Iu-(nx-1)])/2.0/(dy[J]+dy[J-1]) +
(u[Iv] - u[Iv-1]) /2.0/(dx[I+idx]+dx[I+idx-1])
)
)
)*0.5*(dx[I+idx]+dx[I+idx-1]);
}
/**
* \brief Calculate drag using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FxU raw pointer to the vector storing the unsteady drag components
* \param q raw pointer to the vector storing all the fluxes
* \param qOld raw pointer to the vector sotring all the fluxes at the previous time-step
* \param dx raw pointer to the vector storing the cell widths in the x-direction
* \param dy raw pointer to the vector storing the cell widths in the y-direction
* \param dt time increment
* \param nx number of cells in the x-direction
* \param ny number of cells in the y-direcyion
* \param I x-index of the bottom-left cell of the control surface
* \param J y-index of the top-right cell of the control surface
* \param ncx number of cells in the x-direction in the control volume
* \param nyc number of cells in the y-direction in the control volume
*/
__global__
void dragUnsteady(double *FxU, double *u, double *uold, int *tagsIn, double *dx, double *dy, double dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= (ncx+1)*ncy)
return;
int i = idx%(ncx+1),
j = idx/(ncx+1);
int Iu = (J+j)*(nx-1) + (I-1+i);
FxU[idx] = - (tagsIn[Iu] == -1) * ((u[Iu]*dy[J+j] - uold[Iu]*dy[J+j])/dt * 0.5*(dx[I+i]+dx[I-1+i]));
}
/**
* \brief Calculate lift using a control-volume approach (left-right).
*
* Evaluate the contribution from the left and right parts of the control surface.
*
* \param FyX raw pointer to the vector storing the lift components in the x-direction
 * \param u raw pointer to the vector storing the velocity fluxes
 * \param nu viscosity
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftLeftRight(double *FyX, double *u, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx > ncy)
return;
int Iu = (J+idx)*(nx-1) + (I-1),
Iv = (nx-1)*ny + (J-1+idx)*nx + I;
FyX[idx] = -(
// multiply by dS
(
0.25 * ( u[Iu+ncx] + u[Iu+ncx-(nx-1)] )
* ( u[Iv+ncx] + u[Iv+ncx-1] )
-
0.25 * ( u[Iu] + u[Iu-(nx-1)] )
* ( u[Iv] + u[Iv-1] )
)
-
// multiply by dS (cannot use the leftRight trick in this case)
nu*
(
(
(u[Iu+ncx] - u[Iu+ncx-(nx-1)])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(u[Iv+ncx] - u[Iv+ncx-1])/2.0/(dx[I+ncx]+dx[I+ncx-1])
)
-
(
(u[Iu] - u[Iu-(nx-1)])/2.0/(dy[J+idx]+dy[J-1+idx]) +
(u[Iv] - u[Iv-1])/2.0/(dx[I]+dx[I-1])
)
)
)*0.5*(dy[J+idx]+dy[J-1+idx]);
}
/**
* \brief Calculate lift using a control-volume approach (bottom-top).
*
* Evaluate the contribution from the bottom and top parts of the control surface.
*
* \param FyY raw pointer to the vector storing the lift components in the y-direction
 * \param u raw pointer to the vector storing the velocity fluxes
 * \param p raw pointer to the vector storing the pressure field
 * \param nu viscosity
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftBottomTop(double *FyY, double *u, double *p, double nu, double *dx, double *dy,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= ncx)
return;
int Ip = J*nx + I+idx,
Iv = (nx-1)*ny + (J-1)*nx + I+idx;
FyY[idx] = -(
// multiply the pressure with the surface area to get p dx
(p[Ip+ncy*nx]-p[Ip-nx])*dx[I+idx]
+
// divide q^2 by dx, so that just v^2 dx is obtained
(
0.25*(u[Iv+(ncy+1)*nx] + u[Iv+ncy*nx])*(u[Iv+(ncy+1)*nx] + u[Iv+ncy*nx])
- 0.25*(u[Iv] + u[Iv-nx])*(u[Iv] + u[Iv-nx])
)*dx[I+idx]
-
// no multiplication or division since dv/dy dx = dq/dy
nu*
(
(u[Iv+(ncy+1)*nx] - u[Iv+ncy*nx])*dx[I+idx]/dy[J+ncy]
- (u[Iv] - u[Iv-nx])*dx[I]/dy[J-1]
)
);
}
/**
* \brief Calculate lift using a control-volume approach (unsteady).
*
* Evaluate the unsteady contribution of the control volume.
*
* \param FyU raw pointer to the vector storing the unsteady lift components
 * \param u raw pointer to the vector storing the velocity fluxes at the current time-step
 * \param uold raw pointer to the vector storing the velocity fluxes at the previous time-step
 * \param tagsIn raw pointer to the vector of tags for the velocity nodes (-1 at untagged fluid nodes)
 * \param dx raw pointer to the vector storing the cell widths in the x-direction
 * \param dy raw pointer to the vector storing the cell widths in the y-direction
 * \param dt time increment
 * \param nx number of cells in the x-direction
 * \param ny number of cells in the y-direction
 * \param I x-index of the bottom-left cell of the control surface
 * \param J y-index of the top-right cell of the control surface
 * \param ncx number of cells in the x-direction in the control volume
 * \param ncy number of cells in the y-direction in the control volume
*/
__global__
void liftUnsteady(double *FyU, double *u, double *uold, int *tagsIn, double *dx, double *dy, double dt,
int nx, int ny, int I, int J, int ncx, int ncy)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if( idx >= ncx*(ncy+1) )
return;
int i = idx%ncx,
j = idx/ncx;
int Iv = (J-1+j)*nx + (I+i) + (nx-1)*ny;
FyU[idx] = -(tagsIn[Iv] == -1) * ((u[Iv]*dx[I+i] - uold[Iv]*dx[I+i])/dt * 0.5*(dy[J+j]+dy[J-1+j]));
}
/**
* \brief To be documented
*/
}
|
0fe85ac972c3838ef46ea1a5de975bf7380e0f63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define min(X, Y) (((X) < (Y)) ? (X) : (Y))
#define max(X, Y) (((X) > (Y)) ? (X) : (Y))
#define Mask_width 5
#define maskRadius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
//@@ INSERT CODE HERE
__global__ void convolution(float *I, float *K, float *P, int channels, int width, int height)
{
  // one thread per output pixel, matching the 2D grid configured in main()
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  if (i >= height || j >= width)
    return;
  for (int k = 0; k < channels; k++)
  {
    float accum = 0.0f;
    for (int y = -maskRadius; y <= maskRadius; y++)
    {
      for (int x = -maskRadius; x <= maskRadius; x++)
      {
        int xOffset = j + x;
        int yOffset = i + y;
        if (xOffset >= 0 && xOffset < width && yOffset >= 0 && yOffset < height)
        {
          float imagePixel = I[(yOffset * width + xOffset) * channels + k];
          float maskValue = K[(y + maskRadius) * Mask_width + (x + maskRadius)];
          accum += imagePixel * maskValue;
        }
      }
    }
    P[(i * width + j) * channels + k] = clamp(accum);
  }
}
int main(int argc, char *argv[])
{
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
if (arg.argc != 11)
{
printf("Usage: ./Convolution_Template e <expected.ppm> i <input0.ppm> , <input1.raw> o <output.ppm> t image\n");
exit(0);
}
inputImageFile = wbArg_getInputFile(arg, 3);
inputMaskFile = wbArg_getInputFile(arg, 5);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
hipMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **)&deviceMaskData, 5 * 5 * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, hostMaskData, 5 * 5 * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridSize((int)ceil(imageWidth/(float)blockSize.x), (int)ceil(imageHeight/(float)blockSize.y), 1);
hipLaunchKernelGGL(( convolution), dim3(gridSize), dim3(blockSize), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ Insert code here
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| 0fe85ac972c3838ef46ea1a5de975bf7380e0f63.cu | #include <wb.h>
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while (0)
#define min(X, Y) (((X) < (Y)) ? (X) : (Y))
#define max(X, Y) (((X) > (Y)) ? (X) : (Y))
#define Mask_width 5
#define maskRadius Mask_width / 2
#define TILE_WIDTH 16
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0.0), 1.0))
//@@ INSERT CODE HERE
__global__ void convolution(float *I, float *K, float *P, int channels, int width, int height)
{
  // one thread per output pixel, matching the 2D grid configured in main()
  int j = blockIdx.x * blockDim.x + threadIdx.x; // column
  int i = blockIdx.y * blockDim.y + threadIdx.y; // row
  if (i >= height || j >= width)
    return;
  for (int k = 0; k < channels; k++)
  {
    float accum = 0.0f;
    for (int y = -maskRadius; y <= maskRadius; y++)
    {
      for (int x = -maskRadius; x <= maskRadius; x++)
      {
        int xOffset = j + x;
        int yOffset = i + y;
        if (xOffset >= 0 && xOffset < width && yOffset >= 0 && yOffset < height)
        {
          float imagePixel = I[(yOffset * width + xOffset) * channels + k];
          float maskValue = K[(y + maskRadius) * Mask_width + (x + maskRadius)];
          accum += imagePixel * maskValue;
        }
      }
    }
    P[(i * width + j) * channels + k] = clamp(accum);
  }
}
int main(int argc, char *argv[])
{
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
if (arg.argc != 11)
{
printf("Usage: ./Convolution_Template e <expected.ppm> i <input0.ppm> , <input1.raw> o <output.ppm> t image\n");
exit(0);
}
inputImageFile = wbArg_getInputFile(arg, 3);
inputMaskFile = wbArg_getInputFile(arg, 5);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == 5); /* mask height is fixed to 5 in this mp */
assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
cudaMalloc((void **)&deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **)&deviceMaskData, 5 * 5 * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, hostMaskData, 5 * 5 * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridSize((int)ceil(imageWidth/(float)blockSize.x), (int)ceil(imageHeight/(float)blockSize.y), 1);
convolution<<<gridSize, blockSize>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ Insert code here
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
2fc8b12ad3932b3401a3e4ae5cd00f8d6ab4650f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <torch/types.h>
#include "computeUtil.h"
__global__ void mhsddmm(const int v, const int f, const int h, const int nnz,
int *rowptr, int *colind, float *grad,
float *feature, float *out) // V * H * F
{
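  // Thread mapping implied by the indexing below: each block in gridDim.x covers 16 nonzeros,
  // each of the 4 thread rows (threadIdx.y) owns 4 consecutive nonzeros, the 32 lanes
  // (threadIdx.x) stride over the feature dimension f, and blockIdx.y selects the head.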
int eid = (blockIdx.x << 4) + (threadIdx.y << 2);
int cid = threadIdx.x;
int hid = blockIdx.y;
if (blockIdx.x < nnz / 16)
{
float multi[4] = {0, 0, 0, 0};
int offset1[4], offset2[4];
float D1tmp[4], D2tmp[4];
Load<int4, int>(offset2, colind, eid);
offset1[0] = findRow(rowptr, eid, 0, v);
offset1[3] = findRow(rowptr, eid + 3, offset1[0], v);
offset1[1] = findRow(rowptr, eid + 1, offset1[0], offset1[3]);
offset1[2] = findRow(rowptr, eid + 2, offset1[1], offset1[3]);
selfMulConst4<int>(offset1, f * h);
selfAddConst4<int>(offset1, hid * f);
selfMulConst4<int>(offset2, f * h);
selfAddConst4<int>(offset2, hid * f);
for (int i = 0; i < (f >> 5); i++)
{
Load4<float, float>(D1tmp, grad, offset1, cid);
Load4<float, float>(D2tmp, feature, offset2, cid);
Dot4<float>(multi, D1tmp, D2tmp);
cid += 32;
}
int res = f & 31;
if(res)
{
float D1[4] = {0, 0, 0, 0}, D2[4] = {0, 0, 0, 0};
if(threadIdx.x < res)
{
Load4<float, float>(D1, grad, offset1, cid);
Load4<float, float>(D2, feature, offset2, cid);
Dot4<float>(multi, D1, D2);
}
}
AllReduce4<float>(multi, 16, 32);
if (threadIdx.x == 0)
{
out[eid * h + hid] = multi[0];
out[(eid + 1) * h + hid] = multi[1];
out[(eid + 2) * h + hid] = multi[2];
out[(eid + 3) * h + hid] = multi[3];
}
}
else // Dynamic parallel?
{
eid = nnz - (nnz & 15) + (blockIdx.x - (nnz / 16));
int offset1 = findRow(rowptr, eid, 0, v) * f * h + hid * f;
int offset2 = colind[eid] * f * h + hid * f;
float multi = 0;
int off1 = cid = threadIdx.x;
float D1tmp0, D2tmp0;
for (int cc = 0; cc < (f >> 5); cc++)
{
D1tmp0 = grad[offset1 + cid];
D2tmp0 = feature[offset2 + cid];
multi += D1tmp0 * D2tmp0;
cid += 32;
}
int res = f & 31;
D1tmp0 = D2tmp0 = 0;
if(res)
{
if(off1 < res)
{
D1tmp0 = grad[offset1 + cid];
D2tmp0 = feature[offset2 + cid];
}
multi += D1tmp0 * D2tmp0;
}
for (int stride = 16; stride > 0; stride >>= 1)
{
multi += __shfl_xor_sync(0xffffffff, multi, stride, 32);
}
if (threadIdx.x == 0 && threadIdx.y == 0)
{
out[eid * h + hid] = multi;
}
}
}
torch::Tensor mhsddmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor grad, // V * H * F
torch::Tensor feature // V * H * F
)
{
const auto v = rowptr.size(0) - 1; // V
const auto nnz = colind.size(0); // E
const auto h = feature.size(1); // H
const auto f = feature.size(2); // F
auto devid = feature.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({nnz, h}, options);
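  // Grid layout matches the kernel's two branches: nnz/16 blocks process full groups of 16
  // nonzeros, and the extra (nnz & 15) blocks each handle one leftover nonzero in the tail
  // branch; blockDim is 32 feature lanes x 4 nonzero rows.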
hipLaunchKernelGGL(( mhsddmm), dim3(dim3(nnz / 16 + (nnz & 15), h, 1)), dim3(dim3(32, 4, 1)), 0, 0, v, f, h, nnz,
rowptr.data_ptr<int>(), colind.data_ptr<int>(), grad.data_ptr<float>(), feature.data_ptr<float>(), out.data_ptr<float>());
return out;
}
| 2fc8b12ad3932b3401a3e4ae5cd00f8d6ab4650f.cu | #include <cuda.h>
#include <torch/types.h>
#include "computeUtil.h"
__global__ void mhsddmm(const int v, const int f, const int h, const int nnz,
int *rowptr, int *colind, float *grad,
float *feature, float *out) // V * H * F
{
int eid = (blockIdx.x << 4) + (threadIdx.y << 2);
int cid = threadIdx.x;
int hid = blockIdx.y;
if (blockIdx.x < nnz / 16)
{
float multi[4] = {0, 0, 0, 0};
int offset1[4], offset2[4];
float D1tmp[4], D2tmp[4];
Load<int4, int>(offset2, colind, eid);
offset1[0] = findRow(rowptr, eid, 0, v);
offset1[3] = findRow(rowptr, eid + 3, offset1[0], v);
offset1[1] = findRow(rowptr, eid + 1, offset1[0], offset1[3]);
offset1[2] = findRow(rowptr, eid + 2, offset1[1], offset1[3]);
selfMulConst4<int>(offset1, f * h);
selfAddConst4<int>(offset1, hid * f);
selfMulConst4<int>(offset2, f * h);
selfAddConst4<int>(offset2, hid * f);
for (int i = 0; i < (f >> 5); i++)
{
Load4<float, float>(D1tmp, grad, offset1, cid);
Load4<float, float>(D2tmp, feature, offset2, cid);
Dot4<float>(multi, D1tmp, D2tmp);
cid += 32;
}
int res = f & 31;
if(res)
{
float D1[4] = {0, 0, 0, 0}, D2[4] = {0, 0, 0, 0};
if(threadIdx.x < res)
{
Load4<float, float>(D1, grad, offset1, cid);
Load4<float, float>(D2, feature, offset2, cid);
Dot4<float>(multi, D1, D2);
}
}
AllReduce4<float>(multi, 16, 32);
if (threadIdx.x == 0)
{
out[eid * h + hid] = multi[0];
out[(eid + 1) * h + hid] = multi[1];
out[(eid + 2) * h + hid] = multi[2];
out[(eid + 3) * h + hid] = multi[3];
}
}
else // Dynamic parallel?
{
eid = nnz - (nnz & 15) + (blockIdx.x - (nnz / 16));
int offset1 = findRow(rowptr, eid, 0, v) * f * h + hid * f;
int offset2 = colind[eid] * f * h + hid * f;
float multi = 0;
int off1 = cid = threadIdx.x;
float D1tmp0, D2tmp0;
for (int cc = 0; cc < (f >> 5); cc++)
{
D1tmp0 = grad[offset1 + cid];
D2tmp0 = feature[offset2 + cid];
multi += D1tmp0 * D2tmp0;
cid += 32;
}
int res = f & 31;
D1tmp0 = D2tmp0 = 0;
if(res)
{
if(off1 < res)
{
D1tmp0 = grad[offset1 + cid];
D2tmp0 = feature[offset2 + cid];
}
multi += D1tmp0 * D2tmp0;
}
for (int stride = 16; stride > 0; stride >>= 1)
{
multi += __shfl_xor_sync(0xffffffff, multi, stride, 32);
}
if (threadIdx.x == 0 && threadIdx.y == 0)
{
out[eid * h + hid] = multi;
}
}
}
torch::Tensor mhsddmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor grad, // V * H * F
torch::Tensor feature // V * H * F
)
{
const auto v = rowptr.size(0) - 1; // V
const auto nnz = colind.size(0); // E
const auto h = feature.size(1); // H
const auto f = feature.size(2); // F
auto devid = feature.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({nnz, h}, options);
mhsddmm<<<dim3(nnz / 16 + (nnz & 15), h, 1), dim3(32, 4, 1)>>>(v, f, h, nnz,
rowptr.data_ptr<int>(), colind.data_ptr<int>(), grad.data_ptr<float>(), feature.data_ptr<float>(), out.data_ptr<float>());
return out;
}
|
99acf9411d817737542d3bec174836b41e1c5c37.hip | // !!! This is a file automatically generated by hipify!!!
#include "../NativeOps.h"
#include <hip/hip_runtime.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[28];
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,hipFuncAttributes attributes, hipDeviceProp_t properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads, then number of elements we have
if (num_threads > n) num_threads = n;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if(n % num_threads) ++num_blocks;
return dim3(num_blocks,num_threads, (num_threads * sizeof(T)) + (attributes.sharedSizeBytes < 1024 ? 1024 : attributes.sharedSizeBytes));
}
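// Note: the returned dim3 is used as a parameter pack rather than a launch geometry in this
// file -- y carries the threads per block and z the shared-memory bytes requested at launch,
// while the call sites below launch a single block and do not use x.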
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
//printf("Params: gridSize: [1], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = shape::MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
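	// The kernel writes its scalar result through extraPointers[5], which the host reads right
	// after the stream sync below; this presumably relies on the caller supplying pinned
	// (host-mapped) or managed memory there -- an assumption from this usage, not stated here.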
hipLaunchKernelGGL(( indexReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 4, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( indexReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( broadcastDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformStridedDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformDoubleIndex) , dim3(1), dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
hipLaunchKernelGGL(( reduceDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarDoubleIndexes), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[15], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDouble), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformDoubleIndexes), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
hipLaunchKernelGGL(( indexReduceFloat), dim3(1),dim3(launchDims.y), launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( indexReduceFloat), dim3(1),dim3(launchDims.y),launchDims.z * 2, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( broadcastFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformStridedFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformFloatIndex), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( pairWiseTransformFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
hipLaunchKernelGGL(( reduceFloat), dim3(1),dim3(launchDims.y), launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( reduce3Float), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[6], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloat), dim3(1), dim3(launchDims.y),launchDims.z, *stream,
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int *hostShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( scalarFloatIndexes), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(1),dim3(launchDims.y),launchDims.z * 10, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloat), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloat), dim3(1),dim3(launchDims.y), launchDims.z * 3, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( transformFloatIndexes), dim3(1),dim3(launchDims.y),launchDims.z, *stream,
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
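// Descriptive note (added): flattenKernelGeneric copies `input` into `result` starting
// at element `dOffset`, honouring the requested ordering. When the orders match and both
// buffers have a positive element-wise stride it degenerates to a plain grid-stride copy;
// otherwise each thread converts its linear index into coordinates (ind2sub / ind2subC)
// and resolves the source offset explicitly. The coordinate path allocates a small
// scratch array with in-kernel malloc, so it relies on the device heap being large enough.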
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
int *coord = (int *) malloc(sizeof(int) * rank);
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
free(coord);
}
} else {
int rank = shape::rank(inputShapeInfo);
int *coord = (int *) malloc(sizeof(int) * rank);
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
free(coord);
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo) {
flattenKernelGeneric<double>(offset, order, result, resultShapeInfo, input, inputShapeInfo);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo) {
flattenKernelGeneric<float>(offset, order, result, resultShapeInfo, input, inputShapeInfo);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the result array
* @param input the input array to append
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
float *xPointer = reinterpret_cast<float *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *yPointer = reinterpret_cast<float *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
int length = (int) extraPointers[2];
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
* @param resultShapeInfo the shape info for the result array
* @param input the input array to append
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
double *xPointer = reinterpret_cast<double *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *yPointer = reinterpret_cast<double *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer);
checkCudaErrors(hipStreamSynchronize(*stream));
}
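// Hedged usage sketch (illustrative only; `ops`, `extras`, `stream` and the dev* buffers
// are hypothetical names the caller must prepare, and initializeDevicesAndFunctions()
// must already have been called so funcAttributes/deviceProperties are populated):
//
//   NativeOps ops;
//   Nd4jPointer extras[6];
//   extras[0] = (Nd4jPointer) hostInputShapeInfo;  // host shape info for launch sizing
//   extras[1] = (Nd4jPointer) stream;              // stream handle
//   extras[2] = (Nd4jPointer) 0;                   // device index
//   ops.flattenFloat(extras, /*offset=*/0, 'c',
//                    devResult, devResultShapeInfo,   // flat target being appended to
//                    devInput,  devInputShapeInfo);   // array being appended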
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipGetDeviceProperties(&deviceProperties[i], i);
}
hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, float *params, float *result,int *resultShapeInfo) = transformFloat;
hipFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride) = transformFloat;
hipFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
hipFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo,float *params, float *result,int *resultShapeInfo) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride) = scalarFloat;
hipFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
hipFuncGetAttributes(&funcAttributes[7], reduce3Float);
hipFuncGetAttributes(&funcAttributes[8], reduceFloat);
hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
hipFuncGetAttributes(&funcAttributes[12], broadcastFloat);
hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, double *params, double *result,int *resultShapeInfo) = transformDouble;
hipFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride) = transformDouble;
hipFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
hipFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo,double *params, double *result,int *resultShapeInfo) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride) = scalarDouble;
hipFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
hipFuncGetAttributes(&funcAttributes[21], reduce3Double);
hipFuncGetAttributes(&funcAttributes[22], reduceDouble);
hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
hipFuncGetAttributes(&funcAttributes[26], broadcastDouble);
hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
}
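// Hedged note: initializeDevicesAndFunctions() is expected to run once before any of the
// exec*/flatten* wrappers above, since they all index the deviceProperties and
// funcAttributes arrays populated here. A minimal host-side sequence (names illustrative):
//
//   NativeOps ops;
//   ops.initializeDevicesAndFunctions();  // cache per-device properties + kernel attributes
//   // ... exec* / flatten* / malloc* calls can now be dispatched safely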
/**
* This method acquires memory chunk of requested size on host side
*
* @param memorySize memory size, in bytes
* @param flags optional parameter
* @return pointer to the allocated pinned host chunk, or 0L if the allocation failed
*/
Nd4jPointer NativeOps::mallocHost(long memorySize, int flags) {
Nd4jPointer pointer;
hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocMapped | hipHostMallocPortable);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param memorySize memory size, in bytes
* @param ptrToDeviceId pointer to deviceId. For CUDA that's just an int, for OpenCL that's a pointer to a device_id, etc.
* @param flags optional parameter
* @return pointer to the allocated device chunk, or 0L if the allocation failed
*/
Nd4jPointer NativeOps::mallocDevice(long memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
hipError_t res = hipMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
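// Hedged usage sketch (illustrative only; `ops` and `devBuf` are hypothetical, and the
// device-id argument is not inspected by this implementation, so a dummy value is passed):
//
//   NativeOps ops;
//   Nd4jPointer devBuf = ops.mallocDevice(1024 * sizeof(float), (Nd4jPointer) 0, 0);
//   if (devBuf == 0L) { /* allocation failed */ }
//   // ... use the buffer ...
//   ops.freeDevice(devBuf, (Nd4jPointer) 0);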
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
Nd4jPointer NativeOps::freeHost(Nd4jPointer pointer) {
hipError_t res = hipHostFree((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
Nd4jPointer NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
hipError_t res = hipFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
99acf9411d817737542d3bec174836b41e1c5c37.cu
#include "../NativeOps.h"
#include <cuda.h>
#include <cuda_launch_config.h>
#include <buffer.h>
#include <shape.h>
#include <reduce3.h>
#include <reduce.h>
#include <indexreduce.h>
#include <pairwise_transform.h>
#include <transform.h>
#include <scalar.h>
#include <broadcasting.h>
#include <summarystatsreduce.h>
#include <thread>
#include <map>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#include <pointercast.h>
#include <stdio.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[28];
template <typename T>
dim3 getOptimalDimensions(Nd4jIndex n,cudaFuncAttributes attributes, cudaDeviceProp properties) {
// we can combine the two to compute a block size
int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties);
// no real sense launching more threads than the number of elements we have
if (num_threads > n) num_threads = n;
// compute the number of blocks of size num_threads to launch
int num_blocks = n / num_threads;
// check for partial block at the end
if(n % num_threads) ++num_blocks;
return dim3(num_blocks,num_threads, (num_threads * sizeof(T)) + (attributes.sharedSizeBytes < 1024 ? 1024 : attributes.sharedSizeBytes));
}
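// Worked example (illustrative): for n = 4096 elements, if the occupancy helper settles on
// 512 threads per block, then num_blocks = 4096 / 512 = 8 with no partial block, and the
// returned dim3 packs x = 8 (block count), y = 512 (threads per block),
// z = 512 * sizeof(T) + max(sharedSizeBytes, 1024) bytes of dynamic shared memory.
// Most wrappers below launch with <<<1, launchDims.y, launchDims.z>>>, so only y and z are
// consumed; the flatten kernels also use launchDims.x as the grid size.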
/**
* Returns optimal launch parameters
* given the extra pointers passed in.
* The extra pointer should be
* the host pointer for the shape information
* associated with the data.
* From there it is used to obtain the length
* from which we can derive the optimal launch parameters.
*
*/
template <typename T>
dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) {
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties);
//printf("Params: gridSize: [1], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y));
return launchDims;
}
nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) {
int *scalarShapeInfo = shape::createScalarShapeInfo();
nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
nd4j::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
nd4j::buffer::Buffer<int> *scalarDimension;
nd4j::buffer::Buffer<int> *scalarShapeInfo;
std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
int *scalarDimensionBuff = (int *) malloc(sizeof(int));
scalarDimensionBuff[0] = shape::MAX_DIMENSION;
scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
nd4j::buffer::freeBuffer(&scalarShapeInfo);
nd4j::buffer::freeBuffer(&scalarDimension);
}
int *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
int * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
int * getDimensionHostPointer() {
return scalarDimension->data;
}
int * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
nd4j::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = (T*)malloc(sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
nd4j::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
nd4j::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
int *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the result pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
int *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
nd4j::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
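// Hedged usage sketch (illustrative only): ScalarInfo bundles a device scalar buffer with
// scalar shape/dimension metadata for reductions that collapse to a single value.
//
//   ScalarInfo<double> scalar(stream);                 // allocates and copies to device
//   // pass scalar.getDevicePointer(), getDeviceShapeInfo() and getDimensionDevicePointer()
//   // to a reduction kernel launched on `stream`, then read the value back:
//   double value = scalar.getFinalResultFromDevice();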
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
indexReduceDouble<<<1,launchDims.y,launchDims.z * 4, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[27], deviceProperties[(int) extraPointers[2]]);
indexReduceDouble<<<1,launchDims.y,launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[26], deviceProperties[(int) extraPointers[2]]);
broadcastDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *yPointer = reinterpret_cast<double *>(y);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[25], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformStridedDouble<<<1,launchDims.y,launchDims.z, *stream>>> (
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[24], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformDoubleIndex <<<1, launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[23], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
reduceDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceDouble(
Nd4jPointer *extraPointers
,int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,
int dimensionLength) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
reduceDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
double NativeOps::execReduceScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[22], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
reduceDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
double NativeOps::execReduce3ScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Double(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *yPointer = reinterpret_cast<double *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[21], deviceProperties[(int) extraPointers[2]]);
reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[20], deviceProperties[(int) extraPointers[2]]);
scalarDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[19], deviceProperties[(int) extraPointers[2]]);
scalarDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
double *xPointer = reinterpret_cast<double *>(x);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[18], deviceProperties[(int) extraPointers[2]]);
scalarDoubleIndexes<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
double NativeOps::execSummaryStatsScalarDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
double *resultPointer = reinterpret_cast<double *>(extraPointers[5]);
summaryStatsReduceDouble<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
double result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected) {
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
summaryStatsReduceDouble<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension, int dimensionLength,bool biasCorrected){
double *xPointer = reinterpret_cast<double *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[17], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceDouble<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
double *xPointer = reinterpret_cast<double *>(dx);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[16], deviceProperties[(int) extraPointers[2]]);
transformDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[15], deviceProperties[(int) extraPointers[2]]);
transformDouble<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformDouble(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
double *xPointer = reinterpret_cast<double *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
double *resultPointer = reinterpret_cast<double *>(result);
double *extraParamsPointer = reinterpret_cast<double *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[14], deviceProperties[(int) extraPointers[2]]);
transformDoubleIndexes<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execIndexReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
indexReduceFloat<<<1,launchDims.y, launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execIndexReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[13], deviceProperties[(int) extraPointers[2]]);
indexReduceFloat<<<1,launchDims.y,launchDims.z * 2, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param dimension
* @param dimensionLength
*/
void NativeOps::execBroadcastFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension, int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[12], deviceProperties[(int) extraPointers[2]]);
broadcastFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param y
* @param yStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer y,
int yStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams, Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(dx);
float *yPointer = reinterpret_cast<float *>(y);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[11], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformStridedFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
yPointer,
xStride,
yStride,
extraParamsPointer,
resultPointer,
resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
* @param xIndexes
* @param yIndexes
* @param resultIndexes
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer yIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *xIndexesPointer = reinterpret_cast<int *>(xIndexes);
int *yIndexesPointer = reinterpret_cast<int *>(yIndexes);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[10], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformFloatIndex<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer,
xIndexesPointer,
yIndexesPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execPairwiseTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[9], deviceProperties[(int) extraPointers[2]]);
pairWiseTransformFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
yPointer,
extraParamsPointer,
resultPointer,
xShapeInfoPointer,
yShapeInfoPointer,
resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
reduceFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduceFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer dimension,int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
reduceFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @return
*/
float NativeOps::execReduceScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[8], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
reduceFloat<<< 1,launchDims.y, launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer
,extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfo
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
*/
float NativeOps::execReduce3ScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo) {
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
	float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParamsVals
* @param y
* @param yShapeInfo
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execReduce3Float(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParamsVals,
Nd4jPointer y,
Nd4jPointer yShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *yPointer = reinterpret_cast<float *>(y);
int *yShapeInfoPointer = reinterpret_cast<int *>(yShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParamsVals);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[7], deviceProperties[(int) extraPointers[2]]);
reduce3Float<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
yPointer,
yShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xStride
* @param result
* @param resultStride
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
int xStride,
Nd4jPointer result,
int resultStride,
double scalar,
Nd4jPointer extraParams,
Nd4jIndex n){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[6], deviceProperties[(int) extraPointers[2]]);
scalarFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
float scalar,
Nd4jPointer extraParams){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostXShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
scalarFloat<<<1, launchDims.y,launchDims.z, *stream>>>(
opNum,
scalar,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param scalar
* @param extraParams
* @param n
* @param xIndexes
* @param resultIndexes
*/
void NativeOps::execScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
double scalar,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes){
float *xPointer = reinterpret_cast<float *>(x);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int *hostShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
Nd4jIndex n = shape::length(hostShapeInfo);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[4], deviceProperties[(int) extraPointers[2]]);
scalarFloatIndexes<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
scalar,
xPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
*/
float NativeOps::execSummaryStatsScalarFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
summaryStatsReduceFloat<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
NULL,
NULL,
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
float result = resultPointer[0];
return result;
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfo
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceFloat<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
NULL,
1,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param x
* @param xShapeInfo
* @param extraParams
* @param result
* @param resultShapeInfoBuffer
* @param dimension
* @param dimensionLength
*/
void NativeOps::execSummaryStatsFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer x,
Nd4jPointer xShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer result,
Nd4jPointer resultShapeInfoBuffer,
Nd4jPointer dimension,
int dimensionLength,bool biasCorrected){
float *xPointer = reinterpret_cast<float *>(x);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfoBuffer);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *dimensionPointer = reinterpret_cast<int *>(dimension);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[3], deviceProperties[(int) extraPointers[2]]);
summaryStatsReduceFloat<<<1,launchDims.y,launchDims.z * 10, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultShapeInfoPointer,
dimensionPointer,
dimensionLength,
1,biasCorrected);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xStride
* @param result
* @param resultStride
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
int xStride,
Nd4jPointer result,
int resultStride,
Nd4jPointer extraParams,
Nd4jIndex n) {
float *xPointer = reinterpret_cast<float *>(dx);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[2], deviceProperties[(int) extraPointers[2]]);
transformFloat<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
n,
xPointer,
xStride,
extraParamsPointer,
resultPointer,resultStride);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[1], deviceProperties[(int) extraPointers[2]]);
transformFloat<<<1,launchDims.y, launchDims.z * 3, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,resultShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
/**
*
* @param opNum
* @param dx
* @param xShapeInfo
* @param result
* @param resultShapeInfo
* @param extraParams
* @param n
*/
void NativeOps::execTransformFloat(
Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer dx,
Nd4jPointer xShapeInfo,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer extraParams,
Nd4jPointer xIndexes,
Nd4jPointer resultIndexes) {
float *xPointer = reinterpret_cast<float *>(dx);
int *xShapeInfoPointer = reinterpret_cast<int *>(xShapeInfo);
float *resultPointer = reinterpret_cast<float *>(result);
float *extraParamsPointer = reinterpret_cast<float *>(extraParams);
int *resultIndexesPointer = reinterpret_cast<int *>(resultIndexes);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[0], deviceProperties[(int) extraPointers[2]]);
transformFloatIndexes<<<1,launchDims.y,launchDims.z, *stream>>>(
opNum,
xPointer,
xShapeInfoPointer,
extraParamsPointer,
resultPointer,
resultIndexesPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
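// Descriptive note (added; inferred from the code below): flattenKernelGeneric copies one input
// NDArray into a larger flat buffer starting at element dOffset. When the input ordering matches
// the requested order and both buffers have a positive element-wise stride, it performs a simple
// grid-stride strided copy; otherwise it falls back to ind2sub/ind2subC plus getOffset to walk the
// input in the requested order. The fallback path allocates the coordinate scratch array with
// in-kernel malloc/free, once per thread.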
template <typename T>
__device__ void flattenKernelGeneric(int dOffset,
char order,
T *result,
int *resultShapeInfo,
T *input,
int *inputShapeInfo) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int *zShape = shape::shapeOf(resultShapeInfo);
int *zStride = shape::stride(resultShapeInfo);
int *yShape = shape::shapeOf(inputShapeInfo);
int *yStride = shape::stride(inputShapeInfo);
char yOrder = shape::order(inputShapeInfo);
int len = shape::length(inputShapeInfo);
int resultEWS = shape::elementWiseStride(resultShapeInfo);
int inputEWS = shape::elementWiseStride(inputShapeInfo);
if (yOrder == order) {
if (resultEWS >= 1 && inputEWS >= 1) {
for (int i = tid; i < len; i+= gridDim.x * blockDim.x) {
result[i * resultEWS + dOffset] = input[i * inputEWS];
}
} else {
int rank = shape::rank(inputShapeInfo);
int *coord = (int *) malloc(sizeof(int) * rank);
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i + dOffset] = input[offset];
}
}
free(coord);
}
} else {
int rank = shape::rank(inputShapeInfo);
int *coord = (int *) malloc(sizeof(int) * rank);
if(order == 'f') {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2sub(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
else {
for(int i = tid; i < len; i+= gridDim.x * blockDim.x) {
shape::ind2subC(rank,yShape,i,&coord);
int offset = shape::getOffset(0,yShape,yStride,coord,rank);
result[i+dOffset] = input[offset];
}
}
free(coord);
}
}
extern "C" __global__ void flattenKernelDouble(int offset,
char order,
double *result,
int *resultShapeInfo,
double *input,
int *inputShapeInfo) {
flattenKernelGeneric<double>(offset, order, result, resultShapeInfo, input, inputShapeInfo);
}
extern "C" __global__ void flattenKernelFloat(int offset,
char order,
float *result,
int *resultShapeInfo,
float *input,
int *inputShapeInfo) {
flattenKernelGeneric<float>(offset, order, result, resultShapeInfo, input, inputShapeInfo);
}
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
 * @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenFloat(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
float *xPointer = reinterpret_cast<float *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
float *yPointer = reinterpret_cast<float *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
int length = (int) extraPointers[2];
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
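// Illustrative usage sketch (added; hypothetical helper, not part of the original file). It assumes
// the caller has already uploaded `n` device arrays plus their shape buffers and one flat result
// buffer, and that lengths[i] holds the element count of input i. Wrapped in #if 0 so it is never
// compiled.
#if 0
void flattenAllFloat(NativeOps &ops, Nd4jPointer *extraPointers,
                     Nd4jPointer flat, Nd4jPointer flatShapeInfo,
                     Nd4jPointer *inputs, Nd4jPointer *inputShapeInfos,
                     int *lengths, int n) {
    int offset = 0;
    for (int i = 0; i < n; i++) {
        // append input i to the flat buffer in 'c' (row-major) order at the running offset
        ops.flattenFloat(extraPointers, offset, 'c', flat, flatShapeInfo,
                         inputs[i], inputShapeInfos[i]);
        offset += lengths[i];
    }
}
#endif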
/**
* Append an input array
* to the end of a flat array
* in a particular order
* @param offset the offset of the array to start at
* @param order the order
* @param result the result array
 * @param resultShapeInfo the shape info for the array
* @param input the input for the array
* @param inputShapeInfo the shape information for that array
*/
void NativeOps::flattenDouble(
Nd4jPointer *extraPointers,
int offset,
char order,
Nd4jPointer result,
Nd4jPointer resultShapeInfo,
Nd4jPointer input,
Nd4jPointer inputShapeInfo) {
double *xPointer = reinterpret_cast<double *>(result);
int *xShapeInfoPointer = reinterpret_cast<int *>(resultShapeInfo);
double *yPointer = reinterpret_cast<double *>(input);
int *yShapeInfoPointer = reinterpret_cast<int *>(inputShapeInfo);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[(int) extraPointers[2]]);
flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, xPointer, xShapeInfoPointer, yPointer, yShapeInfoPointer);
checkCudaErrors(cudaStreamSynchronize(*stream));
}
void NativeOps::initializeDevicesAndFunctions() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaGetDeviceProperties(&deviceProperties[i], i);
}
cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);
void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, float *params, float *result,int *resultShapeInfo) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[1], transformFloatPointer1);
void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride) = transformFloat;
cudaFuncGetAttributes(&funcAttributes[2], transformFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);
cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);
void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo,float *params, float *result,int *resultShapeInfo) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[5], scalarFloatPointer1);
void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride) = scalarFloat;
cudaFuncGetAttributes(&funcAttributes[6], scalarFloatPointer2);
cudaFuncGetAttributes(&funcAttributes[7], reduce3Float);
cudaFuncGetAttributes(&funcAttributes[8], reduceFloat);
cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat);
cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex);
cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat);
cudaFuncGetAttributes(&funcAttributes[12], broadcastFloat);
cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat);
///////////////////////////////////////// Doubles are separate, just in case of...
cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes);
void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, double *params, double *result,int *resultShapeInfo) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[15], transformDoublePointer1);
void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride) = transformDouble;
cudaFuncGetAttributes(&funcAttributes[16], transformDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble);
cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes);
void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo,double *params, double *result,int *resultShapeInfo) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[19], scalarDoublePointer1);
void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride) = scalarDouble;
cudaFuncGetAttributes(&funcAttributes[20], scalarDoublePointer2);
cudaFuncGetAttributes(&funcAttributes[21], reduce3Double);
cudaFuncGetAttributes(&funcAttributes[22], reduceDouble);
cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble);
cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex);
cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble);
cudaFuncGetAttributes(&funcAttributes[26], broadcastDouble);
cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble);
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocHost(long memorySize, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocMapped |cudaHostAllocPortable );
if (res != 0)
pointer = 0L;
return pointer;
}
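// Illustrative usage sketch (added; hypothetical, not part of the original file): mallocHost
// signals failure by returning 0 rather than throwing, so callers are expected to check the
// handle before use. Wrapped in #if 0 so it is never compiled.
#if 0
void exampleHostAlloc(NativeOps &ops) {
    long bytes = 1024L * sizeof(float);
    Nd4jPointer host = ops.mallocHost(bytes, 0);
    if (host == 0L) {
        printf("pinned host allocation of %ld bytes failed\n", bytes);
        return;
    }
    float *data = reinterpret_cast<float *>(host);
    data[0] = 1.0f;   // buffer is host-mapped, so it is directly addressable from the CPU
    ops.freeHost(host);
}
#endif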
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer NativeOps::mallocDevice(long memorySize, Nd4jPointer ptrToDeviceId, int flags) {
Nd4jPointer pointer;
cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
if (res != 0)
pointer = 0L;
return pointer;
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
Nd4jPointer NativeOps::freeHost(Nd4jPointer pointer) {
cudaError_t res = cudaFreeHost((void *) pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
Nd4jPointer NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
cudaError_t res = cudaFree((void *)pointer);
if (res != 0)
pointer = 0L;
return 1L;
}
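// Illustrative usage sketch (added; hypothetical, not part of the original file) tying the four
// allocation helpers above together: a pinned staging buffer on the host, a matching device
// buffer, and the corresponding frees. Wrapped in #if 0 so it is never compiled.
#if 0
void exampleDeviceRoundTrip(NativeOps &ops, Nd4jPointer ptrToDeviceId) {
    long bytes = 4096L;
    Nd4jPointer host   = ops.mallocHost(bytes, 0);
    Nd4jPointer device = ops.mallocDevice(bytes, ptrToDeviceId, 0);
    if (host == 0L || device == 0L) {
        // either allocation returning 0 signals failure; release whatever succeeded
        if (host != 0L)   ops.freeHost(host);
        if (device != 0L) ops.freeDevice(device, ptrToDeviceId);
        return;
    }
    cudaMemcpy((void *) device, (void *) host, bytes, cudaMemcpyHostToDevice);
    ops.freeDevice(device, ptrToDeviceId);
    ops.freeHost(host);
}
#endif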
|
e7a66d8e932b894a79b17ffe2d50fe012436ff66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| e7a66d8e932b894a79b17ffe2d50fe012436ff66.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_sm80` is for sm80-sm90, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
f0b81d82af91d1fe7ab3023628b3da1d2d5031c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
#include <iomanip>
#include <vector>
#include <cstdio>
#include <ctime>
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "CycleTimer.h"
#include "filter.h"
#include "filter.cu_incl"
#define DEBUG
#ifdef DEBUG
/* When debugging is enabled, these form aliases to useful functions */
#define dbg_printf(...) printf(__VA_ARGS__);
#else
/* When debugging is disabled, no code gets generated for these */
#define dbg_printf(...)
#endif
using namespace cv;
using namespace cv::cuda;
using namespace std;
#define MAX_IMG_SZ (4096 * 4096 * 2)
#define MAX_PYRAMIDS 5
/* Global memory for keypoints */
const int max_kp = 12000;
__device__ int kp = 0;
__device__ int kp_x[max_kp];
__device__ int kp_y[max_kp];
__device__ float d[MAX_PYRAMIDS][MAX_IMG_SZ];
__device__ float s[MAX_PYRAMIDS + 1][MAX_IMG_SZ];
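/* Note (added; inferred from usage below): kp/kp_x/kp_y appear to accumulate detected keypoint
   coordinates for the whole image, s[] holds the per-level intermediate responses of the separable
   blur (vertical pass, completed and differenced by horizontal_blur), and d[] looks like scratch
   for the per-level difference images. Both buffers are statically sized to MAX_IMG_SZ floats per
   level in device global memory. */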
__device__ int ref_b[256] = {52, 70, 37, 11, 6, 65, 75, 10, 25,
38, 48, 75, 73, 7, 37, 3, 16, 35, 44,
50, 56, 52, 65, 52, 48, 48, 77, 45, 48,
66, 61, 63, 14, 27, 17, 56, 55, 38, 69,
2, 51, 12, 6, 31, 7, 60, 19, 3, 40,
3, 34, 28, 23, 4, 61, 30, 49, 31, 56,
65, 26, 18, 79, 49, 5, 50, 19, 67, 72,
75, 15, 3, 40, 54, 33, 52, 16, 79, 8,
45, 25, 42, 64, 77, 63, 47, 26, 11, 56,
23, 74, 19, 33, 63, 57, 52, 46, 14, 48,
24, 59, 56, 27, 70, 38, 63, 44, 52, 51,
33, 9, 71, 2, 66, 81, 36, 43, 20, 4,
76, 60, 55, 80, 54, 55, 54, 18, 41, 68,
10, 30, 57, 6, 53, 54, 35, 53, 80, 32,
19, 27, 3, 47, 46, 58, 15, 32, 8, 67,
50, 23, 67, 30, 29, 18, 53, 70, 67, 24,
48, 19, 13, 16, 55, 22, 49, 17, 16, 7,
67, 61, 68, 5, 17, 26, 66, 27, 52, 62,
46, 24, 27, 57, 78, 23, 78, 71, 79, 47,
72, 8, 10, 20, 43, 15, 69, 18, 81, 14,
68, 38, 34, 62, 15, 5, 58, 27, 39, 72,
61, 8, 1, 63, 21, 33, 18, 57, 11, 4,
57, 58, 50, 61, 66, 65, 12, 29, 41, 3,
34, 11, 59, 47, 71, 50, 63, 38, 32, 35,
11, 54, 77, 21, 38, 4, 7, 21, 57, 17,
44, 12, 22, 34, 31, 41, 64};
__device__ int ref_a[256] = {75, 70, 47, 48, 70, 1, 3, 38, 20,
39, 16, 53, 28, 71, 31, 15, 5, 34, 72,
74, 53, 4, 57, 15, 59, 16, 51, 79, 9,
7, 77, 69, 32, 73, 3, 28, 42, 31, 22,
4, 63, 1, 1, 8, 16, 17, 63, 80, 36,
48, 16, 74, 54, 37, 33, 65, 7, 12, 24,
52, 72, 38, 34, 70, 54, 77, 54, 73, 44,
81, 7, 69, 80, 60, 38, 28, 2, 79, 51,
47, 17, 8, 7, 44, 44, 25, 75, 34, 48,
15, 30, 54, 6, 28, 33, 2, 13, 54, 47,
42, 31, 29, 46, 65, 18, 39, 64, 72, 41,
19, 64, 14, 15, 66, 54, 42, 70, 37, 32,
51, 45, 70, 9, 20, 67, 64, 20, 4, 50,
9, 16, 81, 27, 57, 48, 37, 72, 27, 21,
78, 73, 44, 60, 76, 31, 54, 33, 56, 10,
55, 52, 24, 56, 56, 79, 45, 4, 38, 65,
48, 38, 62, 19, 26, 57, 61, 19, 47, 56,
26, 39, 79, 76, 25, 44, 81, 22, 19, 55,
24, 17, 43, 78, 39, 66, 59, 14, 11, 24,
57, 16, 12, 53, 26, 33, 53, 34, 45, 63,
69, 21, 32, 58, 73, 49, 62, 2, 73, 19,
72, 81, 13, 13, 67, 72, 11, 69, 61, 47,
24, 79, 22, 3, 62, 67, 53, 14, 80, 20,
72, 17, 10, 10, 45, 81, 57, 33, 61, 16,
16, 9, 66, 3, 63, 36, 63, 67, 41, 30,
64, 35, 49, 47, 22, 44, 40 };
__device__ inline void smoothedSum(float* src, short2 pt, int step, uchar* temp, int idx)
{
const int img_y = (int)pt.y - 4;
const int img_x = (int)pt.x - 4;
uchar* t = temp + 32*idx;
for(int j=0;j<32;j++){
uchar dig = '\0';
for(int i=0;i<8;i++){
int index = j*8 + i;
int start1_x = img_x + (ref_a[index]-1)%9;
int start1_y = img_y + (ref_a[index]-1)/9;
int start2_x = img_x + (ref_b[index]-1)%9;
int start2_y = img_y + (ref_b[index]-1)/9;
int result = src[start1_y*step + start1_x] < src[start2_y*step + start2_x];
dig = dig | (uchar(result));
if(i!=7)
dig = dig<<1;
}
// if(idx == 6){
// printf("result is %d\n", dig);
// }
t[j] = dig;
}
}
__device__ void img_to_s(float *img, float *s, int w, int h, int x, int y)
{
for (int yy = -1; yy <= 1; yy++)
for (int xx = -1; xx <= 1; xx++)
{
int x_i = x +xx;
int y_i = y + yy;
if (x_i < 0 || y_i < 0 || x_i >= w || y_i >= h)
s[ (yy + 1) * 3 + (xx +1)] = 0;
else
s[ (yy + 1) * 3 + (xx +1)] = img[(y_i * w) + x_i];
}
}
__global__ void vertical_blur( float* img,
int w,
int h,
filter_bank fb)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n;
float fy[50];
float fx[50];
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
for (int i = 0; i < levels; i ++)
{
int k = fb.bank[i].k;
linear_filter lf;
lf.hy = fy;
lf.hx = fx;
lf.k = fb.bank[i].k;
square_to_linear(fb.bank[i], &lf);
s[i][(y*w) + x] = get_filter_response_vertical(img, w, h, lf, x, y);
}
//printf("Test1 \n");
}
__global__ void horizontal_blur( float* img,
int w,
int h,
filter_bank fb)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
float fx[50];
float fy[50];
float sk, sk_1;
if (x >= w || y >= h)
return;
int k = fb.bank[0].k;
linear_filter lf;
lf.hx = fx;
lf.hy = fy;
lf.k = fb.bank[0].k;
sk = get_filter_response_horizontal(s[0], w, h, lf, x, y);
for (int i = 0; i < levels; i ++)
{
k = fb.bank[i+1].k;
linear_filter lf;
lf.hx = fx;
lf.hy = fy;
lf.k = k;
square_to_linear(fb.bank[i+1], &lf);
sk_1 = get_filter_response_horizontal(s[i+1], w, h, lf, x, y);
d[i][(y*w) + x] = sk - sk_1;
sk = sk_1;
}
//printf("Test 2\n");
}
__global__ void DoG_Pyramid( float* img,
int w,
int h,
filter_bank fb)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
float sk, sk_1;
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
sk = get_filter_response(img, w, h, fb.bank[0], x, y);
for (int i = 0; i < levels; i ++)
{
//sk = get_filter_response(img, w, h, fb.bank[i], x, y);
sk_1 = get_filter_response(img, w, h, fb.bank[i+1], x, y);
d[i][(y*w) + x] = sk - sk_1;
sk = sk_1;
}
}
__global__ void DoG_Kernel( float* img,
int w,
int h,
filter_bank fb,
uchar *output)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
const float th_c = 7.0;
const float th_r = 12.0;
bool keypoint_detected = false;
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
/* Pixels in the border */
if ( (x >= w-4) || (y >= h-4) || x < 4 || y < 4)
return;
/* DoG first levels */
/* D(k-1), D(k), D(k+1) */
float d_1k[9], dk[9], dk_1[9];
  /* Registers to calculate Hessian of DoG */
float dh[25], sh_1[25], sh[25];
/* Compute D(k) and D(k+1) for first level */
int idx;
for (int i = 0; i < levels; i++)
{
float current = d[i][(y*w) + x];
bool ismax = true;
bool ismin = true;
/* If threshold test fails go to next iteration */
if (fabs(current) < th_c)
continue;
img_to_s(d[i], dk, w, h, x, y);
/* Current layer */
ismax = ismax && is_max(dk, current);
ismin = ismin && is_min(dk, current);
if (!ismax && !ismin)
continue;
/* Layer below */
if (i != levels - 1)
{
img_to_s(d[i+1], dk_1, w, h, x, y);
ismax = ismax && is_max(dk_1, current);
ismin = ismin && is_min(dk_1, current);
if (!ismax && !ismin)
continue;
}
/* Layer above */
if (i != 0)
{
img_to_s(d[i-1], d_1k, w, h, x, y);
ismax = ismax && is_max(d_1k, current);
ismin = ismin && is_min(d_1k, current);
if (!ismax && !ismin)
continue;
}
float R = get_curvature(d[i], w, h, x, y);
if (R > th_r)
break;
/* Atomically increase the number of keypoints
and add the new found keypoint
*/
idx = atomicAdd(&kp, 1);
kp_x[idx] = x;
kp_y[idx] = y;
// dbg_printf("Keypoint detected at x = %d, y= %d. idx is %d ,"
// "and level is %d, and intensity is %f\n", x, y, idx, i, dk[center]);
keypoint_detected = true;
break;
}
// if(keypoint_detected == true){
// short2 pt;
// pt.x = x;
// pt.y = y;
// smoothedSum(img, pt, w, output, idx);
// }
}
uchar* DoG_detector_cuda(Mat img, int **k_x, int **k_y, int *n, float th_c, float th_r,
int levels, float sigma)
{
double start, end;
/* Device image */
float *gpu_img;
float *img_ptr = (float*) img.ptr<float>();
/* Get width and height */
int w = img.cols;
int h = img.rows;
  /* Block width */
int block_width = 32;
/* Calculate image size in bytes */
size_t img_sz = w * h * sizeof(float);
/* Generate DoG Levels */
float sigma_l[10];
for (int i = -1; i < levels - 1; i ++)
sigma_l[i+1] = (float) i;
/* Create device and host filter banks */
filter_bank fb, fbd;
create_DoG_bank (&fb, levels, sqrt(2), sigma, sigma_l);
/* Copy device filter bank to host */
copy_DoG_bank_device(&fb, &fbd);
/* Allocate image memory in device */
hipMalloc(&gpu_img, img_sz);
/* Copy image from host to device */
hipMemcpy(gpu_img, img_ptr, img_sz, hipMemcpyHostToDevice);
/* Calculate Grid Size */
const dim3 block(block_width, block_width);
const dim3 grid( (w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
start = CycleTimer::currentSeconds();
/* Launch Kernel */
//DoG_Kernel<<<grid,block>>>(gpu_img, w, h, fbd);
hipLaunchKernelGGL(( DoG_Pyramid), dim3(grid),dim3(block), 0, 0, gpu_img, w, h, fbd);
//vertical_blur<<<grid,block>>>(gpu_img, w, h, fbd);
hipDeviceSynchronize();
end = CycleTimer::currentSeconds();
cout<<"Filters took "<<end-start<<" seconds"<<endl;
//horizontal_blur<<<grid,block>>>(gpu_img, w, h, fbd);
//hipDeviceSynchronize();
//end = CycleTimer::currentSeconds();
//cout<<"Filters took "<<end-start<<" seconds"<<endl;
uchar *d_descriptor, *h_descriptor;
hipMalloc(&d_descriptor, 32*sizeof(uchar)*500);
h_descriptor = (uchar*)malloc(32*sizeof(uchar)*500);
start = CycleTimer::currentSeconds();
hipLaunchKernelGGL(( DoG_Kernel), dim3(grid),dim3(block), 0, 0, gpu_img, w, h, fbd, d_descriptor);
hipDeviceSynchronize();
end = CycleTimer::currentSeconds();
//cout<<"CUDA MINMAX kernel took "<<end-start<<" seconds"<<endl;
//cout<<"CUDA KERNEL took "<<end-start<<" seconds"<<endl;
// dbg_printf("Finished calling kernel\n");
/* Free device memory */
hipFree(gpu_img);
/* Copy results from device to host */
hipMemcpyFromSymbol(n, kp, sizeof(int));
dbg_printf("Detected %d keypoints \n",*n);
*k_y = (int *) malloc(*n * sizeof(int));
*k_x = (int *) malloc(*n * sizeof(int));
hipMemcpyFromSymbol(*k_x, kp_x, sizeof(int)* (*n));
hipMemcpyFromSymbol(*k_y, kp_y, sizeof(int)* (*n));
// start = CycleTimer::currentSeconds();
hipMemcpy(h_descriptor, d_descriptor, 32*sizeof(uchar)*(*n), hipMemcpyDeviceToHost);
// end = CycleTimer::currentSeconds();
// cout<<"memcopy of descriptor took "<<end-start<<" seconds"<<endl;
/* Clear kp */
int zero = 0;
hipMemcpyToSymbol(kp, &zero, sizeof(int));
hipFree(d_descriptor);
return h_descriptor;
}
| f0b81d82af91d1fe7ab3023628b3da1d2d5031c0.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
#include <iomanip>
#include <vector>
#include <cstdio>
#include <ctime>
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "CycleTimer.h"
#include "filter.h"
#include "filter.cu_incl"
#define DEBUG
#ifdef DEBUG
/* When debugging is enabled, these form aliases to useful functions */
#define dbg_printf(...) printf(__VA_ARGS__);
#else
/* When debugging is disabled, no code gets generated for these */
#define dbg_printf(...)
#endif
using namespace cv;
using namespace cv::cuda;
using namespace std;
#define MAX_IMG_SZ 4096 * 4096 *2
#define MAX_PYRAMIDS 5
/* Global memory for keypoints */
const int max_kp = 12000;
__device__ int kp = 0;
__device__ int kp_x[max_kp];
__device__ int kp_y[max_kp];
__device__ float d[MAX_PYRAMIDS][MAX_IMG_SZ];
__device__ float s[MAX_PYRAMIDS + 1][MAX_IMG_SZ];
__device__ int ref_b[256] = {52, 70, 37, 11, 6, 65, 75, 10, 25,
38, 48, 75, 73, 7, 37, 3, 16, 35, 44,
50, 56, 52, 65, 52, 48, 48, 77, 45, 48,
66, 61, 63, 14, 27, 17, 56, 55, 38, 69,
2, 51, 12, 6, 31, 7, 60, 19, 3, 40,
3, 34, 28, 23, 4, 61, 30, 49, 31, 56,
65, 26, 18, 79, 49, 5, 50, 19, 67, 72,
75, 15, 3, 40, 54, 33, 52, 16, 79, 8,
45, 25, 42, 64, 77, 63, 47, 26, 11, 56,
23, 74, 19, 33, 63, 57, 52, 46, 14, 48,
24, 59, 56, 27, 70, 38, 63, 44, 52, 51,
33, 9, 71, 2, 66, 81, 36, 43, 20, 4,
76, 60, 55, 80, 54, 55, 54, 18, 41, 68,
10, 30, 57, 6, 53, 54, 35, 53, 80, 32,
19, 27, 3, 47, 46, 58, 15, 32, 8, 67,
50, 23, 67, 30, 29, 18, 53, 70, 67, 24,
48, 19, 13, 16, 55, 22, 49, 17, 16, 7,
67, 61, 68, 5, 17, 26, 66, 27, 52, 62,
46, 24, 27, 57, 78, 23, 78, 71, 79, 47,
72, 8, 10, 20, 43, 15, 69, 18, 81, 14,
68, 38, 34, 62, 15, 5, 58, 27, 39, 72,
61, 8, 1, 63, 21, 33, 18, 57, 11, 4,
57, 58, 50, 61, 66, 65, 12, 29, 41, 3,
34, 11, 59, 47, 71, 50, 63, 38, 32, 35,
11, 54, 77, 21, 38, 4, 7, 21, 57, 17,
44, 12, 22, 34, 31, 41, 64};
__device__ int ref_a[256] = {75, 70, 47, 48, 70, 1, 3, 38, 20,
39, 16, 53, 28, 71, 31, 15, 5, 34, 72,
74, 53, 4, 57, 15, 59, 16, 51, 79, 9,
7, 77, 69, 32, 73, 3, 28, 42, 31, 22,
4, 63, 1, 1, 8, 16, 17, 63, 80, 36,
48, 16, 74, 54, 37, 33, 65, 7, 12, 24,
52, 72, 38, 34, 70, 54, 77, 54, 73, 44,
81, 7, 69, 80, 60, 38, 28, 2, 79, 51,
47, 17, 8, 7, 44, 44, 25, 75, 34, 48,
15, 30, 54, 6, 28, 33, 2, 13, 54, 47,
42, 31, 29, 46, 65, 18, 39, 64, 72, 41,
19, 64, 14, 15, 66, 54, 42, 70, 37, 32,
51, 45, 70, 9, 20, 67, 64, 20, 4, 50,
9, 16, 81, 27, 57, 48, 37, 72, 27, 21,
78, 73, 44, 60, 76, 31, 54, 33, 56, 10,
55, 52, 24, 56, 56, 79, 45, 4, 38, 65,
48, 38, 62, 19, 26, 57, 61, 19, 47, 56,
26, 39, 79, 76, 25, 44, 81, 22, 19, 55,
24, 17, 43, 78, 39, 66, 59, 14, 11, 24,
57, 16, 12, 53, 26, 33, 53, 34, 45, 63,
69, 21, 32, 58, 73, 49, 62, 2, 73, 19,
72, 81, 13, 13, 67, 72, 11, 69, 61, 47,
24, 79, 22, 3, 62, 67, 53, 14, 80, 20,
72, 17, 10, 10, 45, 81, 57, 33, 61, 16,
16, 9, 66, 3, 63, 36, 63, 67, 41, 30,
64, 35, 49, 47, 22, 44, 40 };
__device__ inline void smoothedSum(float* src, short2 pt, int step, uchar* temp, int idx)
{
const int img_y = (int)pt.y - 4;
const int img_x = (int)pt.x - 4;
uchar* t = temp + 32*idx;
for(int j=0;j<32;j++){
uchar dig = '\0';
for(int i=0;i<8;i++){
int index = j*8 + i;
int start1_x = img_x + (ref_a[index]-1)%9;
int start1_y = img_y + (ref_a[index]-1)/9;
int start2_x = img_x + (ref_b[index]-1)%9;
int start2_y = img_y + (ref_b[index]-1)/9;
int result = src[start1_y*step + start1_x] < src[start2_y*step + start2_x];
dig = dig | (uchar(result));
if(i!=7)
dig = dig<<1;
}
// if(idx == 6){
// printf("result is %d\n", dig);
// }
t[j] = dig;
}
}
__device__ void img_to_s(float *img, float *s, int w, int h, int x, int y)
{
for (int yy = -1; yy <= 1; yy++)
for (int xx = -1; xx <= 1; xx++)
{
int x_i = x +xx;
int y_i = y + yy;
if (x_i < 0 || y_i < 0 || x_i >= w || y_i >= h)
s[ (yy + 1) * 3 + (xx +1)] = 0;
else
s[ (yy + 1) * 3 + (xx +1)] = img[(y_i * w) + x_i];
}
}
__global__ void vertical_blur( float* img,
int w,
int h,
filter_bank fb)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n;
float fy[50];
float fx[50];
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
for (int i = 0; i < levels; i ++)
{
int k = fb.bank[i].k;
linear_filter lf;
lf.hy = fy;
lf.hx = fx;
lf.k = fb.bank[i].k;
square_to_linear(fb.bank[i], &lf);
s[i][(y*w) + x] = get_filter_response_vertical(img, w, h, lf, x, y);
}
//printf("Test1 \n");
}
__global__ void horizontal_blur( float* img,
int w,
int h,
filter_bank fb)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
float fx[50];
float fy[50];
float sk, sk_1;
if (x >= w || y >= h)
return;
int k = fb.bank[0].k;
linear_filter lf;
lf.hx = fx;
lf.hy = fy;
lf.k = fb.bank[0].k;
sk = get_filter_response_horizontal(s[0], w, h, lf, x, y);
for (int i = 0; i < levels; i ++)
{
k = fb.bank[i+1].k;
linear_filter lf;
lf.hx = fx;
lf.hy = fy;
lf.k = k;
square_to_linear(fb.bank[i+1], &lf);
sk_1 = get_filter_response_horizontal(s[i+1], w, h, lf, x, y);
d[i][(y*w) + x] = sk - sk_1;
sk = sk_1;
}
//printf("Test 2\n");
}
__global__ void DoG_Pyramid( float* img,
int w,
int h,
filter_bank fb)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
float sk, sk_1;
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
sk = get_filter_response(img, w, h, fb.bank[0], x, y);
for (int i = 0; i < levels; i ++)
{
//sk = get_filter_response(img, w, h, fb.bank[i], x, y);
sk_1 = get_filter_response(img, w, h, fb.bank[i+1], x, y);
d[i][(y*w) + x] = sk - sk_1;
sk = sk_1;
}
}
__global__ void DoG_Kernel( float* img,
int w,
int h,
filter_bank fb,
uchar *output)
{
/*2D Index of current thread */
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int center = 3*(1) + 1;
const int levels = fb.n - 1;
const float th_c = 7.0;
const float th_r = 12.0;
bool keypoint_detected = false;
/* Out of bounds pixel */
if (x >= w || y >= h)
return;
/* Pixels in the border */
if ( (x >= w-4) || (y >= h-4) || x < 4 || y < 4)
return;
/* DoG first levels */
/* D(k-1), D(k), D(k+1) */
float d_1k[9], dk[9], dk_1[9];
  /* Registers to calculate Hessian of DoG */
float dh[25], sh_1[25], sh[25];
/* Compute D(k) and D(k+1) for first level */
int idx;
for (int i = 0; i < levels; i++)
{
float current = d[i][(y*w) + x];
bool ismax = true;
bool ismin = true;
/* If threshold test fails go to next iteration */
if (fabs(current) < th_c)
continue;
img_to_s(d[i], dk, w, h, x, y);
/* Current layer */
ismax = ismax && is_max(dk, current);
ismin = ismin && is_min(dk, current);
if (!ismax && !ismin)
continue;
/* Layer below */
if (i != levels - 1)
{
img_to_s(d[i+1], dk_1, w, h, x, y);
ismax = ismax && is_max(dk_1, current);
ismin = ismin && is_min(dk_1, current);
if (!ismax && !ismin)
continue;
}
/* Layer above */
if (i != 0)
{
img_to_s(d[i-1], d_1k, w, h, x, y);
ismax = ismax && is_max(d_1k, current);
ismin = ismin && is_min(d_1k, current);
if (!ismax && !ismin)
continue;
}
float R = get_curvature(d[i], w, h, x, y);
if (R > th_r)
break;
/* Atomically increase the number of keypoints
and add the new found keypoint
*/
idx = atomicAdd(&kp, 1);
kp_x[idx] = x;
kp_y[idx] = y;
// dbg_printf("Keypoint detected at x = %d, y= %d. idx is %d ,"
// "and level is %d, and intensity is %f\n", x, y, idx, i, dk[center]);
keypoint_detected = true;
break;
}
// if(keypoint_detected == true){
// short2 pt;
// pt.x = x;
// pt.y = y;
// smoothedSum(img, pt, w, output, idx);
// }
}
uchar* DoG_detector_cuda(Mat img, int **k_x, int **k_y, int *n, float th_c, float th_r,
int levels, float sigma)
{
double start, end;
/* Device image */
float *gpu_img;
float *img_ptr = (float*) img.ptr<float>();
/* Get width and height */
int w = img.cols;
int h = img.rows;
  /* Block width */
int block_width = 32;
/* Calculate image size in bytes */
size_t img_sz = w * h * sizeof(float);
/* Generate DoG Levels */
float sigma_l[10];
for (int i = -1; i < levels - 1; i ++)
sigma_l[i+1] = (float) i;
/* Create device and host filter banks */
filter_bank fb, fbd;
create_DoG_bank (&fb, levels, sqrt(2), sigma, sigma_l);
/* Copy device filter bank to host */
copy_DoG_bank_device(&fb, &fbd);
/* Allocate image memory in device */
cudaMalloc(&gpu_img, img_sz);
/* Copy image from host to device */
cudaMemcpy(gpu_img, img_ptr, img_sz, cudaMemcpyHostToDevice);
/* Calculate Grid Size */
const dim3 block(block_width, block_width);
const dim3 grid( (w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
start = CycleTimer::currentSeconds();
/* Launch Kernel */
//DoG_Kernel<<<grid,block>>>(gpu_img, w, h, fbd);
DoG_Pyramid<<<grid,block>>>(gpu_img, w, h, fbd);
//vertical_blur<<<grid,block>>>(gpu_img, w, h, fbd);
cudaDeviceSynchronize();
end = CycleTimer::currentSeconds();
cout<<"Filters took "<<end-start<<" seconds"<<endl;
//horizontal_blur<<<grid,block>>>(gpu_img, w, h, fbd);
//cudaDeviceSynchronize();
//end = CycleTimer::currentSeconds();
//cout<<"Filters took "<<end-start<<" seconds"<<endl;
uchar *d_descriptor, *h_descriptor;
cudaMalloc(&d_descriptor, 32*sizeof(uchar)*500);
h_descriptor = (uchar*)malloc(32*sizeof(uchar)*500);
start = CycleTimer::currentSeconds();
DoG_Kernel<<<grid,block>>>(gpu_img, w, h, fbd, d_descriptor);
cudaDeviceSynchronize();
end = CycleTimer::currentSeconds();
//cout<<"CUDA MINMAX kernel took "<<end-start<<" seconds"<<endl;
//cout<<"CUDA KERNEL took "<<end-start<<" seconds"<<endl;
// dbg_printf("Finished calling kernel\n");
/* Free device memory */
cudaFree(gpu_img);
/* Copy results from device to host */
cudaMemcpyFromSymbol(n, kp, sizeof(int));
dbg_printf("Detected %d keypoints \n",*n);
*k_y = (int *) malloc(*n * sizeof(int));
*k_x = (int *) malloc(*n * sizeof(int));
cudaMemcpyFromSymbol(*k_x, kp_x, sizeof(int)* (*n));
cudaMemcpyFromSymbol(*k_y, kp_y, sizeof(int)* (*n));
// start = CycleTimer::currentSeconds();
cudaMemcpy(h_descriptor, d_descriptor, 32*sizeof(uchar)*(*n), cudaMemcpyDeviceToHost);
// end = CycleTimer::currentSeconds();
// cout<<"memcopy of descriptor took "<<end-start<<" seconds"<<endl;
/* Clear kp */
int zero = 0;
cudaMemcpyToSymbol(kp, &zero, sizeof(int));
cudaFree(d_descriptor);
return h_descriptor;
}
|
eb043258186259e7dd1870234db44971df5c8b13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -emit-pch -o %t %s
// RUN: %clang_cc1 -include-pch %t -fsyntax-only %s
#ifndef HEADER
#define HEADER
// Header.
#include "../SemaCUDA/cuda.h"
void kcall(void (*kp)()) {
hipLaunchKernelGGL(( kp), dim3(1), dim3(1), 0, 0, );
}
__global__ void kern() {
}
#else
// Using the header.
void test() {
kcall(kern);
hipLaunchKernelGGL(( kern), dim3(1), dim3(1), 0, 0, );
}
#endif
| eb043258186259e7dd1870234db44971df5c8b13.cu | // RUN: %clang_cc1 -emit-pch -o %t %s
// RUN: %clang_cc1 -include-pch %t -fsyntax-only %s
#ifndef HEADER
#define HEADER
// Header.
#include "../SemaCUDA/cuda.h"
void kcall(void (*kp)()) {
kp<<<1, 1>>>();
}
__global__ void kern() {
}
#else
// Using the header.
void test() {
kcall(kern);
kern<<<1, 1>>>();
}
#endif
|
eba16e0b76d3730e9a741ee0de359ac8217f45e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "fgd.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace bgfg
{
////////////////////////////////////////////////////////////////////////////
// calcDiffHistogram
const unsigned int UINT_BITS = 32U;
const int LOG_WARP_SIZE = 5;
const int WARP_SIZE = 1 << LOG_WARP_SIZE;
#if (__CUDA_ARCH__ < 120)
const unsigned int TAG_MASK = (1U << (UINT_BITS - LOG_WARP_SIZE)) - 1U;
#endif
const int MERGE_THREADBLOCK_SIZE = 256;
__device__ __forceinline__ void addByte(unsigned int* s_WarpHist_, unsigned int data, unsigned int threadTag)
{
#if (__CUDA_ARCH__ < 120)
volatile unsigned int* s_WarpHist = s_WarpHist_;
unsigned int count;
do
{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
#else
atomicInc(s_WarpHist_ + data, (unsigned int)(-1));
#endif
}
template <typename PT, typename CT>
__global__ void calcPartialHistogram(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2)
{
#if (__CUDA_ARCH__ < 200)
const int HISTOGRAM_WARP_COUNT = 4;
#else
const int HISTOGRAM_WARP_COUNT = 6;
#endif
const int HISTOGRAM_THREADBLOCK_SIZE = HISTOGRAM_WARP_COUNT * WARP_SIZE;
const int HISTOGRAM_THREADBLOCK_MEMORY = HISTOGRAM_WARP_COUNT * HISTOGRAM_BIN_COUNT;
//Per-warp subhistogram storage
__shared__ unsigned int s_Hist0[HISTOGRAM_THREADBLOCK_MEMORY];
__shared__ unsigned int s_Hist1[HISTOGRAM_THREADBLOCK_MEMORY];
__shared__ unsigned int s_Hist2[HISTOGRAM_THREADBLOCK_MEMORY];
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (int i = 0; i < (HISTOGRAM_THREADBLOCK_MEMORY / HISTOGRAM_THREADBLOCK_SIZE); ++i)
{
s_Hist0[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
s_Hist1[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
s_Hist2[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
}
__syncthreads();
const unsigned int warpId = threadIdx.x >> LOG_WARP_SIZE;
unsigned int* s_WarpHist0 = s_Hist0 + warpId * HISTOGRAM_BIN_COUNT;
unsigned int* s_WarpHist1 = s_Hist1 + warpId * HISTOGRAM_BIN_COUNT;
unsigned int* s_WarpHist2 = s_Hist2 + warpId * HISTOGRAM_BIN_COUNT;
const unsigned int tag = threadIdx.x << (UINT_BITS - LOG_WARP_SIZE);
const int dataCount = prevFrame.rows * prevFrame.cols;
for (unsigned int pos = blockIdx.x * HISTOGRAM_THREADBLOCK_SIZE + threadIdx.x; pos < dataCount; pos += HISTOGRAM_THREADBLOCK_SIZE * PARTIAL_HISTOGRAM_COUNT)
{
const unsigned int y = pos / prevFrame.cols;
const unsigned int x = pos % prevFrame.cols;
PT prevVal = prevFrame(y, x);
CT curVal = curFrame(y, x);
int3 diff = make_int3(
::abs(curVal.x - prevVal.x),
::abs(curVal.y - prevVal.y),
::abs(curVal.z - prevVal.z)
);
addByte(s_WarpHist0, diff.x, tag);
addByte(s_WarpHist1, diff.y, tag);
addByte(s_WarpHist2, diff.z, tag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (unsigned int bin = threadIdx.x; bin < HISTOGRAM_BIN_COUNT; bin += HISTOGRAM_THREADBLOCK_SIZE)
{
unsigned int sum0 = 0;
unsigned int sum1 = 0;
unsigned int sum2 = 0;
#pragma unroll
for (int i = 0; i < HISTOGRAM_WARP_COUNT; ++i)
{
#if (__CUDA_ARCH__ < 120)
sum0 += s_Hist0[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
sum1 += s_Hist1[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
sum2 += s_Hist2[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
#else
sum0 += s_Hist0[bin + i * HISTOGRAM_BIN_COUNT];
sum1 += s_Hist1[bin + i * HISTOGRAM_BIN_COUNT];
sum2 += s_Hist2[bin + i * HISTOGRAM_BIN_COUNT];
#endif
}
partialBuf0[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum0;
partialBuf1[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum1;
partialBuf2[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum2;
}
}
__global__ void mergeHistogram(const unsigned int* partialBuf0, const unsigned int* partialBuf1, const unsigned int* partialBuf2, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2)
{
unsigned int sum0 = 0;
unsigned int sum1 = 0;
unsigned int sum2 = 0;
#pragma unroll
for (unsigned int i = threadIdx.x; i < PARTIAL_HISTOGRAM_COUNT; i += MERGE_THREADBLOCK_SIZE)
{
sum0 += partialBuf0[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
sum1 += partialBuf1[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
sum2 += partialBuf2[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
}
__shared__ unsigned int data0[MERGE_THREADBLOCK_SIZE];
__shared__ unsigned int data1[MERGE_THREADBLOCK_SIZE];
__shared__ unsigned int data2[MERGE_THREADBLOCK_SIZE];
plus<unsigned int> op;
reduce<MERGE_THREADBLOCK_SIZE>(smem_tuple(data0, data1, data2), thrust::tie(sum0, sum1, sum2), threadIdx.x, thrust::make_tuple(op, op, op));
if(threadIdx.x == 0)
{
hist0[blockIdx.x] = sum0;
hist1[blockIdx.x] = sum1;
hist2[blockIdx.x] = sum2;
}
}
template <typename PT, typename CT>
void calcDiffHistogram_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame,
unsigned int* hist0, unsigned int* hist1, unsigned int* hist2,
unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2,
bool cc20, hipStream_t stream)
{
const int HISTOGRAM_WARP_COUNT = cc20 ? 6 : 4;
const int HISTOGRAM_THREADBLOCK_SIZE = HISTOGRAM_WARP_COUNT * WARP_SIZE;
hipLaunchKernelGGL(( calcPartialHistogram<PT, CT>), dim3(PARTIAL_HISTOGRAM_COUNT), dim3(HISTOGRAM_THREADBLOCK_SIZE), 0, stream,
(PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame, partialBuf0, partialBuf1, partialBuf2);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( mergeHistogram), dim3(HISTOGRAM_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, stream, partialBuf0, partialBuf1, partialBuf2, hist0, hist1, hist2);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void calcDiffHistogram_gpu<uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, hipStream_t stream);
template void calcDiffHistogram_gpu<uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, hipStream_t stream);
template void calcDiffHistogram_gpu<uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, hipStream_t stream);
template void calcDiffHistogram_gpu<uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, hipStream_t stream);
/////////////////////////////////////////////////////////////////////////
// calcDiffThreshMask
template <typename PT, typename CT>
__global__ void calcDiffThreshMask(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame, uchar3 bestThres, PtrStepb changeMask)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
if (y > prevFrame.rows || x > prevFrame.cols)
return;
PT prevVal = prevFrame(y, x);
CT curVal = curFrame(y, x);
int3 diff = make_int3(
::abs(curVal.x - prevVal.x),
::abs(curVal.y - prevVal.y),
::abs(curVal.z - prevVal.z)
);
if (diff.x > bestThres.x || diff.y > bestThres.y || diff.z > bestThres.z)
changeMask(y, x) = 255;
}
template <typename PT, typename CT>
void calcDiffThreshMask_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
hipLaunchKernelGGL(( calcDiffThreshMask<PT, CT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame, bestThres, changeMask);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void calcDiffThreshMask_gpu<uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, hipStream_t stream);
template void calcDiffThreshMask_gpu<uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, hipStream_t stream);
template void calcDiffThreshMask_gpu<uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, hipStream_t stream);
template void calcDiffThreshMask_gpu<uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, hipStream_t stream);
/////////////////////////////////////////////////////////////////////////
// bgfgClassification
__constant__ BGPixelStat c_stat;
void setBGPixelStat(const BGPixelStat& stat)
{
cudaSafeCall( hipMemcpyToSymbol(c_stat, &stat, sizeof(BGPixelStat)) );
}
template <typename T> struct Output;
template <> struct Output<uchar3>
{
static __device__ __forceinline__ uchar3 make(uchar v0, uchar v1, uchar v2)
{
return make_uchar3(v0, v1, v2);
}
};
template <> struct Output<uchar4>
{
static __device__ __forceinline__ uchar4 make(uchar v0, uchar v1, uchar v2)
{
return make_uchar4(v0, v1, v2, 255);
}
};
template <typename PT, typename CT, typename OT>
__global__ void bgfgClassification(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame,
const PtrStepb Ftd, const PtrStepb Fbd, PtrStepb foreground,
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc)
{
const int i = blockIdx.y * blockDim.y + threadIdx.y;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i > prevFrame.rows || j > prevFrame.cols)
return;
if (Fbd(i, j) || Ftd(i, j))
{
float Pb = 0.0f;
float Pv = 0.0f;
float Pvb = 0.0f;
int val = 0;
// Is it a motion pixel?
if (Ftd(i, j))
{
if (!c_stat.is_trained_dyn_model(i, j))
val = 1;
else
{
PT prevVal = prevFrame(i, j);
CT curVal = curFrame(i, j);
// Compare with stored CCt vectors:
for (int k = 0; k < N1cc && c_stat.PV_CC(i, j, k) > alpha2; ++k)
{
OT v1 = c_stat.V1_CC<OT>(i, j, k);
OT v2 = c_stat.V2_CC<OT>(i, j, k);
if (::abs(v1.x - prevVal.x) <= deltaCC &&
::abs(v1.y - prevVal.y) <= deltaCC &&
::abs(v1.z - prevVal.z) <= deltaCC &&
::abs(v2.x - curVal.x) <= deltaCC &&
::abs(v2.y - curVal.y) <= deltaCC &&
::abs(v2.z - curVal.z) <= deltaCC)
{
Pv += c_stat.PV_CC(i, j, k);
Pvb += c_stat.PVB_CC(i, j, k);
}
}
Pb = c_stat.Pbcc(i, j);
if (2 * Pvb * Pb <= Pv)
val = 1;
}
}
else if(c_stat.is_trained_st_model(i, j))
{
CT curVal = curFrame(i, j);
// Compare with stored Ct vectors:
for (int k = 0; k < N1c && c_stat.PV_C(i, j, k) > alpha2; ++k)
{
OT v = c_stat.V_C<OT>(i, j, k);
if (::abs(v.x - curVal.x) <= deltaC &&
::abs(v.y - curVal.y) <= deltaC &&
::abs(v.z - curVal.z) <= deltaC)
{
Pv += c_stat.PV_C(i, j, k);
Pvb += c_stat.PVB_C(i, j, k);
}
}
Pb = c_stat.Pbc(i, j);
if (2 * Pvb * Pb <= Pv)
val = 1;
}
// Update foreground:
foreground(i, j) = static_cast<uchar>(val);
} // end if( change detection...
}
template <typename PT, typename CT, typename OT>
void bgfgClassification_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground,
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(bgfgClassification<PT, CT, OT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( bgfgClassification<PT, CT, OT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame,
Ftd, Fbd, foreground,
deltaC, deltaCC, alpha2, N1c, N1cc);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void bgfgClassification_gpu<uchar3, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, hipStream_t stream);
////////////////////////////////////////////////////////////////////////////
// updateBackgroundModel
template <typename PT, typename CT, typename OT, class PrevFramePtr2D, class CurFramePtr2D, class FtdPtr2D, class FbdPtr2D>
__global__ void updateBackgroundModel(int cols, int rows, const PrevFramePtr2D prevFrame, const CurFramePtr2D curFrame, const FtdPtr2D Ftd, const FbdPtr2D Fbd,
PtrStepb foreground, PtrStep<OT> background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T)
{
const int i = blockIdx.y * blockDim.y + threadIdx.y;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i > rows || j > cols)
return;
const float MIN_PV = 1e-10f;
const uchar is_trained_dyn_model = c_stat.is_trained_dyn_model(i, j);
if (Ftd(i, j) || !is_trained_dyn_model)
{
const float alpha = is_trained_dyn_model ? alpha2 : alpha3;
float Pbcc = c_stat.Pbcc(i, j);
//update Pb
Pbcc *= (1.0f - alpha);
if (!foreground(i, j))
{
Pbcc += alpha;
}
int min_dist = numeric_limits<int>::max();
int indx = -1;
PT prevVal = prevFrame(i, j);
CT curVal = curFrame(i, j);
// Find best Vi match:
for (int k = 0; k < N2cc; ++k)
{
float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
if (PV_CC < MIN_PV)
{
c_stat.PV_CC(i, j, k) = 0;
c_stat.PVB_CC(i, j, k) = 0;
continue;
}
c_stat.PV_CC(i, j, k) = PV_CC * (1.0f - alpha);
c_stat.PVB_CC(i, j, k) = c_stat.PVB_CC(i, j, k) * (1.0f - alpha);
OT v1 = c_stat.V1_CC<OT>(i, j, k);
int3 val1 = make_int3(
::abs(v1.x - prevVal.x),
::abs(v1.y - prevVal.y),
::abs(v1.z - prevVal.z)
);
OT v2 = c_stat.V2_CC<OT>(i, j, k);
int3 val2 = make_int3(
::abs(v2.x - curVal.x),
::abs(v2.y - curVal.y),
::abs(v2.z - curVal.z)
);
int dist = val1.x + val1.y + val1.z + val2.x + val2.y + val2.z;
if (dist < min_dist &&
val1.x <= deltaCC && val1.y <= deltaCC && val1.z <= deltaCC &&
val2.x <= deltaCC && val2.y <= deltaCC && val2.z <= deltaCC)
{
min_dist = dist;
indx = k;
}
}
if (indx < 0)
{
// Replace N2th elem in the table by new feature:
indx = N2cc - 1;
c_stat.PV_CC(i, j, indx) = alpha;
c_stat.PVB_CC(i, j, indx) = alpha;
                //update Vt
c_stat.V1_CC<OT>(i, j, indx) = Output<OT>::make(prevVal.x, prevVal.y, prevVal.z);
c_stat.V2_CC<OT>(i, j, indx) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
else
{
// Update:
c_stat.PV_CC(i, j, indx) += alpha;
if (!foreground(i, j))
{
c_stat.PVB_CC(i, j, indx) += alpha;
}
}
//re-sort CCt table by Pv
const float PV_CC_indx = c_stat.PV_CC(i, j, indx);
const float PVB_CC_indx = c_stat.PVB_CC(i, j, indx);
const OT V1_CC_indx = c_stat.V1_CC<OT>(i, j, indx);
const OT V2_CC_indx = c_stat.V2_CC<OT>(i, j, indx);
for (int k = 0; k < indx; ++k)
{
if (c_stat.PV_CC(i, j, k) <= PV_CC_indx)
{
//shift elements
float Pv_tmp1;
float Pv_tmp2 = PV_CC_indx;
float Pvb_tmp1;
float Pvb_tmp2 = PVB_CC_indx;
OT v1_tmp1;
OT v1_tmp2 = V1_CC_indx;
OT v2_tmp1;
OT v2_tmp2 = V2_CC_indx;
for (int l = k; l <= indx; ++l)
{
Pv_tmp1 = c_stat.PV_CC(i, j, l);
c_stat.PV_CC(i, j, l) = Pv_tmp2;
Pv_tmp2 = Pv_tmp1;
Pvb_tmp1 = c_stat.PVB_CC(i, j, l);
c_stat.PVB_CC(i, j, l) = Pvb_tmp2;
Pvb_tmp2 = Pvb_tmp1;
v1_tmp1 = c_stat.V1_CC<OT>(i, j, l);
c_stat.V1_CC<OT>(i, j, l) = v1_tmp2;
v1_tmp2 = v1_tmp1;
v2_tmp1 = c_stat.V2_CC<OT>(i, j, l);
c_stat.V2_CC<OT>(i, j, l) = v2_tmp2;
v2_tmp2 = v2_tmp1;
}
break;
}
}
float sum1 = 0.0f;
float sum2 = 0.0f;
//check "once-off" changes
for (int k = 0; k < N1cc; ++k)
{
const float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
sum1 += PV_CC;
sum2 += c_stat.PVB_CC(i, j, k);
}
if (sum1 > T)
c_stat.is_trained_dyn_model(i, j) = 1;
float diff = sum1 - Pbcc * sum2;
// Update stat table:
if (diff > T)
{
//new BG features are discovered
for (int k = 0; k < N1cc; ++k)
{
const float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
c_stat.PVB_CC(i, j, k) = (PV_CC - Pbcc * c_stat.PVB_CC(i, j, k)) / (1.0f - Pbcc);
}
}
c_stat.Pbcc(i, j) = Pbcc;
}
// Handle "stationary" pixel:
if (!Ftd(i, j))
{
const float alpha = c_stat.is_trained_st_model(i, j) ? alpha2 : alpha3;
float Pbc = c_stat.Pbc(i, j);
//update Pb
Pbc *= (1.0f - alpha);
if (!foreground(i, j))
{
Pbc += alpha;
}
int min_dist = numeric_limits<int>::max();
int indx = -1;
CT curVal = curFrame(i, j);
//find best Vi match
for (int k = 0; k < N2c; ++k)
{
float PV_C = c_stat.PV_C(i, j, k);
if (PV_C < MIN_PV)
{
c_stat.PV_C(i, j, k) = 0;
c_stat.PVB_C(i, j, k) = 0;
continue;
}
// Exponential decay of memory
c_stat.PV_C(i, j, k) = PV_C * (1.0f - alpha);
c_stat.PVB_C(i, j, k) = c_stat.PVB_C(i, j, k) * (1.0f - alpha);
OT v = c_stat.V_C<OT>(i, j, k);
int3 val = make_int3(
::abs(v.x - curVal.x),
::abs(v.y - curVal.y),
::abs(v.z - curVal.z)
);
int dist = val.x + val.y + val.z;
if (dist < min_dist && val.x <= deltaC && val.y <= deltaC && val.z <= deltaC)
{
min_dist = dist;
indx = k;
}
}
if (indx < 0)
{
                    //N2th elem in the table is replaced by a new feature
indx = N2c - 1;
c_stat.PV_C(i, j, indx) = alpha;
c_stat.PVB_C(i, j, indx) = alpha;
                    //update Vt
c_stat.V_C<OT>(i, j, indx) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
else
{
//update
c_stat.PV_C(i, j, indx) += alpha;
if (!foreground(i, j))
{
c_stat.PVB_C(i, j, indx) += alpha;
}
}
//re-sort Ct table by Pv
const float PV_C_indx = c_stat.PV_C(i, j, indx);
const float PVB_C_indx = c_stat.PVB_C(i, j, indx);
OT V_C_indx = c_stat.V_C<OT>(i, j, indx);
for (int k = 0; k < indx; ++k)
{
if (c_stat.PV_C(i, j, k) <= PV_C_indx)
{
//shift elements
float Pv_tmp1;
float Pv_tmp2 = PV_C_indx;
float Pvb_tmp1;
float Pvb_tmp2 = PVB_C_indx;
OT v_tmp1;
OT v_tmp2 = V_C_indx;
for (int l = k; l <= indx; ++l)
{
Pv_tmp1 = c_stat.PV_C(i, j, l);
c_stat.PV_C(i, j, l) = Pv_tmp2;
Pv_tmp2 = Pv_tmp1;
Pvb_tmp1 = c_stat.PVB_C(i, j, l);
c_stat.PVB_C(i, j, l) = Pvb_tmp2;
Pvb_tmp2 = Pvb_tmp1;
v_tmp1 = c_stat.V_C<OT>(i, j, l);
c_stat.V_C<OT>(i, j, l) = v_tmp2;
v_tmp2 = v_tmp1;
}
break;
}
}
// Check "once-off" changes:
float sum1 = 0.0f;
float sum2 = 0.0f;
for (int k = 0; k < N1c; ++k)
{
const float PV_C = c_stat.PV_C(i, j, k);
if (!PV_C)
break;
sum1 += PV_C;
sum2 += c_stat.PVB_C(i, j, k);
}
if (sum1 > T)
c_stat.is_trained_st_model(i, j) = 1;
float diff = sum1 - Pbc * sum2;
// Update stat table:
if (diff > T)
{
//new BG features are discovered
for (int k = 0; k < N1c; ++k)
{
const float PV_C = c_stat.PV_C(i, j, k);
if (!PV_C)
break;
c_stat.PVB_C(i, j, k) = (PV_C - Pbc * c_stat.PVB_C(i, j, k)) / (1.0f - Pbc);
}
c_stat.Pbc(i, j) = 1.0f - Pbc;
}
else
{
c_stat.Pbc(i, j) = Pbc;
}
} // if !(change detection) at pixel (i,j)
// Update the reference BG image:
if (!foreground(i, j))
{
CT curVal = curFrame(i, j);
if (!Ftd(i, j) && !Fbd(i, j))
{
// Apply IIR filter:
OT oldVal = background(i, j);
int3 newVal = make_int3(
__float2int_rn(oldVal.x * (1.0f - alpha1) + curVal.x * alpha1),
__float2int_rn(oldVal.y * (1.0f - alpha1) + curVal.y * alpha1),
__float2int_rn(oldVal.z * (1.0f - alpha1) + curVal.z * alpha1)
);
background(i, j) = Output<OT>::make(
static_cast<uchar>(newVal.x),
static_cast<uchar>(newVal.y),
static_cast<uchar>(newVal.z)
);
}
else
{
background(i, j) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
}
}
template <typename PT, typename CT, typename OT>
struct UpdateBackgroundModel
{
static void call(PtrStepSz<PT> prevFrame, PtrStepSz<CT> curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSz<OT> background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T,
hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(updateBackgroundModel<PT, CT, OT, PtrStep<PT>, PtrStep<CT>, PtrStepb, PtrStepb>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( updateBackgroundModel<PT, CT, OT, PtrStep<PT>, PtrStep<CT>, PtrStepb, PtrStepb>), dim3(grid), dim3(block), 0, stream,
prevFrame.cols, prevFrame.rows,
prevFrame, curFrame,
Ftd, Fbd, foreground, background,
deltaC, deltaCC, alpha1, alpha2, alpha3, N1c, N1cc, N2c, N2cc, T);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
template <typename PT, typename CT, typename OT>
void updateBackgroundModel_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T,
hipStream_t stream)
{
UpdateBackgroundModel<PT, CT, OT>::call(PtrStepSz<PT>(prevFrame), PtrStepSz<CT>(curFrame), Ftd, Fbd, foreground, PtrStepSz<OT>(background),
deltaC, deltaCC, alpha1, alpha2, alpha3, N1c, N1cc, N2c, N2cc, T, stream);
}
template void updateBackgroundModel_gpu<uchar3, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| eba16e0b76d3730e9a741ee0de359ac8217f45e7.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "fgd.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace bgfg
{
////////////////////////////////////////////////////////////////////////////
// calcDiffHistogram
const unsigned int UINT_BITS = 32U;
const int LOG_WARP_SIZE = 5;
const int WARP_SIZE = 1 << LOG_WARP_SIZE;
#if (__CUDA_ARCH__ < 120)
const unsigned int TAG_MASK = (1U << (UINT_BITS - LOG_WARP_SIZE)) - 1U;
#endif
const int MERGE_THREADBLOCK_SIZE = 256;
__device__ __forceinline__ void addByte(unsigned int* s_WarpHist_, unsigned int data, unsigned int threadTag)
{
#if (__CUDA_ARCH__ < 120)
volatile unsigned int* s_WarpHist = s_WarpHist_;
unsigned int count;
do
{
count = s_WarpHist[data] & TAG_MASK;
count = threadTag | (count + 1);
s_WarpHist[data] = count;
} while (s_WarpHist[data] != count);
#else
atomicInc(s_WarpHist_ + data, (unsigned int)(-1));
#endif
}
template <typename PT, typename CT>
__global__ void calcPartialHistogram(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2)
{
#if (__CUDA_ARCH__ < 200)
const int HISTOGRAM_WARP_COUNT = 4;
#else
const int HISTOGRAM_WARP_COUNT = 6;
#endif
const int HISTOGRAM_THREADBLOCK_SIZE = HISTOGRAM_WARP_COUNT * WARP_SIZE;
const int HISTOGRAM_THREADBLOCK_MEMORY = HISTOGRAM_WARP_COUNT * HISTOGRAM_BIN_COUNT;
//Per-warp subhistogram storage
__shared__ unsigned int s_Hist0[HISTOGRAM_THREADBLOCK_MEMORY];
__shared__ unsigned int s_Hist1[HISTOGRAM_THREADBLOCK_MEMORY];
__shared__ unsigned int s_Hist2[HISTOGRAM_THREADBLOCK_MEMORY];
//Clear shared memory storage for current threadblock before processing
#pragma unroll
for (int i = 0; i < (HISTOGRAM_THREADBLOCK_MEMORY / HISTOGRAM_THREADBLOCK_SIZE); ++i)
{
s_Hist0[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
s_Hist1[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
s_Hist2[threadIdx.x + i * HISTOGRAM_THREADBLOCK_SIZE] = 0;
}
__syncthreads();
const unsigned int warpId = threadIdx.x >> LOG_WARP_SIZE;
unsigned int* s_WarpHist0 = s_Hist0 + warpId * HISTOGRAM_BIN_COUNT;
unsigned int* s_WarpHist1 = s_Hist1 + warpId * HISTOGRAM_BIN_COUNT;
unsigned int* s_WarpHist2 = s_Hist2 + warpId * HISTOGRAM_BIN_COUNT;
const unsigned int tag = threadIdx.x << (UINT_BITS - LOG_WARP_SIZE);
const int dataCount = prevFrame.rows * prevFrame.cols;
for (unsigned int pos = blockIdx.x * HISTOGRAM_THREADBLOCK_SIZE + threadIdx.x; pos < dataCount; pos += HISTOGRAM_THREADBLOCK_SIZE * PARTIAL_HISTOGRAM_COUNT)
{
const unsigned int y = pos / prevFrame.cols;
const unsigned int x = pos % prevFrame.cols;
PT prevVal = prevFrame(y, x);
CT curVal = curFrame(y, x);
int3 diff = make_int3(
::abs(curVal.x - prevVal.x),
::abs(curVal.y - prevVal.y),
::abs(curVal.z - prevVal.z)
);
addByte(s_WarpHist0, diff.x, tag);
addByte(s_WarpHist1, diff.y, tag);
addByte(s_WarpHist2, diff.z, tag);
}
__syncthreads();
//Merge per-warp histograms into per-block and write to global memory
for (unsigned int bin = threadIdx.x; bin < HISTOGRAM_BIN_COUNT; bin += HISTOGRAM_THREADBLOCK_SIZE)
{
unsigned int sum0 = 0;
unsigned int sum1 = 0;
unsigned int sum2 = 0;
#pragma unroll
for (int i = 0; i < HISTOGRAM_WARP_COUNT; ++i)
{
#if (__CUDA_ARCH__ < 120)
sum0 += s_Hist0[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
sum1 += s_Hist1[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
sum2 += s_Hist2[bin + i * HISTOGRAM_BIN_COUNT] & TAG_MASK;
#else
sum0 += s_Hist0[bin + i * HISTOGRAM_BIN_COUNT];
sum1 += s_Hist1[bin + i * HISTOGRAM_BIN_COUNT];
sum2 += s_Hist2[bin + i * HISTOGRAM_BIN_COUNT];
#endif
}
partialBuf0[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum0;
partialBuf1[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum1;
partialBuf2[blockIdx.x * HISTOGRAM_BIN_COUNT + bin] = sum2;
}
}
__global__ void mergeHistogram(const unsigned int* partialBuf0, const unsigned int* partialBuf1, const unsigned int* partialBuf2, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2)
{
unsigned int sum0 = 0;
unsigned int sum1 = 0;
unsigned int sum2 = 0;
#pragma unroll
for (unsigned int i = threadIdx.x; i < PARTIAL_HISTOGRAM_COUNT; i += MERGE_THREADBLOCK_SIZE)
{
sum0 += partialBuf0[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
sum1 += partialBuf1[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
sum2 += partialBuf2[blockIdx.x + i * HISTOGRAM_BIN_COUNT];
}
__shared__ unsigned int data0[MERGE_THREADBLOCK_SIZE];
__shared__ unsigned int data1[MERGE_THREADBLOCK_SIZE];
__shared__ unsigned int data2[MERGE_THREADBLOCK_SIZE];
plus<unsigned int> op;
reduce<MERGE_THREADBLOCK_SIZE>(smem_tuple(data0, data1, data2), thrust::tie(sum0, sum1, sum2), threadIdx.x, thrust::make_tuple(op, op, op));
if(threadIdx.x == 0)
{
hist0[blockIdx.x] = sum0;
hist1[blockIdx.x] = sum1;
hist2[blockIdx.x] = sum2;
}
}
template <typename PT, typename CT>
void calcDiffHistogram_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame,
unsigned int* hist0, unsigned int* hist1, unsigned int* hist2,
unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2,
bool cc20, cudaStream_t stream)
{
const int HISTOGRAM_WARP_COUNT = cc20 ? 6 : 4;
const int HISTOGRAM_THREADBLOCK_SIZE = HISTOGRAM_WARP_COUNT * WARP_SIZE;
calcPartialHistogram<PT, CT><<<PARTIAL_HISTOGRAM_COUNT, HISTOGRAM_THREADBLOCK_SIZE, 0, stream>>>(
(PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame, partialBuf0, partialBuf1, partialBuf2);
cudaSafeCall( cudaGetLastError() );
mergeHistogram<<<HISTOGRAM_BIN_COUNT, MERGE_THREADBLOCK_SIZE, 0, stream>>>(partialBuf0, partialBuf1, partialBuf2, hist0, hist1, hist2);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void calcDiffHistogram_gpu<uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, cudaStream_t stream);
template void calcDiffHistogram_gpu<uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, cudaStream_t stream);
template void calcDiffHistogram_gpu<uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, cudaStream_t stream);
template void calcDiffHistogram_gpu<uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, unsigned int* hist0, unsigned int* hist1, unsigned int* hist2, unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2, bool cc20, cudaStream_t stream);
/////////////////////////////////////////////////////////////////////////
// calcDiffThreshMask
template <typename PT, typename CT>
__global__ void calcDiffThreshMask(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame, uchar3 bestThres, PtrStepb changeMask)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
        if (y >= prevFrame.rows || x >= prevFrame.cols)
return;
PT prevVal = prevFrame(y, x);
CT curVal = curFrame(y, x);
int3 diff = make_int3(
::abs(curVal.x - prevVal.x),
::abs(curVal.y - prevVal.y),
::abs(curVal.z - prevVal.z)
);
if (diff.x > bestThres.x || diff.y > bestThres.y || diff.z > bestThres.z)
changeMask(y, x) = 255;
}
template <typename PT, typename CT>
void calcDiffThreshMask_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
calcDiffThreshMask<PT, CT><<<grid, block, 0, stream>>>((PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame, bestThres, changeMask);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void calcDiffThreshMask_gpu<uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream);
template void calcDiffThreshMask_gpu<uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream);
template void calcDiffThreshMask_gpu<uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream);
template void calcDiffThreshMask_gpu<uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream);
/////////////////////////////////////////////////////////////////////////
// bgfgClassification
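    // Per-pixel FGD statistics tables, shared with the kernels below through constant memory.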
__constant__ BGPixelStat c_stat;
void setBGPixelStat(const BGPixelStat& stat)
{
cudaSafeCall( cudaMemcpyToSymbol(c_stat, &stat, sizeof(BGPixelStat)) );
}
template <typename T> struct Output;
template <> struct Output<uchar3>
{
static __device__ __forceinline__ uchar3 make(uchar v0, uchar v1, uchar v2)
{
return make_uchar3(v0, v1, v2);
}
};
template <> struct Output<uchar4>
{
static __device__ __forceinline__ uchar4 make(uchar v0, uchar v1, uchar v2)
{
return make_uchar4(v0, v1, v2, 255);
}
};
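    // Bayes classification: a pixel flagged by the frame-difference masks stays foreground
    // unless the stored colour / colour-co-occurrence vectors explain it as background,
    // i.e. 2 * P(v|b) * P(b) > P(v).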
template <typename PT, typename CT, typename OT>
__global__ void bgfgClassification(const PtrStepSz<PT> prevFrame, const PtrStep<CT> curFrame,
const PtrStepb Ftd, const PtrStepb Fbd, PtrStepb foreground,
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc)
{
const int i = blockIdx.y * blockDim.y + threadIdx.y;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= prevFrame.rows || j >= prevFrame.cols)
return;
if (Fbd(i, j) || Ftd(i, j))
{
float Pb = 0.0f;
float Pv = 0.0f;
float Pvb = 0.0f;
int val = 0;
// Is it a motion pixel?
if (Ftd(i, j))
{
if (!c_stat.is_trained_dyn_model(i, j))
val = 1;
else
{
PT prevVal = prevFrame(i, j);
CT curVal = curFrame(i, j);
// Compare with stored CCt vectors:
for (int k = 0; k < N1cc && c_stat.PV_CC(i, j, k) > alpha2; ++k)
{
OT v1 = c_stat.V1_CC<OT>(i, j, k);
OT v2 = c_stat.V2_CC<OT>(i, j, k);
if (::abs(v1.x - prevVal.x) <= deltaCC &&
::abs(v1.y - prevVal.y) <= deltaCC &&
::abs(v1.z - prevVal.z) <= deltaCC &&
::abs(v2.x - curVal.x) <= deltaCC &&
::abs(v2.y - curVal.y) <= deltaCC &&
::abs(v2.z - curVal.z) <= deltaCC)
{
Pv += c_stat.PV_CC(i, j, k);
Pvb += c_stat.PVB_CC(i, j, k);
}
}
Pb = c_stat.Pbcc(i, j);
if (2 * Pvb * Pb <= Pv)
val = 1;
}
}
else if(c_stat.is_trained_st_model(i, j))
{
CT curVal = curFrame(i, j);
// Compare with stored Ct vectors:
for (int k = 0; k < N1c && c_stat.PV_C(i, j, k) > alpha2; ++k)
{
OT v = c_stat.V_C<OT>(i, j, k);
if (::abs(v.x - curVal.x) <= deltaC &&
::abs(v.y - curVal.y) <= deltaC &&
::abs(v.z - curVal.z) <= deltaC)
{
Pv += c_stat.PV_C(i, j, k);
Pvb += c_stat.PVB_C(i, j, k);
}
}
Pb = c_stat.Pbc(i, j);
if (2 * Pvb * Pb <= Pv)
val = 1;
}
// Update foreground:
foreground(i, j) = static_cast<uchar>(val);
} // end if( change detection...
}
template <typename PT, typename CT, typename OT>
void bgfgClassification_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground,
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(bgfgClassification<PT, CT, OT>, cudaFuncCachePreferL1) );
bgfgClassification<PT, CT, OT><<<grid, block, 0, stream>>>((PtrStepSz<PT>)prevFrame, (PtrStepSz<CT>)curFrame,
Ftd, Fbd, foreground,
deltaC, deltaCC, alpha2, N1c, N1cc);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void bgfgClassification_gpu<uchar3, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar3, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template void bgfgClassification_gpu<uchar4, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
////////////////////////////////////////////////////////////////////////////
// updateBackgroundModel
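    // Updates the per-pixel colour (Ct) and colour-co-occurrence (CCt) tables with exponential
    // forgetting, keeps them sorted by probability, detects "once-off" background changes and
    // refreshes the reference background image with an IIR filter.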
template <typename PT, typename CT, typename OT, class PrevFramePtr2D, class CurFramePtr2D, class FtdPtr2D, class FbdPtr2D>
__global__ void updateBackgroundModel(int cols, int rows, const PrevFramePtr2D prevFrame, const CurFramePtr2D curFrame, const FtdPtr2D Ftd, const FbdPtr2D Fbd,
PtrStepb foreground, PtrStep<OT> background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T)
{
const int i = blockIdx.y * blockDim.y + threadIdx.y;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= rows || j >= cols)
return;
const float MIN_PV = 1e-10f;
const uchar is_trained_dyn_model = c_stat.is_trained_dyn_model(i, j);
if (Ftd(i, j) || !is_trained_dyn_model)
{
const float alpha = is_trained_dyn_model ? alpha2 : alpha3;
float Pbcc = c_stat.Pbcc(i, j);
//update Pb
Pbcc *= (1.0f - alpha);
if (!foreground(i, j))
{
Pbcc += alpha;
}
int min_dist = numeric_limits<int>::max();
int indx = -1;
PT prevVal = prevFrame(i, j);
CT curVal = curFrame(i, j);
// Find best Vi match:
for (int k = 0; k < N2cc; ++k)
{
float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
if (PV_CC < MIN_PV)
{
c_stat.PV_CC(i, j, k) = 0;
c_stat.PVB_CC(i, j, k) = 0;
continue;
}
c_stat.PV_CC(i, j, k) = PV_CC * (1.0f - alpha);
c_stat.PVB_CC(i, j, k) = c_stat.PVB_CC(i, j, k) * (1.0f - alpha);
OT v1 = c_stat.V1_CC<OT>(i, j, k);
int3 val1 = make_int3(
::abs(v1.x - prevVal.x),
::abs(v1.y - prevVal.y),
::abs(v1.z - prevVal.z)
);
OT v2 = c_stat.V2_CC<OT>(i, j, k);
int3 val2 = make_int3(
::abs(v2.x - curVal.x),
::abs(v2.y - curVal.y),
::abs(v2.z - curVal.z)
);
int dist = val1.x + val1.y + val1.z + val2.x + val2.y + val2.z;
if (dist < min_dist &&
val1.x <= deltaCC && val1.y <= deltaCC && val1.z <= deltaCC &&
val2.x <= deltaCC && val2.y <= deltaCC && val2.z <= deltaCC)
{
min_dist = dist;
indx = k;
}
}
if (indx < 0)
{
// Replace N2th elem in the table by new feature:
indx = N2cc - 1;
c_stat.PV_CC(i, j, indx) = alpha;
c_stat.PVB_CC(i, j, indx) = alpha;
                //update Vt
c_stat.V1_CC<OT>(i, j, indx) = Output<OT>::make(prevVal.x, prevVal.y, prevVal.z);
c_stat.V2_CC<OT>(i, j, indx) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
else
{
// Update:
c_stat.PV_CC(i, j, indx) += alpha;
if (!foreground(i, j))
{
c_stat.PVB_CC(i, j, indx) += alpha;
}
}
//re-sort CCt table by Pv
const float PV_CC_indx = c_stat.PV_CC(i, j, indx);
const float PVB_CC_indx = c_stat.PVB_CC(i, j, indx);
const OT V1_CC_indx = c_stat.V1_CC<OT>(i, j, indx);
const OT V2_CC_indx = c_stat.V2_CC<OT>(i, j, indx);
for (int k = 0; k < indx; ++k)
{
if (c_stat.PV_CC(i, j, k) <= PV_CC_indx)
{
//shift elements
float Pv_tmp1;
float Pv_tmp2 = PV_CC_indx;
float Pvb_tmp1;
float Pvb_tmp2 = PVB_CC_indx;
OT v1_tmp1;
OT v1_tmp2 = V1_CC_indx;
OT v2_tmp1;
OT v2_tmp2 = V2_CC_indx;
for (int l = k; l <= indx; ++l)
{
Pv_tmp1 = c_stat.PV_CC(i, j, l);
c_stat.PV_CC(i, j, l) = Pv_tmp2;
Pv_tmp2 = Pv_tmp1;
Pvb_tmp1 = c_stat.PVB_CC(i, j, l);
c_stat.PVB_CC(i, j, l) = Pvb_tmp2;
Pvb_tmp2 = Pvb_tmp1;
v1_tmp1 = c_stat.V1_CC<OT>(i, j, l);
c_stat.V1_CC<OT>(i, j, l) = v1_tmp2;
v1_tmp2 = v1_tmp1;
v2_tmp1 = c_stat.V2_CC<OT>(i, j, l);
c_stat.V2_CC<OT>(i, j, l) = v2_tmp2;
v2_tmp2 = v2_tmp1;
}
break;
}
}
float sum1 = 0.0f;
float sum2 = 0.0f;
//check "once-off" changes
for (int k = 0; k < N1cc; ++k)
{
const float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
sum1 += PV_CC;
sum2 += c_stat.PVB_CC(i, j, k);
}
if (sum1 > T)
c_stat.is_trained_dyn_model(i, j) = 1;
float diff = sum1 - Pbcc * sum2;
// Update stat table:
if (diff > T)
{
//new BG features are discovered
for (int k = 0; k < N1cc; ++k)
{
const float PV_CC = c_stat.PV_CC(i, j, k);
if (!PV_CC)
break;
c_stat.PVB_CC(i, j, k) = (PV_CC - Pbcc * c_stat.PVB_CC(i, j, k)) / (1.0f - Pbcc);
}
}
c_stat.Pbcc(i, j) = Pbcc;
}
// Handle "stationary" pixel:
if (!Ftd(i, j))
{
const float alpha = c_stat.is_trained_st_model(i, j) ? alpha2 : alpha3;
float Pbc = c_stat.Pbc(i, j);
//update Pb
Pbc *= (1.0f - alpha);
if (!foreground(i, j))
{
Pbc += alpha;
}
int min_dist = numeric_limits<int>::max();
int indx = -1;
CT curVal = curFrame(i, j);
//find best Vi match
for (int k = 0; k < N2c; ++k)
{
float PV_C = c_stat.PV_C(i, j, k);
if (PV_C < MIN_PV)
{
c_stat.PV_C(i, j, k) = 0;
c_stat.PVB_C(i, j, k) = 0;
continue;
}
// Exponential decay of memory
c_stat.PV_C(i, j, k) = PV_C * (1.0f - alpha);
c_stat.PVB_C(i, j, k) = c_stat.PVB_C(i, j, k) * (1.0f - alpha);
OT v = c_stat.V_C<OT>(i, j, k);
int3 val = make_int3(
::abs(v.x - curVal.x),
::abs(v.y - curVal.y),
::abs(v.z - curVal.z)
);
int dist = val.x + val.y + val.z;
if (dist < min_dist && val.x <= deltaC && val.y <= deltaC && val.z <= deltaC)
{
min_dist = dist;
indx = k;
}
}
if (indx < 0)
{
                //N2th elem in the table is replaced by a new feature
indx = N2c - 1;
c_stat.PV_C(i, j, indx) = alpha;
c_stat.PVB_C(i, j, indx) = alpha;
                //update Vt
c_stat.V_C<OT>(i, j, indx) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
else
{
//update
c_stat.PV_C(i, j, indx) += alpha;
if (!foreground(i, j))
{
c_stat.PVB_C(i, j, indx) += alpha;
}
}
//re-sort Ct table by Pv
const float PV_C_indx = c_stat.PV_C(i, j, indx);
const float PVB_C_indx = c_stat.PVB_C(i, j, indx);
OT V_C_indx = c_stat.V_C<OT>(i, j, indx);
for (int k = 0; k < indx; ++k)
{
if (c_stat.PV_C(i, j, k) <= PV_C_indx)
{
//shift elements
float Pv_tmp1;
float Pv_tmp2 = PV_C_indx;
float Pvb_tmp1;
float Pvb_tmp2 = PVB_C_indx;
OT v_tmp1;
OT v_tmp2 = V_C_indx;
for (int l = k; l <= indx; ++l)
{
Pv_tmp1 = c_stat.PV_C(i, j, l);
c_stat.PV_C(i, j, l) = Pv_tmp2;
Pv_tmp2 = Pv_tmp1;
Pvb_tmp1 = c_stat.PVB_C(i, j, l);
c_stat.PVB_C(i, j, l) = Pvb_tmp2;
Pvb_tmp2 = Pvb_tmp1;
v_tmp1 = c_stat.V_C<OT>(i, j, l);
c_stat.V_C<OT>(i, j, l) = v_tmp2;
v_tmp2 = v_tmp1;
}
break;
}
}
// Check "once-off" changes:
float sum1 = 0.0f;
float sum2 = 0.0f;
for (int k = 0; k < N1c; ++k)
{
const float PV_C = c_stat.PV_C(i, j, k);
if (!PV_C)
break;
sum1 += PV_C;
sum2 += c_stat.PVB_C(i, j, k);
}
if (sum1 > T)
c_stat.is_trained_st_model(i, j) = 1;
float diff = sum1 - Pbc * sum2;
// Update stat table:
if (diff > T)
{
//new BG features are discovered
for (int k = 0; k < N1c; ++k)
{
const float PV_C = c_stat.PV_C(i, j, k);
if (!PV_C)
break;
c_stat.PVB_C(i, j, k) = (PV_C - Pbc * c_stat.PVB_C(i, j, k)) / (1.0f - Pbc);
}
c_stat.Pbc(i, j) = 1.0f - Pbc;
}
else
{
c_stat.Pbc(i, j) = Pbc;
}
} // if !(change detection) at pixel (i,j)
// Update the reference BG image:
if (!foreground(i, j))
{
CT curVal = curFrame(i, j);
if (!Ftd(i, j) && !Fbd(i, j))
{
// Apply IIR filter:
OT oldVal = background(i, j);
int3 newVal = make_int3(
__float2int_rn(oldVal.x * (1.0f - alpha1) + curVal.x * alpha1),
__float2int_rn(oldVal.y * (1.0f - alpha1) + curVal.y * alpha1),
__float2int_rn(oldVal.z * (1.0f - alpha1) + curVal.z * alpha1)
);
background(i, j) = Output<OT>::make(
static_cast<uchar>(newVal.x),
static_cast<uchar>(newVal.y),
static_cast<uchar>(newVal.z)
);
}
else
{
background(i, j) = Output<OT>::make(curVal.x, curVal.y, curVal.z);
}
}
}
template <typename PT, typename CT, typename OT>
struct UpdateBackgroundModel
{
static void call(PtrStepSz<PT> prevFrame, PtrStepSz<CT> curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSz<OT> background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T,
cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(prevFrame.cols, block.x), divUp(prevFrame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(updateBackgroundModel<PT, CT, OT, PtrStep<PT>, PtrStep<CT>, PtrStepb, PtrStepb>, cudaFuncCachePreferL1) );
updateBackgroundModel<PT, CT, OT, PtrStep<PT>, PtrStep<CT>, PtrStepb, PtrStepb><<<grid, block, 0, stream>>>(
prevFrame.cols, prevFrame.rows,
prevFrame, curFrame,
Ftd, Fbd, foreground, background,
deltaC, deltaCC, alpha1, alpha2, alpha3, N1c, N1cc, N2c, N2cc, T);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
template <typename PT, typename CT, typename OT>
void updateBackgroundModel_gpu(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T,
cudaStream_t stream)
{
UpdateBackgroundModel<PT, CT, OT>::call(PtrStepSz<PT>(prevFrame), PtrStepSz<CT>(curFrame), Ftd, Fbd, foreground, PtrStepSz<OT>(background),
deltaC, deltaCC, alpha1, alpha2, alpha3, N1c, N1cc, N2c, N2cc, T, stream);
}
template void updateBackgroundModel_gpu<uchar3, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar3, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar3, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar3, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar4, uchar3>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
template void updateBackgroundModel_gpu<uchar4, uchar4, uchar4>(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground, PtrStepSzb background, int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
2c9d518972f790494e309e4f64b311d7588c605d.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> c, Thu Oct 8 23:05:50 2020
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <hip/hip_runtime_api.h>
#define PRECISION_c
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#if (TORCH_HIP_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
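// The kernels below invert one ISAI pattern block per matrix row: each warp thread gathers its
// slice of the sparsity-pattern sub-system into registers and the small triangular system is
// solved with warp shuffles (pattern sizes up to MaxBlockSize).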
template <int block_size>
__device__ void
magma_clowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
            l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex top = magmablas_cshfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
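// Dispatch kernel: reads each row's pattern size and forwards to the matching register-blocked
// solver through the template-recursive select above.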
__global__ void
magma_clowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_clowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
            l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = magmablas_cshfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
    This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_c_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_c_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_c_matrix L,
magma_c_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (TORCH_HIP_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
hipLaunchKernelGGL(( magma_clowerisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
hipLaunchKernelGGL(( magma_cupperisai_regs_inv_switch), dim3(r2grid), dim3(r2block), 0, queue->cuda_stream() ,
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
| 2c9d518972f790494e309e4f64b311d7588c605d.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Hartwig Anzt
@author Goran Flegar
@generated from sparse/blas/zgeisai_batched32.cu, normal z -> c, Thu Oct 8 23:05:50 2020
*/
#include "magmasparse_internal.h"
#include "shuffle.cuh"
#include <cuda_profiler_api.h>
#define PRECISION_c
#define COMPLEX
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
#include <cuda.h> // for CUDA_VERSION
#if (CUDA_VERSION >= 7000) // only for cuda>6000
const int MaxBlockSize = 32;
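// The kernels below invert one ISAI pattern block per matrix row: each warp thread gathers its
// slice of the sparsity-pattern sub-system into registers and the small triangular system is
// solved with warp shuffles (pattern sizes up to MaxBlockSize).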
template <int block_size>
__device__ void
magma_clowerisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
            l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = ( tid == 0 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = 0; k < block_size; k++)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex top = magmablas_cshfl(rB, k%block_size);
if ( tid > k)
rB -= (top*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_clowerisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_clowerisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_clowerisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
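// Dispatch kernel: reads each row's pattern size and forwards to the matching register-blocked
// solver through the template-recursive select above.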
__global__ void
magma_clowerisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_clowerisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <int block_size>
__device__ void
magma_cupperisai_regs_inv_kernel(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
#if (defined( REAL ) && ( __CUDA_ARCH__ >= 300 ))
int tid = threadIdx.x;
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( tid >= block_size )
return;
if( row >= num_rows )
return;
// only if within the size
int mstart = Mrow[ row ];
int mlim = Mrow[ row ]-1;
magmaFloatComplex rB; // registers for trsv
magmaFloatComplex dA[ block_size ]; // registers for trisystem
magmaFloatComplex rA;
// set dA to 0
#pragma unroll
for( int j = 0; j < block_size; j++ ){
dA[ j ] = MAGMA_C_ZERO;
}
// generate the triangular systems
int t = Mcol[ mstart + tid ];
int k = Arow[ t+1 ] - 1;
int alim = Arow[ t ]-1;
int l = Mrow[ row+1 ]-1;
int idx = block_size-1;
while( k > alim && l > mlim ){ // stop once this column is done
int mcol = Mcol[ l ];
int acol = Acol[k];
if( mcol == acol ){ //match
dA[ idx ] = Aval[ k ];
k--;
l--;
idx--;
} else if( acol > mcol ){// need to check next element
k--;
} else { // element does not exist, i.e. l < LC.col[k]
            l--; // check next element in the sparsity pattern
idx--; // leave this element equal zero
}
}
// second: solve the triangular systems - in registers
    // we know what the RHS looks like
rB = ( tid == block_size-1 ) ? MAGMA_C_ONE : MAGMA_C_ZERO;
// Triangular solve in regs.
#pragma unroll
for (int k = block_size-1; k >-1; k--)
{
rA = dA[ k ];
if (k%block_size == tid)
rB /= rA;
magmaFloatComplex bottom = magmablas_cshfl(rB, k%block_size);
if ( tid < k)
rB -= (bottom*rA);
}
// Drop B to dev memory - in ISAI preconditioner M
Mval[ mstart + tid ] = rB;
#endif
}
template <int block_size>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
if (N == block_size) {
magma_cupperisai_regs_inv_kernel<block_size>(
num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
} else {
magma_cupperisai_regs_inv_select<block_size-1>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
template <>
__device__ __forceinline__ void
magma_cupperisai_regs_inv_select<0>(
int N,
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
;
// out of range - do nothing.
// printf("%% error: size out of range: %d\n", N);
}
__global__ void
magma_cupperisai_regs_inv_switch(
magma_int_t num_rows,
const magma_index_t * __restrict__ Arow,
const magma_index_t * __restrict__ Acol,
const magmaFloatComplex * __restrict__ Aval,
magma_index_t *Mrow,
magma_index_t *Mcol,
magmaFloatComplex *Mval )
{
int row = gridDim.x*blockIdx.y*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y;
if( row < num_rows ){
int N = Mrow[ row+1 ] - Mrow[ row ];
magma_cupperisai_regs_inv_select<MaxBlockSize>(
N, num_rows, Arow, Acol, Aval, Mrow, Mcol, Mval);
}
}
#endif
/**
Purpose
-------
    This routine is designed to combine all kernels into one.
Arguments
---------
@param[in]
uplotype magma_uplo_t
lower or upper triangular
@param[in]
transtype magma_trans_t
possibility for transposed matrix
@param[in]
diagtype magma_diag_t
unit diagonal or not
@param[in]
L magma_c_matrix
triangular factor for which the ISAI matrix is computed.
Col-Major CSR storage.
@param[in,out]
M magma_c_matrix*
SPAI preconditioner CSR col-major
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cisai_generator_regs(
magma_uplo_t uplotype,
magma_trans_t transtype,
magma_diag_t diagtype,
magma_c_matrix L,
magma_c_matrix *M,
magma_queue_t queue )
{
magma_int_t info = 0;
#if (CUDA_VERSION >= 7000)
magma_int_t arch = magma_getdevice_arch();
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
int r2bs1 = 32;
int r2bs2 = 4;
int necessary_blocks = magma_ceildiv(L.num_rows, r2bs2);
int r2dg1 = min( int( sqrt( float( necessary_blocks ))), 65535 );
int r2dg2 = min(magma_ceildiv( necessary_blocks, r2dg1 ), 65535);
int r2dg3 = magma_ceildiv( necessary_blocks, r2dg1*r2dg2 );
dim3 r2block( r2bs1, r2bs2, 1 );
dim3 r2grid( r2dg1, r2dg2, r2dg3 );
if (arch >= 300) {
if (uplotype == MagmaLower) { //printf("in here lower new kernel\n");
magma_clowerisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
else { // printf("in here upper new kernel\n");
magma_cupperisai_regs_inv_switch<<< r2grid, r2block, 0, queue->cuda_stream() >>>(
L.num_rows,
L.row,
L.col,
L.val,
M->row,
M->col,
M->val );
}
}
else {
printf( "%% error: ISAI preconditioner requires CUDA ARCHITECTURE >= 300.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
}
#else
// CUDA < 7000
printf( "%% error: ISAI preconditioner requires CUDA >= 7.0.\n" );
info = MAGMA_ERR_NOT_SUPPORTED;
#endif
return info;
}
|
74d8aee70f52e9a041756550594c90d8941a2e09.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
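// One block per vertex: threads compare this vertex's row of the matrix against all other rows,
// count the rows equal to it (including the vertex itself) in shared memory, and thread 0 bins
// that count into the histogram.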
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | 74d8aee70f52e9a041756550594c90d8941a2e09.cu | #include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
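// One block per vertex: threads compare this vertex's row of the matrix against all other rows,
// count the rows equal to it (including the vertex itself) in shared memory, and thread 0 bins
// that count into the histogram.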
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} |
3eeac798dce6f56d1073c0d4f93f34d5da29017e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is used to make a `pair_iterator` for calling `copy_if_else`.
*/
template <bool has_nulls = false>
struct nullable_index_accessor {
cudf::detail::input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
/**
* @brief Create an accessor from a column_view.
*/
nullable_index_accessor(column_view const& col) : null_mask{col.null_mask()}, offset{col.offset()}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = cudf::detail::indexalator_factory::make_input_iterator(col);
}
/**
* @brief Create an accessor from a scalar.
*/
nullable_index_accessor(scalar const& input)
{
iter = cudf::detail::indexalator_factory::make_input_iterator(input);
}
__device__ thrust::pair<size_type, bool> operator()(size_type i) const
{
return {iter[i], (has_nulls ? bit_is_set(null_mask, i + offset) : true)};
}
};
/**
* @brief Create an index iterator with a nullable index accessor.
*/
template <bool has_nulls>
auto make_nullable_index_iterator(column_view const& col)
{
return thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
nullable_index_accessor<has_nulls>{col});
}
/**
* @brief Create an index iterator with a nullable index accessor for a scalar.
*/
auto make_scalar_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
nullable_index_accessor<false>{input});
}
/**
* @brief This utility uses `copy_if_else` to replace null entries using the input bitmask as a
* predicate.
*
* The predicate identifies which column row to copy from and the bitmask specifies which rows
* are null. Since the `copy_if_else` accepts iterators, we also supply it with pair-iterators
* created from indexalators and the validity masks.
*
 * @tparam ReplacementIter must be a pair iterator of (index,valid).
*
* @param input lhs for `copy_if_else`
* @param replacement_iter rhs for `copy_if_else`
* @param mr Device memory resource used to allocate the returned column's device memory.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Always returns column of type INT32 (size_type)
*/
template <typename ReplacementIter>
std::unique_ptr<column> replace_indices(column_view const& input,
ReplacementIter replacement_iter,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto const input_view = column_device_view::create(input, stream);
auto const d_input = *input_view;
auto predicate = [d_input] __device__(auto i) { return d_input.is_valid(i); };
auto input_pair_iterator = make_nullable_index_iterator<true>(input);
return cudf::detail::copy_if_else(true,
input_pair_iterator,
input_pair_iterator + input.size(),
replacement_iter,
predicate,
mr,
stream);
}
} // namespace
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::column_view
* const&,rmm::mr::device_memory_resource*,hipStream_t)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
dictionary_column_view const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (input.size() == 0) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input.parent()); }
CUDF_EXPECTS(input.keys().type() == replacement.keys().type(), "keys must match");
CUDF_EXPECTS(replacement.size() == input.size(), "column sizes must match");
// first combine the keys so both input dictionaries have the same set
auto matched = match_dictionaries({input, replacement}, mr, stream);
// now build the new indices by doing replace-null using the updated input indices
auto const input_indices =
dictionary_column_view(matched.front()->view()).get_indices_annotated();
auto const repl_indices = dictionary_column_view(matched.back()->view()).get_indices_annotated();
auto new_indices =
repl_indices.has_nulls()
? replace_indices(input_indices, make_nullable_index_iterator<true>(repl_indices), mr, stream)
: replace_indices(
input_indices, make_nullable_index_iterator<false>(repl_indices), mr, stream);
// auto keys_column = ;
return make_dictionary_column(
std::move(matched.front()->release().children.back()), std::move(new_indices), mr, stream);
}
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::scalar
* const&,rmm::mr::device_memory_resource*,hipStream_t)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
scalar const& replacement,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (input.size() == 0) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls() || !replacement.is_valid()) {
return std::make_unique<cudf::column>(input.parent());
}
CUDF_EXPECTS(input.keys().type() == replacement.type(), "keys must match scalar type");
  // first add the replacement to the keys so only the indices need to be processed
auto const default_mr = rmm::mr::get_current_device_resource();
auto input_matched = dictionary::detail::add_keys(
input, make_column_from_scalar(replacement, 1, default_mr, stream)->view(), mr, stream);
auto const input_view = dictionary_column_view(input_matched->view());
auto const scalar_index = get_index(input_view, replacement, default_mr, stream);
// now build the new indices by doing replace-null on the updated indices
auto const input_indices = input_view.get_indices_annotated();
auto new_indices =
replace_indices(input_indices, make_scalar_iterator(*scalar_index), mr, stream);
new_indices->set_null_mask(rmm::device_buffer{0, stream, mr}, 0);
return make_dictionary_column(
std::move(input_matched->release().children.back()), std::move(new_indices), mr, stream);
}
} // namespace detail
} // namespace dictionary
} // namespace cudf
| 3eeac798dce6f56d1073c0d4f93f34d5da29017e.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/copy.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <cudf/detail/indexalator.cuh>
#include <cudf/detail/unary.hpp>
#include <cudf/dictionary/detail/encode.hpp>
#include <cudf/dictionary/detail/replace.hpp>
#include <cudf/dictionary/detail/search.hpp>
#include <cudf/dictionary/detail/update_keys.hpp>
#include <cudf/dictionary/dictionary_factories.hpp>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
namespace cudf {
namespace dictionary {
namespace detail {
namespace {
/**
* @brief An index accessor that returns a validity flag along with the index value.
*
* This is used to make a `pair_iterator` for calling `copy_if_else`.
*/
template <bool has_nulls = false>
struct nullable_index_accessor {
cudf::detail::input_indexalator iter;
bitmask_type const* null_mask{};
size_type const offset{};
/**
* @brief Create an accessor from a column_view.
*/
nullable_index_accessor(column_view const& col) : null_mask{col.null_mask()}, offset{col.offset()}
{
if (has_nulls) { CUDF_EXPECTS(col.nullable(), "Unexpected non-nullable column."); }
iter = cudf::detail::indexalator_factory::make_input_iterator(col);
}
/**
* @brief Create an accessor from a scalar.
*/
nullable_index_accessor(scalar const& input)
{
iter = cudf::detail::indexalator_factory::make_input_iterator(input);
}
__device__ thrust::pair<size_type, bool> operator()(size_type i) const
{
return {iter[i], (has_nulls ? bit_is_set(null_mask, i + offset) : true)};
}
};
/**
* @brief Create an index iterator with a nullable index accessor.
*/
template <bool has_nulls>
auto make_nullable_index_iterator(column_view const& col)
{
return thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0),
nullable_index_accessor<has_nulls>{col});
}
/**
* @brief Create an index iterator with a nullable index accessor for a scalar.
*/
auto make_scalar_iterator(scalar const& input)
{
return thrust::make_transform_iterator(thrust::make_constant_iterator<size_type>(0),
nullable_index_accessor<false>{input});
}
/**
* @brief This utility uses `copy_if_else` to replace null entries using the input bitmask as a
* predicate.
*
* The predicate identifies which column row to copy from and the bitmask specifies which rows
* are null. Since the `copy_if_else` accepts iterators, we also supply it with pair-iterators
* created from indexalators and the validity masks.
*
 * @tparam ReplacementIter must be a pair iterator of (index,valid).
*
* @param input lhs for `copy_if_else`
* @param replacement_iter rhs for `copy_if_else`
* @param mr Device memory resource used to allocate the returned column's device memory.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @return Always returns column of type INT32 (size_type)
*/
template <typename ReplacementIter>
std::unique_ptr<column> replace_indices(column_view const& input,
ReplacementIter replacement_iter,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto const input_view = column_device_view::create(input, stream);
auto const d_input = *input_view;
auto predicate = [d_input] __device__(auto i) { return d_input.is_valid(i); };
auto input_pair_iterator = make_nullable_index_iterator<true>(input);
return cudf::detail::copy_if_else(true,
input_pair_iterator,
input_pair_iterator + input.size(),
replacement_iter,
predicate,
mr,
stream);
}
} // namespace
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::column_view
* const&,rmm::mr::device_memory_resource*,cudaStream_t)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
dictionary_column_view const& replacement,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if (input.size() == 0) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls()) { return std::make_unique<cudf::column>(input.parent()); }
CUDF_EXPECTS(input.keys().type() == replacement.keys().type(), "keys must match");
CUDF_EXPECTS(replacement.size() == input.size(), "column sizes must match");
// first combine the keys so both input dictionaries have the same set
auto matched = match_dictionaries({input, replacement}, mr, stream);
// now build the new indices by doing replace-null using the updated input indices
auto const input_indices =
dictionary_column_view(matched.front()->view()).get_indices_annotated();
auto const repl_indices = dictionary_column_view(matched.back()->view()).get_indices_annotated();
auto new_indices =
repl_indices.has_nulls()
? replace_indices(input_indices, make_nullable_index_iterator<true>(repl_indices), mr, stream)
: replace_indices(
input_indices, make_nullable_index_iterator<false>(repl_indices), mr, stream);
// auto keys_column = ;
return make_dictionary_column(
std::move(matched.front()->release().children.back()), std::move(new_indices), mr, stream);
}
/**
* @copydoc cudf::dictionary::detail::replace_nulls(cudf::column_view const&,cudf::scalar
* const&,rmm::mr::device_memory_resource*,cudaStream_t)
*/
std::unique_ptr<column> replace_nulls(dictionary_column_view const& input,
scalar const& replacement,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if (input.size() == 0) { return cudf::empty_like(input.parent()); }
if (!input.has_nulls() || !replacement.is_valid()) {
return std::make_unique<cudf::column>(input.parent());
}
CUDF_EXPECTS(input.keys().type() == replacement.type(), "keys must match scalar type");
  // first add the replacement to the keys so only the indices need to be processed
auto const default_mr = rmm::mr::get_current_device_resource();
auto input_matched = dictionary::detail::add_keys(
input, make_column_from_scalar(replacement, 1, default_mr, stream)->view(), mr, stream);
auto const input_view = dictionary_column_view(input_matched->view());
auto const scalar_index = get_index(input_view, replacement, default_mr, stream);
// now build the new indices by doing replace-null on the updated indices
auto const input_indices = input_view.get_indices_annotated();
auto new_indices =
replace_indices(input_indices, make_scalar_iterator(*scalar_index), mr, stream);
new_indices->set_null_mask(rmm::device_buffer{0, stream, mr}, 0);
return make_dictionary_column(
std::move(input_matched->release().children.back()), std::move(new_indices), mr, stream);
}
} // namespace detail
} // namespace dictionary
} // namespace cudf
|
5b15d16d6eb96c3580abad271adb5a69c489c0cf.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2021 XGBoost contributors
*/
#include <cstddef>
#include <cstdint>
#include <thrust/device_vector.h>
#include <vector>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/quantile.h"
#include "../helpers.h"
#include "gtest/gtest.h"
TEST(SumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
auto sum = dh::SumReduction(data.data().get(), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAtomicSizeT() {
size_t constexpr kThreads = 235;
dh::device_vector<size_t> out(1, 0);
auto d_out = dh::ToSpan(out);
dh::LaunchN(kThreads, [=] __device__(size_t idx) {
atomicAdd(&d_out[0], static_cast<size_t>(1));
});
ASSERT_EQ(out[0], kThreads);
}
TEST(AtomicAdd, SizeT) {
TestAtomicSizeT();
}
void TestSegmentID() {
std::vector<size_t> segments{0, 1, 3};
thrust::device_vector<size_t> d_segments(segments);
auto s_segments = dh::ToSpan(d_segments);
dh::LaunchN(1, [=]__device__(size_t idx) {
auto id = dh::SegmentId(s_segments, 0);
SPAN_CHECK(id == 0);
id = dh::SegmentId(s_segments, 1);
SPAN_CHECK(id == 1);
id = dh::SegmentId(s_segments, 2);
SPAN_CHECK(id == 1);
});
}
TEST(SegmentID, Basic) {
TestSegmentID();
}
TEST(SegmentedUnique, Basic) {
std::vector<float> values{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.62448811531066895f, 0.4f};
std::vector<size_t> segments{0, 3, 6};
thrust::device_vector<float> d_values(values);
thrust::device_vector<xgboost::bst_feature_t> d_segments{segments};
thrust::device_vector<xgboost::bst_feature_t> d_segs_out(d_segments.size());
thrust::device_vector<float> d_vals_out(d_values.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
CHECK_EQ(n_uniques, 5);
std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
for (auto i = 0 ; i < values_sol.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values_sol[i]);
}
std::vector<xgboost::bst_feature_t> segments_sol{0, 3, 5};
for (size_t i = 0; i < d_segments.size(); ++i) {
ASSERT_EQ(segments_sol[i], d_segs_out[i]);
}
d_segments[1] = 4;
d_segments[2] = 6;
n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
ASSERT_EQ(n_uniques, values.size());
for (auto i = 0 ; i < values.size(); i ++) {
ASSERT_EQ(d_vals_out[i], values[i]);
}
}
namespace {
using SketchEntry = xgboost::common::WQSummary<float, float>::Entry;
struct SketchUnique {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value - b.value == 0;
}
};
struct IsSorted {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value < b.value;
}
};
} // namespace
namespace xgboost {
void TestSegmentedUniqueRegression(std::vector<SketchEntry> values, size_t n_duplicated) {
std::vector<bst_feature_t> segments{0, static_cast<bst_feature_t>(values.size())};
thrust::device_vector<SketchEntry> d_values(values);
thrust::device_vector<bst_feature_t> d_segments(segments);
thrust::device_vector<bst_feature_t> d_segments_out(segments.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(), d_values.data().get(),
d_values.data().get() + d_values.size(), d_segments_out.data().get(), d_values.data().get(),
SketchUnique{});
ASSERT_EQ(n_uniques, values.size() - n_duplicated);
ASSERT_TRUE(thrust::is_sorted(thrust::device, d_values.begin(),
d_values.begin() + n_uniques, IsSorted{}));
ASSERT_EQ(segments.at(0), d_segments_out[0]);
ASSERT_EQ(segments.at(1), d_segments_out[1] + n_duplicated);
}
TEST(DeviceHelpers, Reduce) {
size_t kSize = std::numeric_limits<uint32_t>::max();
auto it = thrust::make_counting_iterator(0ul);
dh::XGBCachingDeviceAllocator<char> alloc;
auto batched = dh::Reduce(thrust::hip::par(alloc), it, it + kSize, 0ul, thrust::maximum<size_t>{});
CHECK_EQ(batched, kSize - 1);
}
TEST(SegmentedUnique, Regression) {
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 3);
}
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 0);
}
{
std::vector<SketchEntry> values;
TestSegmentedUniqueRegression(values, 0);
}
}
TEST(Allocator, OOM) {
auto size = dh::AvailableMemory(0) * 4;
ASSERT_THROW({dh::caching_device_vector<char> vec(size);}, dmlc::Error);
ASSERT_THROW({dh::device_vector<char> vec(size);}, dmlc::Error);
// Clear last error so we don't fail subsequent tests
hipGetLastError();
}
TEST(DeviceHelpers, ArgSort) {
dh::device_vector<float> values(20);
dh::Iota(dh::ToSpan(values)); // ascending
dh::device_vector<size_t> sorted_idx(20);
dh::ArgSort<false>(dh::ToSpan(values), dh::ToSpan(sorted_idx)); // sort to descending
ASSERT_TRUE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
dh::Iota(dh::ToSpan(values));
dh::device_vector<size_t> groups(3);
groups[0] = 0;
groups[1] = 10;
groups[2] = 20;
dh::SegmentedArgSort<false>(dh::ToSpan(values), dh::ToSpan(groups),
dh::ToSpan(sorted_idx));
ASSERT_FALSE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin(), sorted_idx.begin() + 10,
thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin() + 10, sorted_idx.end(),
thrust::greater<size_t>{}));
}
namespace {
// Signed 64-bit atomic add emulated by reinterpreting as unsigned; used as the reference in the test.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
return *reinterpret_cast<int64_t*>(&ret);
}
}
void TestAtomicAdd() {
size_t n_elements = 1024;
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
TEST(AtomicAdd, Int64) {
TestAtomicAdd();
}
} // namespace xgboost
| 5b15d16d6eb96c3580abad271adb5a69c489c0cf.cu | /*!
* Copyright 2017-2021 XGBoost contributors
*/
#include <cstddef>
#include <cstdint>
#include <thrust/device_vector.h>
#include <vector>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/quantile.h"
#include "../helpers.h"
#include "gtest/gtest.h"
TEST(SumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
auto sum = dh::SumReduction(data.data().get(), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAtomicSizeT() {
size_t constexpr kThreads = 235;
dh::device_vector<size_t> out(1, 0);
auto d_out = dh::ToSpan(out);
dh::LaunchN(kThreads, [=] __device__(size_t idx) {
atomicAdd(&d_out[0], static_cast<size_t>(1));
});
ASSERT_EQ(out[0], kThreads);
}
TEST(AtomicAdd, SizeT) {
TestAtomicSizeT();
}
void TestSegmentID() {
std::vector<size_t> segments{0, 1, 3};
thrust::device_vector<size_t> d_segments(segments);
auto s_segments = dh::ToSpan(d_segments);
dh::LaunchN(1, [=]__device__(size_t idx) {
auto id = dh::SegmentId(s_segments, 0);
SPAN_CHECK(id == 0);
id = dh::SegmentId(s_segments, 1);
SPAN_CHECK(id == 1);
id = dh::SegmentId(s_segments, 2);
SPAN_CHECK(id == 1);
});
}
TEST(SegmentID, Basic) {
TestSegmentID();
}
TEST(SegmentedUnique, Basic) {
std::vector<float> values{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.62448811531066895f, 0.4f};
std::vector<size_t> segments{0, 3, 6};
thrust::device_vector<float> d_values(values);
thrust::device_vector<xgboost::bst_feature_t> d_segments{segments};
thrust::device_vector<xgboost::bst_feature_t> d_segs_out(d_segments.size());
thrust::device_vector<float> d_vals_out(d_values.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
CHECK_EQ(n_uniques, 5);
std::vector<float> values_sol{0.1f, 0.2f, 0.3f, 0.62448811531066895f, 0.4f};
for (size_t i = 0; i < values_sol.size(); ++i) {
ASSERT_EQ(d_vals_out[i], values_sol[i]);
}
std::vector<xgboost::bst_feature_t> segments_sol{0, 3, 5};
for (size_t i = 0; i < d_segments.size(); ++i) {
ASSERT_EQ(segments_sol[i], d_segs_out[i]);
}
d_segments[1] = 4;
d_segments[2] = 6;
n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(),
d_values.data().get(), d_values.data().get() + d_values.size(),
d_segs_out.data().get(), d_vals_out.data().get(),
thrust::equal_to<float>{});
ASSERT_EQ(n_uniques, values.size());
for (size_t i = 0; i < values.size(); ++i) {
ASSERT_EQ(d_vals_out[i], values[i]);
}
}
namespace {
using SketchEntry = xgboost::common::WQSummary<float, float>::Entry;
struct SketchUnique {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value - b.value == 0;
}
};
struct IsSorted {
bool __device__ operator()(SketchEntry const& a, SketchEntry const& b) const {
return a.value < b.value;
}
};
} // namespace
namespace xgboost {
void TestSegmentedUniqueRegression(std::vector<SketchEntry> values, size_t n_duplicated) {
std::vector<bst_feature_t> segments{0, static_cast<bst_feature_t>(values.size())};
thrust::device_vector<SketchEntry> d_values(values);
thrust::device_vector<bst_feature_t> d_segments(segments);
thrust::device_vector<bst_feature_t> d_segments_out(segments.size());
size_t n_uniques = dh::SegmentedUnique(
d_segments.data().get(), d_segments.data().get() + d_segments.size(), d_values.data().get(),
d_values.data().get() + d_values.size(), d_segments_out.data().get(), d_values.data().get(),
SketchUnique{});
ASSERT_EQ(n_uniques, values.size() - n_duplicated);
ASSERT_TRUE(thrust::is_sorted(thrust::device, d_values.begin(),
d_values.begin() + n_uniques, IsSorted{}));
ASSERT_EQ(segments.at(0), d_segments_out[0]);
ASSERT_EQ(segments.at(1), d_segments_out[1] + n_duplicated);
}
TEST(DeviceHelpers, Reduce) {
size_t kSize = std::numeric_limits<uint32_t>::max();
auto it = thrust::make_counting_iterator(0ul);
dh::XGBCachingDeviceAllocator<char> alloc;
auto batched = dh::Reduce(thrust::cuda::par(alloc), it, it + kSize, 0ul, thrust::maximum<size_t>{});
CHECK_EQ(batched, kSize - 1);
}
TEST(SegmentedUnique, Regression) {
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3155, 3156, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 3);
}
{
std::vector<SketchEntry> values{{3149, 3150, 1, 0.62392902374267578},
{3151, 3152, 1, 0.62418866157531738},
{3152, 3153, 1, 0.62419462203979492},
{3153, 3154, 1, 0.62431186437606812},
{3154, 3155, 1, 0.6244881153106689453125},
{3157, 3158, 1, 0.62552797794342041},
{3158, 3159, 1, 0.6256556510925293},
{3159, 3160, 1, 0.62571090459823608},
{3160, 3161, 1, 0.62577134370803833}};
TestSegmentedUniqueRegression(values, 0);
}
{
std::vector<SketchEntry> values;
TestSegmentedUniqueRegression(values, 0);
}
}
TEST(Allocator, OOM) {
auto size = dh::AvailableMemory(0) * 4;
ASSERT_THROW({dh::caching_device_vector<char> vec(size);}, dmlc::Error);
ASSERT_THROW({dh::device_vector<char> vec(size);}, dmlc::Error);
// Clear last error so we don't fail subsequent tests
cudaGetLastError();
}
TEST(DeviceHelpers, ArgSort) {
dh::device_vector<float> values(20);
dh::Iota(dh::ToSpan(values)); // ascending
dh::device_vector<size_t> sorted_idx(20);
dh::ArgSort<false>(dh::ToSpan(values), dh::ToSpan(sorted_idx)); // sort to descending
ASSERT_TRUE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
dh::Iota(dh::ToSpan(values));
dh::device_vector<size_t> groups(3);
groups[0] = 0;
groups[1] = 10;
groups[2] = 20;
dh::SegmentedArgSort<false>(dh::ToSpan(values), dh::ToSpan(groups),
dh::ToSpan(sorted_idx));
ASSERT_FALSE(thrust::is_sorted(thrust::device, sorted_idx.begin(),
sorted_idx.end(), thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin(), sorted_idx.begin() + 10,
thrust::greater<size_t>{}));
ASSERT_TRUE(thrust::is_sorted(sorted_idx.begin() + 10, sorted_idx.end(),
thrust::greater<size_t>{}));
}
namespace {
// Signed 64-bit atomic add emulated by reinterpreting as unsigned; used as the reference in the test.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
return *reinterpret_cast<int64_t*>(&ret);
}
}
void TestAtomicAdd() {
size_t n_elements = 1024;
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
dh::AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
TEST(AtomicAdd, Int64) {
TestAtomicAdd();
}
} // namespace xgboost
|
5a9e227d4c30e2c16d8cd6d447755dba08460be0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <float.h>
/***********************************************************
* Vars
***********************************************************/
__constant__ const float gpu_pi = 3.14159265358979323846;
__constant__ const float gpu_twopi = 2*gpu_pi;
/***********************************************************
* Utils Host
***********************************************************/
// Get time
double time() {
timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec / 1000000.0;
}
/***********************************************************
* Data material structure
***********************************************************/
#ifndef MATERIALS
#define MATERIALS
// Structure for materials
struct Materials{
unsigned int nb_materials; // n
unsigned int nb_elements_total; // k
unsigned short int *nb_elements; // n
unsigned short int *index; // n
unsigned short int *mixture; // k
float *atom_num_dens; // k
float *nb_atoms_per_vol; // n
float *nb_electrons_per_vol; // n
float *electron_cut_energy; // n
float *electron_max_energy; // n
float *electron_mean_excitation_energy; // n
float *rad_length; // n
float *fX0; // n
float *fX1;
float *fD0;
float *fC;
float *fA;
float *fM;
};
#endif
// Materials device allocation
void materials_device_malloc(Materials &mat, unsigned int nb_mat, unsigned int nb_elm) {
mat.nb_materials = nb_mat;
mat.nb_elements_total = nb_elm;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
hipMalloc((void**) &mat.nb_elements, mem_mat_usi);
hipMalloc((void**) &mat.index, mem_mat_usi);
hipMalloc((void**) &mat.mixture, mem_elm_usi);
hipMalloc((void**) &mat.atom_num_dens, mem_elm_float);
hipMalloc((void**) &mat.nb_atoms_per_vol, mem_mat_float);
hipMalloc((void**) &mat.nb_electrons_per_vol, mem_mat_float);
hipMalloc((void**) &mat.electron_cut_energy, mem_mat_float);
hipMalloc((void**) &mat.electron_max_energy, mem_mat_float);
hipMalloc((void**) &mat.electron_mean_excitation_energy, mem_mat_float);
hipMalloc((void**) &mat.rad_length, mem_mat_float);
hipMalloc((void**) &mat.fX0, mem_mat_float);
hipMalloc((void**) &mat.fX1, mem_mat_float);
hipMalloc((void**) &mat.fD0, mem_mat_float);
hipMalloc((void**) &mat.fC, mem_mat_float);
hipMalloc((void**) &mat.fA, mem_mat_float);
hipMalloc((void**) &mat.fM, mem_mat_float);
}
// Materials free device memory
void materials_device_free(Materials &mat) {
hipFree(mat.nb_elements);
hipFree(mat.index);
hipFree(mat.mixture);
hipFree(mat.atom_num_dens);
hipFree(mat.nb_atoms_per_vol);
hipFree(mat.nb_electrons_per_vol);
hipFree(mat.electron_cut_energy);
hipFree(mat.electron_max_energy);
hipFree(mat.electron_mean_excitation_energy);
hipFree(mat.rad_length);
hipFree(mat.fX0);
hipFree(mat.fX1);
hipFree(mat.fD0);
hipFree(mat.fC);
hipFree(mat.fA);
hipFree(mat.fM);
}
// Materials host allocation
void materials_host_malloc(Materials &mat, unsigned int nb_mat, unsigned int nb_elm) {
mat.nb_materials = nb_mat;
mat.nb_elements_total = nb_elm;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
mat.nb_elements = (unsigned short int*)malloc(mem_mat_usi);
mat.index = (unsigned short int*)malloc(mem_mat_usi);
mat.mixture = (unsigned short int*)malloc(mem_elm_usi);
mat.atom_num_dens = (float*)malloc(mem_elm_float);
mat.nb_atoms_per_vol = (float*)malloc(mem_mat_float);
mat.nb_electrons_per_vol = (float*)malloc(mem_mat_float);
mat.electron_cut_energy = (float*)malloc(mem_mat_float);
mat.electron_max_energy = (float*)malloc(mem_mat_float);
mat.electron_mean_excitation_energy = (float*)malloc(mem_mat_float);
mat.rad_length = (float*)malloc(mem_mat_float);
mat.fX0 = (float*)malloc(mem_mat_float);
mat.fX1 = (float*)malloc(mem_mat_float);
mat.fD0 = (float*)malloc(mem_mat_float);
mat.fC = (float*)malloc(mem_mat_float);
mat.fA = (float*)malloc(mem_mat_float);
mat.fM = (float*)malloc(mem_mat_float);
}
// Materials free memory
void materials_host_free(Materials &mat) {
free(mat.nb_elements);
free(mat.index);
free(mat.mixture);
free(mat.atom_num_dens);
free(mat.nb_atoms_per_vol);
free(mat.nb_electrons_per_vol);
free(mat.electron_cut_energy);
free(mat.electron_max_energy);
free(mat.electron_mean_excitation_energy);
free(mat.rad_length);
free(mat.fX0);
free(mat.fX1);
free(mat.fD0);
free(mat.fC);
free(mat.fA);
free(mat.fM);
}
/***********************************************************
* Stack data particle structure
***********************************************************/
#ifndef STACKPARTICLE
#define STACKPARTICLE
// Stack of particles, format data is defined as SoA
struct StackParticle{
float* E;
float* dx;
float* dy;
float* dz;
float* px;
float* py;
float* pz;
float* t;
unsigned short int* type;
unsigned int* eventID;
unsigned int* trackID;
unsigned int* seed;
unsigned char* active;
unsigned char* endsimu;
unsigned long* table_x_brent;
unsigned int size;
};
#endif
// Stack host allocation
void stack_host_malloc(StackParticle &phasespace, int stack_size) {
phasespace.size = stack_size;
unsigned int mem_phasespace_float = stack_size * sizeof(float);
unsigned int mem_phasespace_uint = stack_size * sizeof(unsigned int);
unsigned int mem_phasespace_usint = stack_size * sizeof(unsigned short int);
unsigned int mem_phasespace_char = stack_size * sizeof(char);
phasespace.E = (float*)malloc(mem_phasespace_float);
phasespace.dx = (float*)malloc(mem_phasespace_float);
phasespace.dy = (float*)malloc(mem_phasespace_float);
phasespace.dz = (float*)malloc(mem_phasespace_float);
phasespace.px = (float*)malloc(mem_phasespace_float);
phasespace.py = (float*)malloc(mem_phasespace_float);
phasespace.pz = (float*)malloc(mem_phasespace_float);
phasespace.t = (float*)malloc(mem_phasespace_float);
phasespace.type = (unsigned short int*)malloc(mem_phasespace_usint);
phasespace.seed = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.eventID = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.trackID = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.endsimu = (unsigned char*)malloc(mem_phasespace_char);
phasespace.active = (unsigned char*)malloc(mem_phasespace_char);
}
// free host mem
void stack_host_free(StackParticle &phasespace) {
free(phasespace.E);
free(phasespace.dx);
free(phasespace.dy);
free(phasespace.dz);
free(phasespace.px);
free(phasespace.py);
free(phasespace.pz);
free(phasespace.t);
free(phasespace.type);
free(phasespace.seed);
free(phasespace.eventID);
free(phasespace.trackID);
free(phasespace.endsimu);
free(phasespace.active);
}
// For PRNG Brent
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define r (4*UINT64 + 8*UINT32)
// Stack device allocation
void stack_device_malloc(StackParticle &stackpart, int stack_size) {
stackpart.size = stack_size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_brent;
if (r == 4) {mem_brent = stack_size * 6 * sizeof(unsigned long);}
else {mem_brent = stack_size * 10 * sizeof(unsigned long);}
hipMalloc((void**) &stackpart.E, mem_stackpart_float);
hipMalloc((void**) &stackpart.dx, mem_stackpart_float);
hipMalloc((void**) &stackpart.dy, mem_stackpart_float);
hipMalloc((void**) &stackpart.dz, mem_stackpart_float);
hipMalloc((void**) &stackpart.px, mem_stackpart_float);
hipMalloc((void**) &stackpart.py, mem_stackpart_float);
hipMalloc((void**) &stackpart.pz, mem_stackpart_float);
hipMalloc((void**) &stackpart.t, mem_stackpart_float);
hipMalloc((void**) &stackpart.type, mem_stackpart_usint);
hipMalloc((void**) &stackpart.seed, mem_stackpart_uint);
hipMalloc((void**) &stackpart.eventID, mem_stackpart_uint);
hipMalloc((void**) &stackpart.trackID, mem_stackpart_uint);
hipMalloc((void**) &stackpart.table_x_brent, mem_brent);
hipMalloc((void**) &stackpart.endsimu, mem_stackpart_char);
hipMalloc((void**) &stackpart.active, mem_stackpart_char);
}
#undef UINT64
#undef UINT32
#undef r
// free device mem
void stack_device_free(StackParticle &stackpart) {
hipFree(stackpart.E);
hipFree(stackpart.dx);
hipFree(stackpart.dy);
hipFree(stackpart.dz);
hipFree(stackpart.px);
hipFree(stackpart.py);
hipFree(stackpart.pz);
hipFree(stackpart.t);
hipFree(stackpart.type);
hipFree(stackpart.seed);
hipFree(stackpart.eventID);
hipFree(stackpart.trackID);
hipFree(stackpart.endsimu);
hipFree(stackpart.active);
hipFree(stackpart.table_x_brent);
}
/***********************************************************
* Volume data structure
***********************************************************/
#ifndef VOLUME
#define VOLUME
// Volume structure data
struct Volume {
unsigned short int *data;
unsigned int mem_data;
float3 size_in_mm;
int3 size_in_vox;
float3 voxel_size;
int nb_voxel_volume;
int nb_voxel_slice;
float3 position;
};
#endif
// Volume host allocation
void volume_host_malloc(Volume &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(unsigned short int);
vol.data = (unsigned short int*)malloc(vol.mem_data);
}
// Free host memory
void volume_host_free(Volume &vol) {
free(vol.data);
}
// Volume device allocation
void volume_device_malloc(Volume &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(unsigned short int);
hipMalloc((void**) &vol.data, vol.mem_data);
}
// Free device memory
void volume_device_free(Volume &vol) {
hipFree(vol.data);
}
/***********************************************************
* Dosimetry data structure
***********************************************************/
#ifndef DOSIMETRY
#define DOSIMETRY
struct Dosimetry {
float *edep;
float *edep2;
unsigned int mem_data;
float3 size_in_mm;
int3 size_in_vox;
float3 voxel_size;
int nb_voxel_volume;
int nb_voxel_slice;
float3 position;
};
#endif
// Dosimetry host allocation
void dosimetry_host_malloc(Dosimetry &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(float);
vol.edep = (float*)malloc(vol.mem_data);
}
// Dosimetry free host memory
void dosimetry_host_free(Dosimetry &vol) {
free(vol.edep);
}
// Dosimetry volume device allocation
void dosimetry_device_malloc(Dosimetry &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(float);
hipMalloc((void**) &vol.edep, vol.mem_data);
}
// Dosimetry free device memory
void dosimetry_device_free(Dosimetry &vol) {
hipFree(vol.edep);
}
// Dosimetry reset
void dosimetry_host_reset(Dosimetry &vol) {
int i=0; while(i<vol.nb_voxel_volume) {
vol.edep[i] = 0.0f;
++i;
}
}
/***********************************************************
* Activities structure
***********************************************************/
struct Activities {
unsigned int nb_activities;
float tot_activity;
unsigned int *act_index;
float *act_cdf;
};
// Host allocation
void activities_host_malloc(Activities &act, int nbact) {
act.act_index = (unsigned int*)malloc(nbact*sizeof(unsigned int));
act.act_cdf = (float*)malloc(nbact*sizeof(float));
}
// Device allocation
void activities_device_malloc(Activities &act, int nbact) {
hipMalloc((void**) &act.act_index, nbact*sizeof(float));
hipMalloc((void**) &act.act_cdf, nbact*sizeof(float));
}
// Free host mem
void activities_host_free(Activities &act) {
free(act.act_index);
free(act.act_cdf);
}
// Free device mem
void activities_device_free(Activities &act) {
hipFree(act.act_index);
hipFree(act.act_cdf);
}
/***********************************************************
* Copy structure functions
***********************************************************/
// Copy materials from host to device
void materials_copy_host2device(Materials &host, Materials &device) {
unsigned int nb_mat = host.nb_materials;
unsigned int nb_elm = host.nb_elements_total;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
hipMemcpy(device.nb_elements, host.nb_elements, mem_mat_usi, hipMemcpyHostToDevice);
hipMemcpy(device.index, host.index, mem_mat_usi, hipMemcpyHostToDevice);
hipMemcpy(device.mixture, host.mixture, mem_elm_usi, hipMemcpyHostToDevice);
hipMemcpy(device.atom_num_dens, host.atom_num_dens, mem_elm_float, hipMemcpyHostToDevice);
hipMemcpy(device.nb_atoms_per_vol, host.nb_atoms_per_vol, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.nb_electrons_per_vol, host.nb_electrons_per_vol, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.electron_cut_energy, host.electron_cut_energy, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.electron_max_energy, host.electron_max_energy, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.electron_mean_excitation_energy, host.electron_mean_excitation_energy, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.rad_length, host.rad_length, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fX0, host.fX0, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fX1, host.fX1, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fD0, host.fD0, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fC, host.fC, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fA, host.fA, mem_mat_float, hipMemcpyHostToDevice);
hipMemcpy(device.fM, host.fM, mem_mat_float, hipMemcpyHostToDevice);
}
// Copy stack from device to host
void stack_copy_device2host(StackParticle &stackpart, StackParticle &phasespace) {
int stack_size = stackpart.size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
hipMemcpy(phasespace.E, stackpart.E, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.dx, stackpart.dx, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.dy, stackpart.dy, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.dz, stackpart.dz, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.px, stackpart.px, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.py, stackpart.py, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.pz, stackpart.pz, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.t, stackpart.t, mem_stackpart_float, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.type, stackpart.type, mem_stackpart_usint, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.endsimu, stackpart.endsimu, mem_stackpart_char, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.active, stackpart.active, mem_stackpart_char, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.trackID, stackpart.trackID, mem_stackpart_uint, hipMemcpyDeviceToHost);
hipMemcpy(phasespace.eventID, stackpart.eventID, mem_stackpart_uint, hipMemcpyDeviceToHost);
}
// Copy stack from host to device
void stack_copy_host2device(StackParticle &phasespace, StackParticle &stackpart) {
int stack_size = phasespace.size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
hipMemcpy(stackpart.E, phasespace.E, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.dx, phasespace.dx, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.dy, phasespace.dy, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.dz, phasespace.dz, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.px, phasespace.px, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.py, phasespace.py, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.pz, phasespace.pz, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.t, phasespace.t, mem_stackpart_float, hipMemcpyHostToDevice);
hipMemcpy(stackpart.type, phasespace.type, mem_stackpart_usint, hipMemcpyHostToDevice);
hipMemcpy(stackpart.endsimu, phasespace.endsimu, mem_stackpart_char, hipMemcpyHostToDevice);
hipMemcpy(stackpart.active, phasespace.active, mem_stackpart_char, hipMemcpyHostToDevice);
hipMemcpy(stackpart.trackID, phasespace.trackID, mem_stackpart_uint, hipMemcpyHostToDevice);
hipMemcpy(stackpart.eventID, phasespace.eventID, mem_stackpart_uint, hipMemcpyHostToDevice);
hipMemcpy(stackpart.seed, phasespace.seed, mem_stackpart_uint, hipMemcpyHostToDevice);
}
// Copy volume from device to host
void volume_copy_device2host(Volume &voldevice, Volume &volhost) {
volhost.size_in_vox = voldevice.size_in_vox;
volhost.voxel_size = voldevice.voxel_size;
volhost.size_in_mm = voldevice.size_in_mm;
volhost.nb_voxel_slice = voldevice.nb_voxel_slice;
volhost.nb_voxel_volume = voldevice.nb_voxel_volume;
volhost.mem_data = voldevice.mem_data;
volhost.position = voldevice.position;
hipMemcpy(volhost.data, voldevice.data, voldevice.mem_data, hipMemcpyDeviceToHost);
}
// Copy volume from host to device
void volume_copy_host2device(Volume &volhost, Volume &voldevice) {
voldevice.size_in_vox = volhost.size_in_vox;
voldevice.voxel_size = volhost.voxel_size;
voldevice.size_in_mm = volhost.size_in_mm;
voldevice.nb_voxel_slice = volhost.nb_voxel_slice;
voldevice.nb_voxel_volume = volhost.nb_voxel_volume;
voldevice.mem_data = volhost.mem_data;
voldevice.position = volhost.position;
hipMemcpy(voldevice.data, volhost.data, volhost.mem_data, hipMemcpyHostToDevice);
}
// Copy volume from device to host
void dosimetry_copy_device2host(Dosimetry &voldevice, Dosimetry &volhost) {
volhost.size_in_vox = voldevice.size_in_vox;
volhost.voxel_size = voldevice.voxel_size;
volhost.size_in_mm = voldevice.size_in_mm;
volhost.nb_voxel_slice = voldevice.nb_voxel_slice;
volhost.nb_voxel_volume = voldevice.nb_voxel_volume;
volhost.mem_data = voldevice.mem_data;
volhost.position = voldevice.position;
hipMemcpy(volhost.edep, voldevice.edep, voldevice.mem_data, hipMemcpyDeviceToHost);
}
// Copy dosimetry from host to device
void dosimetry_copy_host2device(Dosimetry &volhost, Dosimetry &voldevice) {
voldevice.size_in_vox = volhost.size_in_vox;
voldevice.voxel_size = volhost.voxel_size;
voldevice.size_in_mm = volhost.size_in_mm;
voldevice.nb_voxel_slice = volhost.nb_voxel_slice;
voldevice.nb_voxel_volume = volhost.nb_voxel_volume;
voldevice.mem_data = volhost.mem_data;
voldevice.position = volhost.position;
hipMemcpy(voldevice.edep, volhost.edep, volhost.mem_data, hipMemcpyHostToDevice);
}
// Copy activities from host to device
void activities_copy_host2device(Activities &acthost, Activities &actdevice) {
actdevice.nb_activities = acthost.nb_activities;
actdevice.tot_activity = acthost.tot_activity;
hipMemcpy(actdevice.act_index, acthost.act_index,
actdevice.nb_activities*sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(actdevice.act_cdf, acthost.act_cdf,
actdevice.nb_activities*sizeof(float), hipMemcpyHostToDevice);
}
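// Typical host-side setup using the helpers above (sketch only; sizes, variable names
// and the way the host arrays are filled are application-specific assumptions):
//   Materials mat_h, mat_d;
//   materials_host_malloc(mat_h, nb_mat, nb_elm);   // fill mat_h fields from a database
//   materials_device_malloc(mat_d, nb_mat, nb_elm);
//   materials_copy_host2device(mat_h, mat_d);
//   StackParticle g1_h, g1_d;
//   stack_host_malloc(g1_h, stack_size);            // fill per-particle seeds
//   stack_device_malloc(g1_d, stack_size);
//   stack_copy_host2device(g1_h, g1_d);
// Volumes, dosimetry maps and activities follow the same malloc/copy pattern.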
/***********************************************************
* Utils Device
***********************************************************/
// rotateUz, function from CLHEP
__device__ float3 rotateUz(float3 vector, float3 newUz) {
float u1 = newUz.x;
float u2 = newUz.y;
float u3 = newUz.z;
float up = u1*u1 + u2*u2;
if (up>0) {
up = sqrtf(up);
float px = vector.x, py = vector.y, pz = vector.z;
vector.x = __fdividef(u1*u3*px - u2*py, up) + u1*pz;
vector.y = __fdividef(u2*u3*px + u1*py, up) + u2*pz;
vector.z = -up*px + u3*pz;
}
else if (u3 < 0.) { vector.x = -vector.x; vector.z = -vector.z; } // phi=0 theta=gpu_pi
return make_float3(vector.x, vector.y, vector.z);
}
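// Usage note: rotateUz maps a direction sampled around the +z axis (theta, phi) into the
// frame whose z axis is 'newUz', typically the incident direction. This is how scattered
// directions are built in the physics routines below, e.g. in the Compton code:
//   float3 gamDir1 = make_float3(sinTheta*__cosf(phi), sinTheta*__sinf(phi), cosTheta);
//   gamDir1 = rotateUz(gamDir1, gamDir0);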
// add vector
__device__ float3 add_vector(float3 u, float3 v) {
return make_float3(u.x+v.x, u.y+v.y, u.z+v.z);
}
// sub vector
__device__ float3 sub_vector(float3 u, float3 v) {
return make_float3(u.x-v.x, u.y-v.y, u.z-v.z);
}
// mul a vector by a scalar
__device__ float3 scale_vector(float3 u, float a) {
return make_float3(u.x*a, u.y*a, u.z*a);
}
// mul two vectors
__device__ float3 mul_vector(float3 u, float3 v) {
return make_float3(u.x*v.x, u.y*v.y, u.z*v.z);
}
// div two vectors
__device__ float3 div_vector(float3 u, float3 v) {
return make_float3(__fdividef(u.x, v.x),
__fdividef(u.y, v.y),
__fdividef(u.z, v.z));
}
// return an unitary vector
__device__ float3 unit_vector(float3 u) {
float imag = __fdividef(1.0f, sqrtf(u.x*u.x + u.y*u.y + u.z*u.z));
return make_float3(u.x*imag, u.y*imag, u.z*imag);
}
// return inverse vector
__device__ float3 inverse_vector(float3 u) {
return make_float3(__fdividef(1.0f, u.x), __fdividef(1.0f, u.y), __fdividef(1.0f, u.z));
}
//// Used for the validation
__device__ float mag_vector(float3 u) {
return sqrtf(u.x*u.x + u.y*u.y + u.z*u.z);
}
__device__ float dot_vector(float3 u, float3 v) {
return u.x*v.x + u.y*v.y + u.z*v.z;
}
//// Return the distance to the next voxel boundary along the ray (slab method); used by the standard navigator
__device__ float get_boundary_voxel_by_raycasting(int4 vox, float3 p, float3 d, float3 res) {
float xmin, xmax, ymin, ymax, zmin, zmax;
float3 di = inverse_vector(d);
float tmin, tmax, tymin, tymax, tzmin, tzmax, buf;
// Define the voxel bounding box
xmin = vox.x*res.x;
ymin = vox.y*res.y;
zmin = vox.z*res.z;
xmax = (d.x<0 && p.x==xmin) ? xmin-res.x : xmin+res.x;
ymax = (d.y<0 && p.y==ymin) ? ymin-res.y : ymin+res.y;
zmax = (d.z<0 && p.z==zmin) ? zmin-res.z : zmin+res.z;
tmin = -1e9f;
tmax = 1e9f;
// on x
if (d.x != 0.0f) {
tmin = (xmin - p.x) * di.x;
tmax = (xmax - p.x) * di.x;
if (tmin > tmax) {
buf = tmin;
tmin = tmax;
tmax = buf;
}
}
// on y
if (d.y != 0.0f) {
tymin = (ymin - p.y) * di.y;
tymax = (ymax - p.y) * di.y;
if (tymin > tymax) {
buf = tymin;
tymin = tymax;
tymax = buf;
}
if (tymin > tmin) {tmin = tymin;}
if (tymax < tmax) {tmax = tymax;}
}
// on z
if (d.z != 0.0f) {
tzmin = (zmin - p.z) * di.z;
tzmax = (zmax - p.z) * di.z;
if (tzmin > tzmax) {
buf = tzmin;
tzmin = tzmax;
tzmax = buf;
}
if (tzmin > tmin) {tmin = tzmin;}
if (tzmax < tmax) {tmax = tzmax;}
}
return tmax;
}
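// Note: this is the classic slab (ray vs axis-aligned box) test restricted to the current
// voxel; the returned tmax is the parametric distance at which the ray exits the voxel,
// i.e. the step length to the next voxel boundary.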
// Binary search
__device__ int binary_search(float *val, float key, int n) {
int min=0, max=n, mid;
while (min < max) {
mid = (min + max) >> 1;
if (key > val[mid]) {
min = mid + 1;
} else {
max = mid;
}
}
return min;
}
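// Usage note: binary_search returns the index of the first element of 'val' that is >= key
// (lower bound on a sorted array). Below it inverts the cumulative activity distribution
// when sampling the emission voxel:
//   float rnd = Brent_real(id, g1.table_x_brent, 0);
//   int pos = binary_search(act.act_cdf, rnd, act.nb_activities);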
void dosimetry_dump(Dosimetry dosemap) {
// first write the header
FILE *pfile = fopen("dosemap.mhd", "w");
fprintf(pfile, "ObjectType = Image \n");
fprintf(pfile, "NDims = 3 \n");
fprintf(pfile, "BinaryData = True \n");
fprintf(pfile, "BinaryDataByteOrderMSB = False \n");
fprintf(pfile, "CompressedData = False \n");
fprintf(pfile, "ElementSpacing = %f %f %f \n", dosemap.voxel_size.x,
dosemap.voxel_size.y,
dosemap.voxel_size.z);
fprintf(pfile, "DimSize = %i %i %i \n", dosemap.size_in_vox.x,
dosemap.size_in_vox.y,
dosemap.size_in_vox.z);
fprintf(pfile, "ElementType = MET_FLOAT \n");
fprintf(pfile, "ElementDataFile = dosemap.raw\n");
fclose(pfile);
// then export data
pfile = fopen("dosemap.raw", "wb");
fwrite(dosemap.edep, sizeof(float), dosemap.nb_voxel_volume, pfile);
fclose(pfile);
}
/***********************************************************
* PRNG Brent xor256
***********************************************************/
// Brent PRNG integer version
__device__ unsigned long weyl;
__device__ unsigned long brent_int(unsigned int index, unsigned long *device_x_brent, unsigned long seed)
{
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define wlen (64*UINT64 + 32*UINT32)
#define r (4*UINT64 + 8*UINT32)
#define s (3*UINT64 + 3*UINT32)
#define a (37*UINT64 + 18*UINT32)
#define b (27*UINT64 + 13*UINT32)
#define c (29*UINT64 + 14*UINT32)
#define d (33*UINT64 + 15*UINT32)
#define ws (27*UINT64 + 16*UINT32)
int z, z_w, z_i_brent;
if (r==4){
z=6; z_w=4; z_i_brent=5;}
else{
z=10; z_w=8; z_i_brent=9;}
unsigned long w = device_x_brent[z*index + z_w];
unsigned long i_brent = device_x_brent[z*index + z_i_brent];
unsigned long zero = 0;
unsigned long t, v;
int k;
if (seed != zero) { // Initialisation necessary
// weyl = odd approximation to 2**wlen*(3-sqrt(5))/2.
if (UINT32)
weyl = 0x61c88647;
else
weyl = ((((unsigned long)0x61c88646)<<16)<<16) + (unsigned long)0x80b583eb;
v = (seed!=zero)? seed:~seed; // v must be nonzero
for (k = wlen; k > 0; k--) { // Avoid correlations for close seeds
v ^= v<<10; v ^= v>>15; // Recurrence has period 2**wlen-1
v ^= v<<4; v ^= v>>13; // for wlen = 32 or 64
}
for (w = v, k = 0; k < r; k++) { // Initialise circular array
v ^= v<<10; v ^= v>>15;
v ^= v<<4; v ^= v>>13;
device_x_brent[k + z*index] = v + (w+=weyl);
}
for (i_brent = r-1, k = 4*r; k > 0; k--) { // Discard first 4*r results
t = device_x_brent[(i_brent = (i_brent+1)&(r-1)) + z*index]; t ^= t<<a; t ^= t>>b;
v = device_x_brent[((i_brent+(r-s))&(r-1)) + z*index]; v ^= v<<c; v ^= v>>d;
device_x_brent[i_brent + z*index] = t^v;
}
}
// Apart from initialisation (above), this is the generator
t = device_x_brent[(i_brent = (i_brent+1)&(r-1)) + z*index]; // Assumes that r is a power of two
v = device_x_brent[((i_brent+(r-s))&(r-1)) + z*index]; // Index is (i-s) mod r
t ^= t<<a; t ^= t>>b; // (I + L^a)(I + R^b)
v ^= v<<c; v ^= v>>d; // (I + L^c)(I + R^d)
device_x_brent[i_brent + z*index] = (v ^= t); // Update circular array
w += weyl; // Update Weyl generator
device_x_brent[z*index + z_w] = w;
device_x_brent[z*index + z_i_brent] = i_brent;
return (v + (w^(w>>ws))); // Return combination
#undef UINT64
#undef UINT32
#undef wlen
#undef r
#undef s
#undef a
#undef b
#undef c
#undef d
#undef ws
}
// Brent PRNG real version
__device__ double Brent_real(int index, unsigned long *device_x_brent, unsigned long seed)
{
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define UREAL64 (sizeof(double)>>3)
#define UREAL32 (1 - UREAL64)
// sr = number of bits discarded = 11 for double, 40 or 8 for float
#define sr (11*UREAL64 +(40*UINT64 + 8*UINT32)*UREAL32)
// ss (used for scaling) is 53 or 21 for double, 24 for float
#define ss ((53*UINT64 + 21*UINT32)*UREAL64 + 24*UREAL32)
// SCALE is 0.5**ss, SC32 is 0.5**32
#define SCALE ((double)1/(double)((unsigned long)1<<ss))
#define SC32 ((double)1/((double)65536*(double)65536))
double res;
res = (double)0;
while (res == (double)0) // Loop until nonzero result.
{ // Usually only one iteration.
res = (double)(brent_int(index, device_x_brent, seed)>>sr); // Discard sr random bits.
seed = (unsigned long)0; // Zero seed for next time.
if (UINT32 && UREAL64) // Need another call to xor4096i.
res += SC32*(double)brent_int(index, device_x_brent, seed); // Add low-order 32 bits.
}
return (SCALE*res); // Return result in (0.0, 1.0).
#undef UINT64
#undef UINT32
#undef UREAL64
#undef UREAL32
#undef SCALE
#undef SC32
#undef sr
#undef ss
}
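// Usage note: each particle owns r+2 unsigned longs of generator state in device_x_brent
// (allocated by stack_device_malloc as 6 or 10 values per particle). Pass a nonzero seed
// exactly once to initialise that state (see kernel_brent_init below); afterwards call
// with seed = 0 to draw uniform variates in (0.0, 1.0).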
// Init Brent seed
__global__ void kernel_brent_init(StackParticle stackpart) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id < stackpart.size) {
unsigned int seed = stackpart.seed[id];
// Initialise the per-thread generator state with its nonzero seed (return value discarded).
brent_int(id, stackpart.table_x_brent, seed);
}
}
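// Seeding sketch (assumed driver code, not part of this file): the host fills
// phasespace.seed with one nonzero seed per particle, copies the stack to the device
// with stack_copy_host2device, then runs this kernel once before any kernel that calls
// Brent_real, e.g.:
//   hipLaunchKernelGGL(kernel_brent_init, dim3(nblocks), dim3(nthreads), 0, 0, d_stack);
// where nblocks/nthreads are chosen by the caller to cover d_stack.size threads.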
/***********************************************************
* Particles source
***********************************************************/
// Voxelized back2back source
__global__ void kernel_voxelized_source_b2b(StackParticle g1, StackParticle g2, Activities act,
float E, int3 size_in_vox, float3 voxel_size) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= g1.size) return;
float jump = (float)(size_in_vox.x * size_in_vox.y);
float ind, x, y, z;
float rnd = Brent_real(id, g1.table_x_brent, 0);
int pos = binary_search(act.act_cdf, rnd, act.nb_activities);
// get the voxel position (x, y, z)
ind = (float)act.act_index[pos];
z = floor(ind / jump);
ind -= (z * jump);
y = floor(ind / (float)(size_in_vox.x));
x = ind - y*size_in_vox.x;
// random position inside the voxel
x += Brent_real(id, g1.table_x_brent, 0);
y += Brent_real(id, g1.table_x_brent, 0);
z += Brent_real(id, g1.table_x_brent, 0);
// must be in mm
x *= voxel_size.x;
y *= voxel_size.y;
z *= voxel_size.z;
// random orientation
float phi = Brent_real(id, g1.table_x_brent, 0);
float theta = Brent_real(id, g1.table_x_brent, 0);
phi = gpu_twopi * phi;
theta = acosf(1.0f - 2.0f*theta);
// convert to cartesian
float dx = __cosf(phi)*__sinf(theta);
float dy = __sinf(phi)*__sinf(theta);
float dz = __cosf(theta);
// first gamma
g1.dx[id] = dx;
g1.dy[id] = dy;
g1.dz[id] = dz;
g1.E[id] = E;
g1.px[id] = x;
g1.py[id] = y;
g1.pz[id] = z;
g1.t[id] = 0.0f;
g1.active[id] = 1;
g1.endsimu[id] = 0;
g1.type[id] = GAMMA;
// second gamma
g2.dx[id] = -dx;
g2.dy[id] = -dy;
g2.dz[id] = -dz;
g2.E[id] = E;
g2.px[id] = x;
g2.py[id] = y;
g2.pz[id] = z;
g2.t[id] = 0.0f;
g2.active[id] = 1;
g2.endsimu[id] = 0;
g2.type[id] = GAMMA;
}
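// Note on the direction sampling above: phi is uniform in [0, 2*pi) and
// theta = acos(1 - 2u) with u uniform in [0, 1), so cos(theta) is uniform in [-1, 1]
// and the emission is isotropic; the second gamma is emitted back-to-back (-dx, -dy, -dz).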
/***********************************************************
* Photons Physics Effects
***********************************************************/
//// Comptons Standard //////////////////////////////////////
// Compton Cross Section Per Atom (Standard - Klein-Nishina)
__device__ float Compton_CSPA_Standard(float E, unsigned short int Z) {
float CrossSection = 0.0;
if (Z<1 || E < 1e-4f) {return CrossSection;}
float p1Z = Z*(2.7965e-23f + 1.9756e-27f*Z + -3.9178e-29f*Z*Z);
float p2Z = Z*(-1.8300e-23f + -1.0205e-24f*Z + 6.8241e-27f*Z*Z);
float p3Z = Z*(6.7527e-22f + -7.3913e-24f*Z + 6.0480e-27f*Z*Z);
float p4Z = Z*(-1.9798e-21f + 2.7079e-24f*Z + 3.0274e-26f*Z*Z);
float T0 = (Z < 1.5f)? 40.0e-3f : 15.0e-3f;
float d1, d2, d3, d4, d5;
d1 = __fdividef(fmaxf(E, T0), 0.510998910f); // X
CrossSection = __fdividef(p1Z*__logf(1.0f+2.0f*d1), d1) + __fdividef(p2Z + p3Z*d1 + p4Z*d1*d1, 1.0f + 20.0f*d1 + 230.0f*d1*d1 + 440.0f*d1*d1*d1);
if (E < T0) {
d1 = __fdividef(T0+1.0e-3f, 0.510998910f); // X
d2 = __fdividef(p1Z*__logf(1.0f+2.0f*d1), d1) + __fdividef(p2Z + p3Z*d1 + p4Z*d1*d1, 1.0f + 20.0f*d1 + 230.0f*d1*d1 + 440.0f*d1*d1*d1); // sigma
d3 = __fdividef(-T0 * (d2 - CrossSection), CrossSection*1.0e-3f); // c1
d4 = (Z > 1.5f)? 0.375f-0.0556f*__logf(Z) : 0.15f; // c2
d5 = __logf(__fdividef(E, T0)); // y
CrossSection *= __expf(-d5 * (d3 + d4*d5));
}
return CrossSection;
}
// Compute the total Compton cross section for a given material
__device__ float Compton_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] * Compton_CSPA_Standard(E, materials.mixture[index+i]));
}
return CS;
}
// Compton Scatter (Standard - Klein-Nishina) without secondary
__device__ float Compton_Effect_Standard_NoSec(StackParticle photons,
unsigned int id,
int* count_d) {
float gamE0 = photons.E[id];
float E0 = __fdividef(gamE0, 0.510998910f);
float3 gamDir0 = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// sample the energy fraction of the scattered gamma
float epszero = __fdividef(1.0f, (1.0f + 2.0f * E0));
float eps02 = epszero*epszero;
float a1 = -__logf(epszero);
float a2 = __fdividef(a1, (a1 + 0.5f*(1.0f-eps02)));
float greject, onecost, eps, eps2, sint2, cosTheta, sinTheta, phi;
do {
if (a2 > Brent_real(id, photons.table_x_brent, 0)) {
eps = __expf(-a1 * Brent_real(id, photons.table_x_brent, 0));
eps2 = eps*eps;
} else {
eps2 = eps02 + (1.0f - eps02) * Brent_real(id, photons.table_x_brent, 0);
eps = sqrt(eps2);
}
onecost = __fdividef(1.0f - eps, eps * E0);
sint2 = onecost * (2.0f - onecost);
greject = 1.0f - eps * __fdividef(sint2, 1.0f + eps2);
} while (greject < Brent_real(id, photons.table_x_brent, 0));
// scattered gamma angles
if (sint2 < 0.0f) {sint2 = 0.0f;}
cosTheta = 1.0f - onecost;
sinTheta = sqrt(sint2);
phi = Brent_real(id, photons.table_x_brent, 0) * gpu_twopi;
// update the scattered gamma
float3 gamDir1 = make_float3(sinTheta*__cosf(phi), sinTheta*__sinf(phi), cosTheta);
gamDir1 = rotateUz(gamDir1, gamDir0);
photons.dx[id] = gamDir1.x;
photons.dy[id] = gamDir1.y;
photons.dz[id] = gamDir1.z;
float gamE1 = gamE0 * eps;
if (gamE1 > 1.0e-06f) {photons.E[id] = gamE1;}
else {
photons.endsimu[id] = 1; // stop this particle
photons.active[id] = 0; // this particle is absorbed
atomicAdd(count_d, 1); // count simulated primaries
return gamE1; // Local energy deposit
}
return 0.0f;
}
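// Sampling note (applies to both Compton routines): eps = E'/E is drawn in [1/(1+2k), 1]
// with k = E/mc^2, from a mixture of a 1/eps density (exponential in log eps) and an eps
// density (uniform in eps^2), then accepted with probability
//   g(eps) = 1 - eps*sin^2(theta) / (1 + eps^2),
// and the angle follows from the Compton kinematics 1 - cos(theta) = (1 - eps)/(eps*k).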
// Compton Scatter (Standard - Klein-Nishina) with secondary (e-)
__device__ float Compton_Effect_Standard_WiSec(StackParticle photons,
StackParticle electrons,
float cutE,
unsigned int id,
int* count_d) {
float gamE0 = photons.E[id];
float E0 = __fdividef(gamE0, 0.510998910f);
float3 gamDir0 = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// sample the energy fraction of the scattered gamma
float epszero = __fdividef(1.0f, (1.0f + 2.0f * E0));
float eps02 = epszero*epszero;
float a1 = -__logf(epszero);
float a2 = __fdividef(a1, (a1 + 0.5f*(1.0f-eps02)));
float greject, onecost, eps, eps2, sint2, cosTheta, sinTheta, phi;
do {
if (a2 > Brent_real(id, photons.table_x_brent, 0)) {
eps = __expf(-a1 * Brent_real(id, photons.table_x_brent, 0));
eps2 = eps*eps;
} else {
eps2 = eps02 + (1.0f - eps02) * Brent_real(id, photons.table_x_brent, 0);
eps = sqrt(eps2);
}
onecost = __fdividef(1.0f - eps, eps * E0);
sint2 = onecost * (2.0f - onecost);
greject = 1.0f - eps * __fdividef(sint2, 1.0f + eps2);
} while (greject < Brent_real(id, photons.table_x_brent, 0));
// scattered gamma angles
if (sint2 < 0.0f) {sint2 = 0.0f;}
cosTheta = 1.0f - onecost;
sinTheta = sqrt(sint2);
phi = Brent_real(id, photons.table_x_brent, 0) * gpu_twopi;
// update the scattered gamma
float3 gamDir1 = make_float3(sinTheta*__cosf(phi), sinTheta*__sinf(phi), cosTheta);
gamDir1 = rotateUz(gamDir1, gamDir0);
photons.dx[id] = gamDir1.x;
photons.dy[id] = gamDir1.y;
photons.dz[id] = gamDir1.z;
float gamE1 = gamE0 * eps;
if (gamE1 > 1.0e-06f) {photons.E[id] = gamE1;}
else {
//printf("Compton => X\n");
photons.endsimu[id] = 1; // absorbed this particle
photons.active[id] = 0;
atomicAdd(count_d, 1); // count simulated primaries
return gamE1; // Local energy deposit
}
// kinematic of the scattered electron
float eKinE = gamE0 - gamE1;
// DBL_MIN cut production
if (eKinE > 1.0e-38f && eKinE > cutE) {
float3 eDir = sub_vector(scale_vector(gamDir0, gamE0), scale_vector(gamDir1, gamE1));
eDir = unit_vector(eDir);
electrons.dx[id] = eDir.x;
electrons.dy[id] = eDir.y;
electrons.dz[id] = eDir.z;
electrons.E[id] = eKinE;
electrons.px[id] = photons.px[id];
electrons.py[id] = photons.py[id];
electrons.pz[id] = photons.pz[id];
electrons.endsimu[id] = 0;
// Now start to track this electron and freeze the photon tracking
photons.active[id] = 0;
electrons.active[id] = 1;
//printf("Compton => e- cutE %e\n", cutE);
return 0.0f;
}
//printf("Compton => / cutE %e\n", cutE);
return eKinE;
}
//// PhotoElectric Standard //////////////////////////////////////
// Compute Theta distribution of the emitted electron, with respect to the incident Gamma
// The Sauter-Gavrila distribution for the K-shell is used
__device__ float PhotoElec_ElecCosThetaDistribution(StackParticle part,
unsigned int id,
float kineEnergy) {
float costeta = 1.0f;
float gamma = kineEnergy * 1.9569513367f + 1.0f; // 1/electron_mass_c2
if (gamma > 5.0f) {return costeta;}
float beta = __fdividef(sqrtf(gamma*gamma - 1.0f), gamma);
float b = 0.5f*gamma*(gamma - 1.0f)*(gamma - 2.0f);
float rndm, term, greject, grejsup;
if (gamma < 2.0f) {grejsup = gamma*gamma*(1.0f + b - beta*b);}
else {grejsup = gamma*gamma*(1.0f + b + beta*b);}
do {
rndm = 1.0f - 2.0f*Brent_real(id, part.table_x_brent, 0);
costeta = __fdividef(rndm + beta, rndm*beta + 1.0f);
term = 1.0f - beta*costeta;
greject = __fdividef((1.0f - costeta*costeta)*(1.0f + b*term), term*term);
} while(greject < Brent_real(id, part.table_x_brent, 0)*grejsup);
return costeta;
}
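// Note: for gamma = 1 + E/mc^2 > 5 the routine returns cos(theta) = 1 (forward emission);
// otherwise it rejection-samples the Sauter-Gavrila K-shell angular distribution, so the
// returned value is the photoelectron polar angle measured from the photon direction.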
// PhotoElectric Cross Section Per Atom (Standard)
__device__ float PhotoElec_CSPA_Standard(float E, unsigned short int Z) {
// from Sandia, the same for all Z
float Emin = fmax(PhotoElec_std_IonizationPotentials[Z]*1e-6f, 0.01e-3f);
if (E < Emin) {return 0.0f;}
int start = PhotoElec_std_CumulIntervals[Z-1];
int stop = start + PhotoElec_std_NbIntervals[Z];
int pos=stop;
while (E < PhotoElec_std_SandiaTable[pos][0]*1.0e-3f){--pos;}
float AoverAvo = 0.0103642688246f * __fdividef((float)Z, PhotoElec_std_ZtoAratio[Z]);
float rE = __fdividef(1.0f, E);
float rE2 = rE*rE;
return rE * PhotoElec_std_SandiaTable[pos][1] * AoverAvo * 0.160217648e-22f
+ rE2 * PhotoElec_std_SandiaTable[pos][2] * AoverAvo * 0.160217648e-25f
+ rE * rE2 * PhotoElec_std_SandiaTable[pos][3] * AoverAvo * 0.160217648e-28f
+ rE2 * rE2 * PhotoElec_std_SandiaTable[pos][4] * AoverAvo * 0.160217648e-31f;
}
// Compute the total Compton cross section for a given material
__device__ float PhotoElec_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] * PhotoElec_CSPA_Standard(E, materials.mixture[index+i]));
}
return CS;
}
// PhotoElectric effect (Standard) without secondary
__device__ float PhotoElec_Effect_Standard_NoSec(StackParticle photons,
unsigned int id,
int* count_d) {
// Absorb the photon
photons.endsimu[id] = 1; // stop the simulation
photons.active[id] = 0; // this particle is absorbed
atomicAdd(count_d, 1); // count simulated primaries
return 0.0f;
}
// PhotoElectric effect (Standard) with secondary (e-)
__device__ float PhotoElec_Effect_Standard_WiSec(StackParticle photons,
StackParticle electrons,
Materials mat,
float cutE,
unsigned int matindex,
unsigned int id,
int* count_d) {
float energy = photons.E[id];
float3 PhotonDirection = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// Select randomly one element constituting the material
unsigned int n = mat.nb_elements[matindex]-1;
unsigned int index = mat.index[matindex];
unsigned int Z = mat.mixture[index+n];
unsigned int i = 0;
if (n > 0) {
float x = Brent_real(id, photons.table_x_brent, 0) *
PhotoElec_CS_Standard(mat, matindex, energy);
float xsec = 0.0f;
for (i=0; i<n; ++i) {
xsec += mat.atom_num_dens[index+i] *
PhotoElec_CSPA_Standard(energy, mat.mixture[index+i]);
if (x <= xsec) {
Z = mat.mixture[index+i];
break;
}
}
}
//// Photo electron
// Select atomic shell
unsigned short int nShells = atom_NumberOfShells[Z];
index = atom_IndexOfShells[Z];
float bindingEnergy = atom_BindingEnergies[index]*1.0e-06f; // convert eV to MeV
i=0; while (i < nShells && energy < bindingEnergy) {
++i;
bindingEnergy = atom_BindingEnergies[index + i]*1.0e-06f; // convert eV to MeV
}
// no shell available
if (i == nShells) {return 0.0f;}
float ElecKineEnergy = energy - bindingEnergy;
float cosTeta = 0.0f;
// 1 eV cut production
if (ElecKineEnergy > 1.0e-06f && ElecKineEnergy > cutE) {
// direction of the photo electron
cosTeta = PhotoElec_ElecCosThetaDistribution(photons, id, ElecKineEnergy);
float sinTeta = sqrtf(1.0f - cosTeta*cosTeta);
float Phi = gpu_twopi * Brent_real(id, photons.table_x_brent, 0);
float3 ElecDirection = make_float3(sinTeta*cos(Phi), sinTeta*sin(Phi), cosTeta);
ElecDirection = rotateUz(ElecDirection, PhotonDirection);
// Create an electron
electrons.dx[id] = ElecDirection.x;
electrons.dy[id] = ElecDirection.y;
electrons.dz[id] = ElecDirection.z;
electrons.E[id] = ElecKineEnergy;
electrons.px[id] = photons.px[id];
electrons.py[id] = photons.py[id];
electrons.pz[id] = photons.pz[id];
electrons.endsimu[id] = 0;
// Start to track this electron
electrons.active[id] = 1;
//printf("PE => e-\n");
return bindingEnergy;
}
// Absorb the photon
photons.endsimu[id] = 1; // stop the simulation
photons.active[id] = 0;
atomicAdd(count_d, 1); // count simulated primaries
// LocalEnergy Deposit
return bindingEnergy+ElecKineEnergy;
}
/***********************************************************
* Electrons Physics Effects
***********************************************************/
// eIonisation Cross Section Per Atom (Møller model)
__device__ float eIonisation_CSPA_Standard(float E, unsigned short int Z,
float cutE, float maxE) {
float CS = 0.0f;
float xmin = __fdividef(cutE, E);
float tmax = fmin(maxE, 0.5f*E);
float xmax = __fdividef(tmax, E);
float gam = E * 1.9569513367f + 1.0f; // 1/electron_mass_c2
float igam2 = __fdividef(1.0f, gam*gam);
float ibeta2 = __fdividef(1.0f, 1.0f - igam2);
float g = (2.0f*gam - 1.0f)*igam2;
if (cutE < tmax) {
// Cross Section per e-
CS = ((xmax-xmin) * (1.0f-g + __fdividef(1.0, (xmin*xmax)) + __fdividef(1.0f, (1.0f-xmin)*(1.0f-xmax))) - g*__logf( __fdividef(xmax*(1.0 - xmin), xmin*(1.0 - xmax)))) * ibeta2;
CS *= (__fdividef(2.549549299e-23f, E)); // gpu_twopi_mc2_rcl2
CS *= (float)Z;
}
return CS;
}
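// Note: tmax = min(maxE, E/2) because the two outgoing electrons are indistinguishable,
// so the delta ray is defined as the lower-energy one; energy transfers below the
// production cut 'cutE' are handled as continuous loss by eIonisation_dedx_Standard.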
// Compute the total eIonisation cross section for a given material
__device__ float eIonisation_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
float cutE = materials.electron_cut_energy[mat];
float maxE = materials.electron_max_energy[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] *
eIonisation_CSPA_Standard(E, materials.mixture[index+i], cutE, maxE));
}
return CS;
}
// Compute the dE/dx due to the ionization
__device__ float eIonisation_dedx_Standard(Materials materials, unsigned int mat, float E) {
float meanExcitationEnergy = materials.electron_mean_excitation_energy[mat];
float cutE = materials.electron_cut_energy[mat];
float electronDensity = materials.nb_electrons_per_vol[mat];
float Natm = materials.nb_atoms_per_vol[mat];
float Zeff = __fdividef(electronDensity, Natm);
float th = 0.25f*sqrtf(Zeff) * 0.001f; // keV
unsigned short int flag_low_E = 0;
float tkin = E;
if (tkin < th) {tkin = th; flag_low_E = 1;};
float tau = tkin * 1.9569513367f; // 1/electron_mass_c2
float gam = tau + 1.0f;
float gam2 = gam*gam;
float beta2 = 1.0f - __fdividef(1.0f, gam2);
float eexc2 = meanExcitationEnergy * 1.9569513367f; // 1/electron_mass_c2
eexc2 = eexc2 * eexc2;
float d = (cutE < tkin*0.5f)? cutE : tkin*0.5f;
d = d * 1.9569513367f; // 1/electron_mass_c2
float dedx = __logf(2.0f * __fdividef(tau+2.0f, eexc2)) - 1.0f - beta2 + __logf((tau-d)*d) + __fdividef(tau, tau-d) + __fdividef(0.5f*d*d + (2.0f*tau + 1.0f) * __logf(1.0f - __fdividef(d, tau)), gam2);
// Density correction
float twoln10 = 2.0f*__logf(10.0f);
float x = __fdividef(__logf(beta2*gam2), twoln10);
float y = 0.0f;
if (x < materials.fX0[mat]) {
if (materials.fD0[mat] > 0.0f) {
y = materials.fD0[mat]*__powf(10.0f, 2.0f*(x-materials.fX0[mat]));
}
} else if (x >= materials.fX1[mat]) {
y = twoln10*x - materials.fC[mat];
} else {
y = twoln10*x - materials.fC[mat] + materials.fA[mat]
* __powf(materials.fX1[mat]-x, materials.fM[mat]);
}
dedx -= y;
// Total ionization loss
// gpu_twopi_mc2_rcl2
dedx *= __fdividef(2.549549299e-23f*electronDensity, beta2);
if (dedx < 0.0f) {dedx = 0.0f;};
// Low energy extrapolation
if (flag_low_E) {
// 200 eV
if (E >= 200.0e-06f) {dedx *= sqrtf( __fdividef(tkin, E));}
else {dedx *= __fdividef(sqrtf(tkin*E), 200.0e-06f);} // 200 eV
}
return dedx;
}
// Compute the scattering due to the ionization
__device__ float eIonisation_Effect_Standard_NoSec(StackParticle electrons,
StackParticle photons,
float tmin, float maxE, // tmin=cutE
unsigned int id, int *count_d) {
float E = electrons.E[id];
float tmax = E * 0.5f;
if (maxE < tmax) {tmax = maxE;};
if (tmin >= tmax) { // tmin is the same that cutE
// stop the simulation for this one
electrons.endsimu[id] = 1;
// Unfreeze the photon tracking
electrons.active[id] = 0;
photons.active[id] = 1;
atomicAdd(count_d, 1); // count simulated secondaries
return E;
}
float energy = E + 0.510998910f; // electron_mass_c2
float totalMomentum = sqrtf(E * (energy + 0.510998910f));
float xmin = __fdividef(tmin, E);
float xmax = __fdividef(tmax, E);
float gam = energy * 1.9569513367f; // 1/electron_mass_c2
float gamma2 = gam*gam;
float beta2 = 1.0f - __fdividef(1.0f, gamma2);
// GetMomentumDirection
float3 direction = make_float3(electrons.dx[id], electrons.dy[id], electrons.dz[id]);
// Moller (e-e-) scattering
float g = __fdividef(2.0f*gam - 1.0f, gamma2);
float y = 1.0f - xmax;
float grej = 1.0f - g*xmax + xmax*xmax*(1.0f - g + __fdividef(1.0f - g*y, y*y));
float x, z, q;
do {
q = Brent_real(id, electrons.table_x_brent, 0);
x = __fdividef(xmin*xmax, xmin*(1.0f - q) + xmax*q);
y = 1.0f - x;
z = 1.0f - g*x + x*x*(1.0f - g + __fdividef(1.0f - g*y, y*y));
} while(grej * Brent_real(id, electrons.table_x_brent, 0) > z);
float deltaKinEnergy = x * E;
float deltaMomentum = sqrtf(deltaKinEnergy * (deltaKinEnergy + 2.0f*0.510998910f)); // electron_mass_c2
float cost = deltaKinEnergy * __fdividef(energy + 0.510998910f, deltaMomentum * totalMomentum);
float sint = 1.0f - cost*cost;
sint = (sint > 0.0f)? sqrtf(sint) : 0.0f;
float phi = gpu_twopi * Brent_real(id, electrons.table_x_brent, 0);
float3 deltaDirection = make_float3(sint*__cosf(phi), sint*__sinf(phi), cost);
deltaDirection = rotateUz(deltaDirection, direction);
electrons.E[id] = E - deltaKinEnergy;
float3 dir = sub_vector(scale_vector(direction, totalMomentum),
scale_vector(deltaDirection, deltaMomentum));
dir = unit_vector(dir);
electrons.dx[id] = dir.x;
electrons.dy[id] = dir.y;
electrons.dz[id] = dir.z;
return deltaKinEnergy;
}
// Multiple Scattering
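// MSC_CSPA below returns what appears to be the electron transport cross
// section per atom used by Urban-type multiple-scattering models: below
// Tlim = 10 MeV it interpolates the tabulated correction factors celectron
// over the Zdat/Tdat grids, above it switches to the sig0/hecorr
// parameterisation. The Zdat, Tdat, celectron, sig0 and hecorr tables are
// expected to be defined elsewhere in this file.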
__device__ float MSC_CSPA(float E, unsigned short int Z) {
float Z23 = __expf( 0.666666666666f*__logf((float)Z) );
float eTotalEnergy = E + 0.51099891f;
float beta2 = E * __fdividef(eTotalEnergy+0.51099891f, eTotalEnergy*eTotalEnergy);
double bg2 = E * __fdividef(eTotalEnergy+0.51099891f, 0.26111988f); // e_mass_c2*e_mass_c2
float eps = 37557.7634f * __fdividef(bg2, Z23); // epsfactor
float epsmin = 1.0e-04f;
float epsmax = 1.0e+10f;
float sigma;
if (eps<epsmin) sigma = 2.0f*eps*eps;
else if(eps<epsmax) sigma = __logf(1.0f+2.0f*eps) - 2.0f*__fdividef(eps, (1.0f+2.0f*eps));
else sigma = __logf(2.0f*eps) - 1.0f+__fdividef(1.0f, eps);
sigma *= __fdividef(Z*Z, (beta2*bg2));
// get bin number in Z
int iZ = 14;
while ((iZ >= 0) && (Zdat[iZ] >= Z)) iZ -= 1;
if (iZ == 14) iZ = 13;
if (iZ == -1) iZ = 0 ;
float Z1 = Zdat[iZ];
float Z2 = Zdat[iZ+1];
float ratZ = __fdividef((Z-Z1)*(Z+Z1), (Z2-Z1)*(Z2+Z1));
float c1, c2;
if(E <= 10.0f) { // Tlim = 10 MeV
// get bin number in T (beta2)
int iT = 21;
while ((iT >= 0) && (Tdat[iT] >= E)) iT -= 1;
if (iT == 21) iT = 20;
if (iT == -1) iT = 0 ;
// calculate betasquare values
float T = Tdat[iT];
float EE = T + 0.51099891f;
float b2small = T * __fdividef(EE + 0.51099891f, EE*EE);
T = Tdat[iT+1];
EE = T + 0.51099891f;
float b2big = T * __fdividef(EE + 0.51099891f, EE*EE);
float ratb2 = __fdividef(beta2-b2small, b2big-b2small);
c1 = celectron[iZ][iT];
c2 = celectron[iZ+1][iT];
float cc1 = c1 + ratZ*(c2-c1);
c1 = celectron[iZ][iT+1];
c2 = celectron[iZ+1][iT+1];
float cc2 = c1 + ratZ*(c2-c1);
sigma *= __fdividef(4.98934390e-23f, cc1 + ratb2*(cc2-cc1)); // sigmafactor
} else {
// bg2lim beta2lim
c1 = 422.104880f*sig0[iZ] * __fdividef(1.0f+hecorr[iZ] *(beta2-0.997636519f), bg2);
c2 = 422.104880f*sig0[iZ+1] * __fdividef(1.0f+hecorr[iZ+1]*(beta2-0.997636519f), bg2);
if ((Z >= Z1) && (Z <= Z2)) {
sigma = c1 + ratZ*(c2-c1);
} else if(Z < Z1) {
sigma = Z*Z*__fdividef(c1, (Z1*Z1));
} else if(Z > Z2) {
sigma = Z*Z*__fdividef(c2, (Z2*Z2));
}
}
return sigma;
}
// Compute the total MSC cross section for a given material
__device__ float MSC_CS(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] *
MSC_CSPA(E, materials.mixture[index+i]));
}
return CS;
}
// Multiple Scattering effect
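// The MSC step below only deflects the electron, it deposits no energy (hence
// the 0.0f return value). The polar angle follows a Highland-type formula,
// theta = 13.6 MeV / (beta*c*p) * sqrt(t/X0), multiplied by the Zeff- and
// ln(t/X0)-dependent correction (coeffth1 + coeffth2*ln(t/X0)); note the
// approximation flagged in the body where the pre- and post-step kinetic
// energies are taken to be equal when building beta*c*p.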
__device__ float MSC_Effect(StackParticle electrons, Materials materials, float trueStepLength,
unsigned int mat, unsigned int id) {
// double betacp = sqrt(currentKinEnergy*(currentKinEnergy+2.*mass)*KineticEnergy*(KineticEnergy+2.*mass)/((currentKinEnergy+mass)*(KineticEnergy+mass)));
//E = 1.0f;
float E = electrons.E[id];
// !!!! Approximation (Seb): currentKinEnergy = KineticEnergy
float betacp = E * __fdividef(E+1.02199782f, E+0.51099891f);
float y = __fdividef(trueStepLength, materials.rad_length[mat]);
float theta = 13.6f * __fdividef(__powf(y, 0.5f), betacp);
y = __logf(y);
// correction in theta formula
float Zeff = __fdividef(materials.nb_electrons_per_vol[mat],
materials.nb_atoms_per_vol[mat]);
float lnZ = __logf(Zeff);
float coeffth1 = (1.0f - __fdividef(8.7780e-2f, Zeff)) * (0.87f + 0.03f*lnZ);
float coeffth2 = (4.0780e-2f + 1.7315e-4f*Zeff) * (0.87f + 0.03f*lnZ);
float corr = coeffth1 + coeffth2 * y;
theta *= corr ;
float phi = gpu_twopi * Brent_real(id, electrons.table_x_brent, 0);
float3 direction = make_float3(electrons.dx[id], electrons.dy[id], electrons.dz[id]);
float3 deltaDirection = make_float3(__cosf(phi)*__sinf(theta),
__sinf(phi)*__sinf(theta),
__cosf(theta));
direction = rotateUz(deltaDirection, direction);
electrons.dx[id] = direction.x;
electrons.dy[id] = direction.y;
electrons.dz[id] = direction.z;
return 0.0f;
}
/***********************************************************
* Navigator
***********************************************************/
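// The kernels below advance every particle of a stack by one step per kernel
// launch: they sample an exponential free path for each discrete process from
// its total cross section, compare it with the distance to the next voxel
// boundary (and, where present, with a user step limiter), move the particle
// by the shortest of these distances, then apply the winning process. The
// host side is expected to relaunch the kernels until every particle has its
// endsimu flag set.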
// Regular navigator in a voxelized phantom for photons, without secondary particles
__global__ void kernel_NavRegularPhan_Photon_NoSec(StackParticle photons,
Volume phantom,
Materials materials,
int* count_d) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= photons.size) return;
if (photons.endsimu[id]) return;
if (!photons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = photons.px[id];
position.y = photons.py[id];
position.z = photons.pz[id];
//printf("%0.2f %0.2f %0.2f\n", position.x, position.y, position.z);
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
/*
if (index_phantom.w >= phantom.nb_voxel_volume) {
printf(" pos %0.2f %0.2f %0.2f ispc %0.2f %0.2f %0.2f ind %i %i %i = %i max %i\n",
position.x, position.y, position.z, ivoxsize.x, ivoxsize.y, ivoxsize.z,
index_phantom.x, index_phantom.y, index_phantom.z,
index_phantom.w, phantom.nb_voxel_volume);
}
*/
// Read direction
float3 direction;
direction.x = photons.dx[id];
direction.y = photons.dy[id];
direction.z = photons.dz[id];
// Get energy
float energy = photons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
//// Find next discrete interaction ///////////////////////////////////////
// Compute the free path of each discrete process and keep the shortest one
float next_interaction_distance = FLT_MAX;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
// Photoelectric
cross_section = PhotoElec_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_PHOTOELECTRIC;
}
// Compton
cross_section = Compton_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_COMPTON;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
if (interaction_distance < next_interaction_distance) {
// overshoot by 1 um so that the particle lands inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = PHOTON_BOUNDARY_VOXEL;
}
//// Move particle //////////////////////////////////////////////////////
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
photons.t[id] += (3.33564095198e-03f * next_interaction_distance);
photons.px[id] = position.x;
photons.py[id] = position.y;
photons.pz[id] = position.z;
// Stop simulation if out of phantom or no more energy
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
photons.endsimu[id] = 1; // stop the simulation
atomicAdd(count_d, 1); // count simulated primaries
return;
}
//// Resolve the discrete process ////////////////////////////////////////
if (next_discrete_process == PHOTON_PHOTOELECTRIC) {
float discrete_loss = PhotoElec_Effect_Standard_NoSec(photons, id, count_d);
}
if (next_discrete_process == PHOTON_COMPTON) {
float discrete_loss = Compton_Effect_Standard_NoSec(photons, id, count_d);
}
}
// Regular navigator in a voxelized phantom for photons, with secondary electrons
__global__ void kernel_NavRegularPhan_Photon_WiSec(StackParticle photons,
StackParticle electrons,
Volume phantom,
Materials materials,
Dosimetry dosemap,
int* count_d, float step_limiter) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= photons.size) return;
//printf("ID %i Nav gamma endsimu %i active %i\n",
// id, photons.endsimu[id], photons.active[id]);
if (photons.endsimu[id]) return;
if (!photons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = photons.px[id];
position.y = photons.py[id];
position.z = photons.pz[id];
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
// Read direction
float3 direction;
direction.x = photons.dx[id];
direction.y = photons.dy[id];
direction.z = photons.dz[id];
// Get energy
float energy = photons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
/// Debug ///
//printf("gamma %i E %e pos %.2f %.2f %.2f mat %i\n", id, energy, position.x, position.y, position.z, mat);
//// Find next discrete interaction ///////////////////////////////////////
// Compute the free path of each discrete process and keep the shortest one
float next_interaction_distance = FLT_MAX;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
// Photoelectric
cross_section = PhotoElec_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
//printf("PE CS %e PIL %e\n", cross_section, interaction_distance);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_PHOTOELECTRIC;
}
// Compton
cross_section = Compton_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
//printf("Cpt CS %e PIL %e\n", cross_section, interaction_distance);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_COMPTON;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
//printf("Boundary PIL %e\n", interaction_distance);
if (interaction_distance < next_interaction_distance) {
// overshoot by 1 um so that the particle lands inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = PHOTON_BOUNDARY_VOXEL;
}
// step limiter
if (step_limiter < next_interaction_distance) {
next_interaction_distance = step_limiter;
next_discrete_process = PHOTON_STEP_LIMITER;
}
//// Move particle //////////////////////////////////////////////////////
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
photons.t[id] += (3.33564095198e-03f * next_interaction_distance);
photons.px[id] = position.x;
photons.py[id] = position.y;
photons.pz[id] = position.z;
// Stop simulation if out of phantom
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
photons.endsimu[id] = 1; // stop the simulation
atomicAdd(count_d, 1); // count simulated primaries
return;
}
//// Resolve the discrete process ////////////////////////////////////////
float discrete_loss = 0.0f;
if (next_discrete_process == PHOTON_BOUNDARY_VOXEL ||
next_discrete_process == PHOTON_STEP_LIMITER) {
//printf("boundary || step limiter\n");
return;
}
if (next_discrete_process == PHOTON_PHOTOELECTRIC) {
//printf("PE\n");
discrete_loss = PhotoElec_Effect_Standard_WiSec(photons, electrons, materials,
materials.electron_cut_energy[mat],
mat, id, count_d);
}
if (next_discrete_process == PHOTON_COMPTON) {
//printf("Compton\n");
discrete_loss = Compton_Effect_Standard_WiSec(photons, electrons,
materials.electron_cut_energy[mat],
id, count_d);
//printf("energy deposit %e\n", discrete_loss);
}
// Dosemap scoring
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
//printf("index dosemap %i\n", index_phantom.w);
atomicAdd(&dosemap.edep[index_phantom.w], discrete_loss);
}
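// The electron kernel below is the counterpart of the photon kernel above for
// the secondary electron it may create: while the electron is tracked its
// parent photon stays frozen (photons.active = 0), and when the electron ends
// (out of phantom, safety range, or delta-ray production impossible) the
// photon is reactivated so a later photon launch can resume it. Continuous
// energy loss is scored at a point sampled along the step, discrete losses at
// the post-step position.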
// Regular navigator in a voxelized phantom for electrons bound to a parent photon
__global__ void kernel_NavRegularPhan_Electron_BdPhoton(StackParticle electrons,
StackParticle photons,
Volume phantom,
Materials materials,
Dosimetry dosemap,
int* count_d, float step_limiter) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= electrons.size) return;
//printf("\nNav e- endsimu %i active %i\n", electrons.endsimu[id], electrons.active[id]);
if (electrons.endsimu[id]) return;
if (!electrons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = electrons.px[id];
position.y = electrons.py[id];
position.z = electrons.pz[id];
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
// Read direction
float3 direction;
direction.x = electrons.dx[id];
direction.y = electrons.dy[id];
direction.z = electrons.dz[id];
// Get energy
float energy = electrons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
/// Debug ///
//printf("e- %i E %e pos %.2f %.2f %.2f\n", id, energy, position.x, position.y, position.z);
//// Find next discrete interaction ///////////////////////////////////////
// Accumulate the restricted dE/dx and compute the free path of each discrete process, keeping the shortest
float next_interaction_distance = FLT_MAX;
float total_dedx = 0.0f;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
float probe = 0.0f; // DEBUG
// eIonisation
cross_section = eIonisation_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, electrons.table_x_brent, 0)),
cross_section);
total_dedx += eIonisation_dedx_Standard(materials, mat, energy);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = ELECTRON_EIONISATION;
}
// Multiple Scattering
cross_section = MSC_CS(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, electrons.table_x_brent, 0)),
cross_section);
// dedx = 0.0
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = ELECTRON_MSC;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
//printf("Boundary PIL %e\n", interaction_distance);
if (interaction_distance < next_interaction_distance) {
// overshoot by 1 um so that the particle lands inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = ELECTRON_BOUNDARY_VOXEL;
}
// FIXME: the step limiter has not been validated yet
// step limiter
if (step_limiter < next_interaction_distance) {
next_interaction_distance = step_limiter;
next_discrete_process = ELECTRON_STEP_LIMITER;
}
//printf("E %e dist %e\n", energy, next_interaction_distance);
//// Resolve continuous processes ///////////////////////////////////////
float safety_distance = __fdividef(energy, total_dedx);
float continuous_loss = 0.0f;
//printf("Safety PIL %e\n", safety_distance);
if (safety_distance < next_interaction_distance) {
next_interaction_distance = safety_distance;
next_discrete_process = ELECTRON_SAFETY;
continuous_loss = energy;
} else {
continuous_loss = total_dedx * next_interaction_distance;
energy -= continuous_loss;
if (energy < 0.0f) energy = 0.0f;
electrons.E[id] = energy;
}
// the continuous loss is deposited at a random point sampled along the step
float rnd_dist = next_interaction_distance * Brent_real(id, electrons.table_x_brent, 0);
float3 rnd_pos;
rnd_pos.x = position.x + direction.x * rnd_dist;
rnd_pos.y = position.y + direction.y * rnd_dist;
rnd_pos.z = position.z + direction.z * rnd_dist;
if ( rnd_pos.x <= 0 || rnd_pos.x >= dosemap.size_in_mm.x
|| rnd_pos.y <= 0 || rnd_pos.y >= dosemap.size_in_mm.y
|| rnd_pos.z <= 0 || rnd_pos.z >= dosemap.size_in_mm.z ) {
rnd_pos = position;
}
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(rnd_pos.x * ivoxsize.x);
index_phantom.y = int(rnd_pos.y * ivoxsize.y);
index_phantom.z = int(rnd_pos.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
atomicAdd(&dosemap.edep[index_phantom.w], continuous_loss);
//// Move particle //////////////////////////////////////////////////////
//printf("E %e dist %e\n", energy, next_interaction_distance);
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
electrons.t[id] += (3.33564095198e-03f * next_interaction_distance);
electrons.px[id] = position.x;
electrons.py[id] = position.y;
electrons.pz[id] = position.z;
// Stop simulation if out of phantom
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
electrons.endsimu[id] = 1; // stop the simulation
electrons.active[id] = 0;
photons.active[id] = 1; // unfreeze the photon tracking
atomicAdd(count_d, 1); // count simulated secondaries
return;
}
//// Resolve the discrete process ////////////////////////////////////////
float discrete_loss = 0.0f;
if (next_discrete_process == ELECTRON_BOUNDARY_VOXEL ||
next_discrete_process == ELECTRON_STEP_LIMITER) {
//printf("Boundary || step limiter\n");
return;
}
if (next_discrete_process == ELECTRON_SAFETY) {
//printf("Safety\n");
electrons.endsimu[id] = 1; // stop the simulation
electrons.active[id] = 0;
photons.active[id] = 1; // unfreeze the photon tracking
atomicAdd(count_d, 1); // count simulated secondaries
return;
}
if (next_discrete_process == ELECTRON_EIONISATION) {
//printf("eIonisation\n");
discrete_loss = eIonisation_Effect_Standard_NoSec(electrons, photons,
materials.electron_cut_energy[mat],
materials.electron_max_energy[mat],
id, count_d);
}
if (next_discrete_process == ELECTRON_MSC) {
//printf("MSC\n");
// FIXME trueStepLength = next_interaction_distance?!
discrete_loss = MSC_Effect(electrons, materials, next_interaction_distance, mat, id);
}
// Dosemap scoring
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
//printf("index dosemap %i\n", index_phantom.w);
atomicAdd(&dosemap.edep[index_phantom.w], discrete_loss);
}
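/*
 * Host-side usage sketch (illustrative only): the navigator kernels above are
 * meant to be relaunched until every particle has finished. The block size,
 * the variable names (d_gamma, d_elec, d_count, ...) and the termination test
 * are assumptions made for this example; only the kernel names and argument
 * orders come from the code above.
 *
 *   int block = 256;
 *   int grid = (stack_size + block - 1) / block;
 *   int h_count = 0;
 *   while (h_count < stack_size) {
 *       kernel_NavRegularPhan_Photon_WiSec<<<grid, block>>>(d_gamma, d_elec,
 *               d_phantom, d_materials, d_dosemap, d_count, step_limiter);
 *       kernel_NavRegularPhan_Electron_BdPhoton<<<grid, block>>>(d_elec, d_gamma,
 *               d_phantom, d_materials, d_dosemap, d_count, step_limiter);
 *       // copy the counter back to the host (cudaMemcpy or hipMemcpy
 *       // depending on the build) and update h_count here
 *   }
 */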
| 5a9e227d4c30e2c16d8cd6d447755dba08460be0.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <float.h>
/***********************************************************
* Vars
***********************************************************/
__constant__ const float gpu_pi = 3.14159265358979323846;
__constant__ const float gpu_twopi = 2*gpu_pi;
/***********************************************************
* Utils Host
***********************************************************/
// Get time
double time() {
timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec + tv.tv_usec / 1000000.0;
}
/***********************************************************
* Data material structure
***********************************************************/
#ifndef MATERIALS
#define MATERIALS
// Structure for materials
struct Materials{
unsigned int nb_materials; // n
unsigned int nb_elements_total; // k
unsigned short int *nb_elements; // n
unsigned short int *index; // n
unsigned short int *mixture; // k
float *atom_num_dens; // k
float *nb_atoms_per_vol; // n
float *nb_electrons_per_vol; // n
float *electron_cut_energy; // n
float *electron_max_energy; // n
float *electron_mean_excitation_energy; // n
float *rad_length; // n
float *fX0; // n
float *fX1;
float *fD0;
float *fC;
float *fA;
float *fM;
};
#endif
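// The Materials structure above is a flattened structure-of-arrays: the
// per-material quantities (nb_elements, index, the densities, cuts and the
// density-correction coefficients fX0..fM) have one entry per material, while
// mixture[] and atom_num_dens[] concatenate the element lists of all
// materials; index[m] is the offset of material m inside those two arrays.
// For example, with two materials made of 2 and 3 elements, index would be
// {0, 2} and mixture/atom_num_dens would each hold 5 entries.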
// Materials device allocation
void materials_device_malloc(Materials &mat, unsigned int nb_mat, unsigned int nb_elm) {
mat.nb_materials = nb_mat;
mat.nb_elements_total = nb_elm;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
cudaMalloc((void**) &mat.nb_elements, mem_mat_usi);
cudaMalloc((void**) &mat.index, mem_mat_usi);
cudaMalloc((void**) &mat.mixture, mem_elm_usi);
cudaMalloc((void**) &mat.atom_num_dens, mem_elm_float);
cudaMalloc((void**) &mat.nb_atoms_per_vol, mem_mat_float);
cudaMalloc((void**) &mat.nb_electrons_per_vol, mem_mat_float);
cudaMalloc((void**) &mat.electron_cut_energy, mem_mat_float);
cudaMalloc((void**) &mat.electron_max_energy, mem_mat_float);
cudaMalloc((void**) &mat.electron_mean_excitation_energy, mem_mat_float);
cudaMalloc((void**) &mat.rad_length, mem_mat_float);
cudaMalloc((void**) &mat.fX0, mem_mat_float);
cudaMalloc((void**) &mat.fX1, mem_mat_float);
cudaMalloc((void**) &mat.fD0, mem_mat_float);
cudaMalloc((void**) &mat.fC, mem_mat_float);
cudaMalloc((void**) &mat.fA, mem_mat_float);
cudaMalloc((void**) &mat.fM, mem_mat_float);
}
// Materials free device memory
void materials_device_free(Materials &mat) {
cudaFree(mat.nb_elements);
cudaFree(mat.index);
cudaFree(mat.mixture);
cudaFree(mat.atom_num_dens);
cudaFree(mat.nb_atoms_per_vol);
cudaFree(mat.nb_electrons_per_vol);
cudaFree(mat.electron_cut_energy);
cudaFree(mat.electron_max_energy);
cudaFree(mat.electron_mean_excitation_energy);
cudaFree(mat.rad_length);
cudaFree(mat.fX0);
cudaFree(mat.fX1);
cudaFree(mat.fD0);
cudaFree(mat.fC);
cudaFree(mat.fA);
cudaFree(mat.fM);
}
// Materials host allocation
void materials_host_malloc(Materials &mat, unsigned int nb_mat, unsigned int nb_elm) {
mat.nb_materials = nb_mat;
mat.nb_elements_total = nb_elm;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
mat.nb_elements = (unsigned short int*)malloc(mem_mat_usi);
mat.index = (unsigned short int*)malloc(mem_mat_usi);
mat.mixture = (unsigned short int*)malloc(mem_elm_usi);
mat.atom_num_dens = (float*)malloc(mem_elm_float);
mat.nb_atoms_per_vol = (float*)malloc(mem_mat_float);
mat.nb_electrons_per_vol = (float*)malloc(mem_mat_float);
mat.electron_cut_energy = (float*)malloc(mem_mat_float);
mat.electron_max_energy = (float*)malloc(mem_mat_float);
mat.electron_mean_excitation_energy = (float*)malloc(mem_mat_float);
mat.rad_length = (float*)malloc(mem_mat_float);
mat.fX0 = (float*)malloc(mem_mat_float);
mat.fX1 = (float*)malloc(mem_mat_float);
mat.fD0 = (float*)malloc(mem_mat_float);
mat.fC = (float*)malloc(mem_mat_float);
mat.fA = (float*)malloc(mem_mat_float);
mat.fM = (float*)malloc(mem_mat_float);
}
// Materials free memory
void materials_host_free(Materials &mat) {
free(mat.nb_elements);
free(mat.index);
free(mat.mixture);
free(mat.atom_num_dens);
free(mat.nb_atoms_per_vol);
free(mat.nb_electrons_per_vol);
free(mat.electron_cut_energy);
free(mat.electron_max_energy);
free(mat.electron_mean_excitation_energy);
free(mat.rad_length);
free(mat.fX0);
free(mat.fX1);
free(mat.fD0);
free(mat.fC);
free(mat.fA);
free(mat.fM);
}
/***********************************************************
* Stack data particle structure
***********************************************************/
#ifndef STACKPARTICLE
#define STACKPARTICLE
// Stack of particles, format data is defined as SoA
struct StackParticle{
float* E;
float* dx;
float* dy;
float* dz;
float* px;
float* py;
float* pz;
float* t;
unsigned short int* type;
unsigned int* eventID;
unsigned int* trackID;
unsigned int* seed;
unsigned char* active;
unsigned char* endsimu;
unsigned long* table_x_brent;
unsigned int size;
};
#endif
// Stack host allocation
void stack_host_malloc(StackParticle &phasespace, int stack_size) {
phasespace.size = stack_size;
unsigned int mem_phasespace_float = stack_size * sizeof(float);
unsigned int mem_phasespace_uint = stack_size * sizeof(unsigned int);
unsigned int mem_phasespace_usint = stack_size * sizeof(unsigned short int);
unsigned int mem_phasespace_char = stack_size * sizeof(char);
phasespace.E = (float*)malloc(mem_phasespace_float);
phasespace.dx = (float*)malloc(mem_phasespace_float);
phasespace.dy = (float*)malloc(mem_phasespace_float);
phasespace.dz = (float*)malloc(mem_phasespace_float);
phasespace.px = (float*)malloc(mem_phasespace_float);
phasespace.py = (float*)malloc(mem_phasespace_float);
phasespace.pz = (float*)malloc(mem_phasespace_float);
phasespace.t = (float*)malloc(mem_phasespace_float);
phasespace.type = (unsigned short int*)malloc(mem_phasespace_usint);
phasespace.seed = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.eventID = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.trackID = (unsigned int*)malloc(mem_phasespace_uint);
phasespace.endsimu = (unsigned char*)malloc(mem_phasespace_char);
phasespace.active = (unsigned char*)malloc(mem_phasespace_char);
}
// free host mem
void stack_host_free(StackParticle &phasespace) {
free(phasespace.E);
free(phasespace.dx);
free(phasespace.dy);
free(phasespace.dz);
free(phasespace.px);
free(phasespace.py);
free(phasespace.pz);
free(phasespace.t);
free(phasespace.type);
free(phasespace.seed);
free(phasespace.eventID);
free(phasespace.trackID);
free(phasespace.endsimu);
free(phasespace.active);
}
// For PRNG Brent
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define r (4*UINT64 + 8*UINT32)
// Stack device allocation
void stack_device_malloc(StackParticle &stackpart, int stack_size) {
stackpart.size = stack_size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_brent;
if (r == 4) {mem_brent = stack_size * 6 * sizeof(unsigned long);}
else {mem_brent = stack_size * 10 * sizeof(unsigned long);}
cudaMalloc((void**) &stackpart.E, mem_stackpart_float);
cudaMalloc((void**) &stackpart.dx, mem_stackpart_float);
cudaMalloc((void**) &stackpart.dy, mem_stackpart_float);
cudaMalloc((void**) &stackpart.dz, mem_stackpart_float);
cudaMalloc((void**) &stackpart.px, mem_stackpart_float);
cudaMalloc((void**) &stackpart.py, mem_stackpart_float);
cudaMalloc((void**) &stackpart.pz, mem_stackpart_float);
cudaMalloc((void**) &stackpart.t, mem_stackpart_float);
cudaMalloc((void**) &stackpart.type, mem_stackpart_usint);
cudaMalloc((void**) &stackpart.seed, mem_stackpart_uint);
cudaMalloc((void**) &stackpart.eventID, mem_stackpart_uint);
cudaMalloc((void**) &stackpart.trackID, mem_stackpart_uint);
cudaMalloc((void**) &stackpart.table_x_brent, mem_brent);
cudaMalloc((void**) &stackpart.endsimu, mem_stackpart_char);
cudaMalloc((void**) &stackpart.active, mem_stackpart_char);
}
#undef UINT64
#undef UINT32
#undef r
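// Note on the allocation above: each particle keeps r words of generator
// state plus the Weyl word w and the circular index, i.e. r + 2 unsigned
// longs per particle -- 6 when unsigned long is 64-bit (r = 4) and 10 when it
// is 32-bit (r = 8), matching the indices used by brent_int() further down.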
// free device mem
void stack_device_free(StackParticle &stackpart) {
cudaFree(stackpart.E);
cudaFree(stackpart.dx);
cudaFree(stackpart.dy);
cudaFree(stackpart.dz);
cudaFree(stackpart.px);
cudaFree(stackpart.py);
cudaFree(stackpart.pz);
cudaFree(stackpart.t);
cudaFree(stackpart.type);
cudaFree(stackpart.seed);
cudaFree(stackpart.eventID);
cudaFree(stackpart.trackID);
cudaFree(stackpart.endsimu);
cudaFree(stackpart.active);
cudaFree(stackpart.table_x_brent);
}
/***********************************************************
* Volume data structure
***********************************************************/
#ifndef VOLUME
#define VOLUME
// Volume structure data
struct Volume {
unsigned short int *data;
unsigned int mem_data;
float3 size_in_mm;
int3 size_in_vox;
float3 voxel_size;
int nb_voxel_volume;
int nb_voxel_slice;
float3 position;
};
#endif
// Volume host allocation
void volume_host_malloc(Volume &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(unsigned short int);
vol.data = (unsigned short int*)malloc(vol.mem_data);
}
// Free host memory
void volume_host_free(Volume &vol) {
free(vol.data);
}
// Volume device allocation
void volume_device_malloc(Volume &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(unsigned short int);
cudaMalloc((void**) &vol.data, vol.mem_data);
}
// Free device memory
void volume_device_free(Volume &vol) {
cudaFree(vol.data);
}
/***********************************************************
* Dosimetry data structure
***********************************************************/
#ifndef DOSIMETRY
#define DOSIMETRY
struct Dosimetry {
float *edep;
float *edep2;
unsigned int mem_data;
float3 size_in_mm;
int3 size_in_vox;
float3 voxel_size;
int nb_voxel_volume;
int nb_voxel_slice;
float3 position;
};
#endif
// Dosimetry host allocation
void dosimetry_host_malloc(Dosimetry &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(float);
vol.edep = (float*)malloc(vol.mem_data);
}
// Dosimetry free host memory
void dosimetry_host_free(Dosimetry &vol) {
free(vol.edep);
}
// Dosimetry volume device allocation
void dosimetry_device_malloc(Dosimetry &vol, int nbvox) {
vol.mem_data = nbvox * sizeof(float);
cudaMalloc((void**) &vol.edep, vol.mem_data);
}
// Dosimetry free device memory
void dosimetry_device_free(Dosimetry &vol) {
cudaFree(vol.edep);
}
// Dosimetry reset
void dosimetry_host_reset(Dosimetry &vol) {
int i=0; while(i<vol.nb_voxel_volume) {
vol.edep[i] = 0.0f;
++i;
}
}
/***********************************************************
* Activities structure
***********************************************************/
struct Activities {
unsigned int nb_activities;
float tot_activity;
unsigned int *act_index;
float *act_cdf;
};
// Host allocation
void activities_host_malloc(Activities &act, int nbact) {
act.act_index = (unsigned int*)malloc(nbact*sizeof(unsigned int));
act.act_cdf = (float*)malloc(nbact*sizeof(float));
}
// Device allocation
void activities_device_malloc(Activities &act, int nbact) {
cudaMalloc((void**) &act.act_index, nbact*sizeof(unsigned int));
cudaMalloc((void**) &act.act_cdf, nbact*sizeof(float));
}
// Free host mem
void activities_host_free(Activities &act) {
free(act.act_index);
free(act.act_cdf);
}
// Free device mem
void activities_device_free(Activities &act) {
cudaFree(act.act_index);
cudaFree(act.act_cdf);
}
/***********************************************************
* Copy structure functions
***********************************************************/
// Copy materials from host to device
void materials_copy_host2device(Materials &host, Materials &device) {
unsigned int nb_mat = host.nb_materials;
unsigned int nb_elm = host.nb_elements_total;
unsigned int mem_mat_usi = nb_mat * sizeof(unsigned short int);
unsigned int mem_mat_float = nb_mat * sizeof(float);
unsigned int mem_elm_usi = nb_elm * sizeof(unsigned short int);
unsigned int mem_elm_float = nb_elm * sizeof(float);
cudaMemcpy(device.nb_elements, host.nb_elements, mem_mat_usi, cudaMemcpyHostToDevice);
cudaMemcpy(device.index, host.index, mem_mat_usi, cudaMemcpyHostToDevice);
cudaMemcpy(device.mixture, host.mixture, mem_elm_usi, cudaMemcpyHostToDevice);
cudaMemcpy(device.atom_num_dens, host.atom_num_dens, mem_elm_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.nb_atoms_per_vol, host.nb_atoms_per_vol, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.nb_electrons_per_vol, host.nb_electrons_per_vol, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.electron_cut_energy, host.electron_cut_energy, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.electron_max_energy, host.electron_max_energy, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.electron_mean_excitation_energy, host.electron_mean_excitation_energy, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.rad_length, host.rad_length, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fX0, host.fX0, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fX1, host.fX1, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fD0, host.fD0, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fC, host.fC, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fA, host.fA, mem_mat_float, cudaMemcpyHostToDevice);
cudaMemcpy(device.fM, host.fM, mem_mat_float, cudaMemcpyHostToDevice);
}
// Copy stack from device to host
void stack_copy_device2host(StackParticle &stackpart, StackParticle &phasespace) {
int stack_size = stackpart.size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
cudaMemcpy(phasespace.E, stackpart.E, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.dx, stackpart.dx, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.dy, stackpart.dy, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.dz, stackpart.dz, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.px, stackpart.px, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.py, stackpart.py, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.pz, stackpart.pz, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.t, stackpart.t, mem_stackpart_float, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.type, stackpart.type, mem_stackpart_usint, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.endsimu, stackpart.endsimu, mem_stackpart_char, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.active, stackpart.active, mem_stackpart_char, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.trackID, stackpart.trackID, mem_stackpart_uint, cudaMemcpyDeviceToHost);
cudaMemcpy(phasespace.eventID, stackpart.eventID, mem_stackpart_uint, cudaMemcpyDeviceToHost);
}
// Copy stack from host to device
void stack_copy_host2device(StackParticle &phasespace, StackParticle &stackpart) {
int stack_size = phasespace.size;
unsigned int mem_stackpart_float = stack_size * sizeof(float);
unsigned int mem_stackpart_char = stack_size * sizeof(char);
unsigned int mem_stackpart_uint = stack_size * sizeof(unsigned int);
unsigned int mem_stackpart_usint = stack_size * sizeof(unsigned short int);
cudaMemcpy(stackpart.E, phasespace.E, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.dx, phasespace.dx, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.dy, phasespace.dy, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.dz, phasespace.dz, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.px, phasespace.px, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.py, phasespace.py, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.pz, phasespace.pz, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.t, phasespace.t, mem_stackpart_float, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.type, phasespace.type, mem_stackpart_usint, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.endsimu, phasespace.endsimu, mem_stackpart_char, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.active, phasespace.active, mem_stackpart_char, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.trackID, phasespace.trackID, mem_stackpart_uint, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.eventID, phasespace.eventID, mem_stackpart_uint, cudaMemcpyHostToDevice);
cudaMemcpy(stackpart.seed, phasespace.seed, mem_stackpart_uint, cudaMemcpyHostToDevice);
}
// Copy volume from device to host
void volume_copy_device2host(Volume &voldevice, Volume &volhost) {
volhost.size_in_vox = voldevice.size_in_vox;
volhost.voxel_size = voldevice.voxel_size;
volhost.size_in_mm = voldevice.size_in_mm;
volhost.nb_voxel_slice = voldevice.nb_voxel_slice;
volhost.nb_voxel_volume = voldevice.nb_voxel_volume;
volhost.mem_data = voldevice.mem_data;
volhost.position = voldevice.position;
cudaMemcpy(volhost.data, voldevice.data, voldevice.mem_data, cudaMemcpyDeviceToHost);
}
// Copy volume from host to device
void volume_copy_host2device(Volume &volhost, Volume &voldevice) {
voldevice.size_in_vox = volhost.size_in_vox;
voldevice.voxel_size = volhost.voxel_size;
voldevice.size_in_mm = volhost.size_in_mm;
voldevice.nb_voxel_slice = volhost.nb_voxel_slice;
voldevice.nb_voxel_volume = volhost.nb_voxel_volume;
voldevice.mem_data = volhost.mem_data;
voldevice.position = volhost.position;
cudaMemcpy(voldevice.data, volhost.data, volhost.mem_data, cudaMemcpyHostToDevice);
}
// Copy volume from device to host
void dosimetry_copy_device2host(Dosimetry &voldevice, Dosimetry &volhost) {
volhost.size_in_vox = voldevice.size_in_vox;
volhost.voxel_size = voldevice.voxel_size;
volhost.size_in_mm = voldevice.size_in_mm;
volhost.nb_voxel_slice = voldevice.nb_voxel_slice;
volhost.nb_voxel_volume = voldevice.nb_voxel_volume;
volhost.mem_data = voldevice.mem_data;
volhost.position = voldevice.position;
cudaMemcpy(volhost.edep, voldevice.edep, voldevice.mem_data, cudaMemcpyDeviceToHost);
}
// Copy dosimetry from host to device
void dosimetry_copy_host2device(Dosimetry &volhost, Dosimetry &voldevice) {
voldevice.size_in_vox = volhost.size_in_vox;
voldevice.voxel_size = volhost.voxel_size;
voldevice.size_in_mm = volhost.size_in_mm;
voldevice.nb_voxel_slice = volhost.nb_voxel_slice;
voldevice.nb_voxel_volume = volhost.nb_voxel_volume;
voldevice.mem_data = volhost.mem_data;
voldevice.position = volhost.position;
cudaMemcpy(voldevice.edep, volhost.edep, volhost.mem_data, cudaMemcpyHostToDevice);
}
// Copy activities from host to device
void activities_copy_host2device(Activities &acthost, Activities &actdevice) {
actdevice.nb_activities = acthost.nb_activities;
actdevice.tot_activity = acthost.tot_activity;
cudaMemcpy(actdevice.act_index, acthost.act_index,
actdevice.nb_activities*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(actdevice.act_cdf, acthost.act_cdf,
actdevice.nb_activities*sizeof(float), cudaMemcpyHostToDevice);
}
/***********************************************************
* Utils Device
***********************************************************/
// rotateUz, function from CLHEP
__device__ float3 rotateUz(float3 vector, float3 newUz) {
float u1 = newUz.x;
float u2 = newUz.y;
float u3 = newUz.z;
float up = u1*u1 + u2*u2;
if (up>0) {
up = sqrtf(up);
float px = vector.x, py = vector.y, pz = vector.z;
vector.x = __fdividef(u1*u3*px - u2*py, up) + u1*pz;
vector.y = __fdividef(u2*u3*px + u1*py, up) + u2*pz;
vector.z = -up*px + u3*pz;
}
else if (u3 < 0.) { vector.x = -vector.x; vector.z = -vector.z; } // phi=0 theta=gpu_pi
return make_float3(vector.x, vector.y, vector.z);
}
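// rotateUz() above re-expresses a vector given in the local frame whose z
// axis is newUz into the fixed simulation frame (the CLHEP
// Hep3Vector::rotateUz convention); the physics code uses it to turn sampled
// (theta, phi) pairs into world directions.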
// add vector
__device__ float3 add_vector(float3 u, float3 v) {
return make_float3(u.x+v.x, u.y+v.y, u.z+v.z);
}
// sub vector
__device__ float3 sub_vector(float3 u, float3 v) {
return make_float3(u.x-v.x, u.y-v.y, u.z-v.z);
}
// mul a vector by a scalar
__device__ float3 scale_vector(float3 u, float a) {
return make_float3(u.x*a, u.y*a, u.z*a);
}
// mul two vectors
__device__ float3 mul_vector(float3 u, float3 v) {
return make_float3(u.x*v.x, u.y*v.y, u.z*v.z);
}
// div two vectors
__device__ float3 div_vector(float3 u, float3 v) {
return make_float3(__fdividef(u.x, v.x),
__fdividef(u.y, v.y),
__fdividef(u.z, v.z));
}
// return an unitary vector
__device__ float3 unit_vector(float3 u) {
float imag = __fdividef(1.0f, sqrtf(u.x*u.x + u.y*u.y + u.z*u.z));
return make_float3(u.x*imag, u.y*imag, u.z*imag);
}
// return inverse vector
__device__ float3 inverse_vector(float3 u) {
return make_float3(__fdividef(1.0f, u.x), __fdividef(1.0f, u.y), __fdividef(1.0f, u.z));
}
//// Used for the validation
__device__ float mag_vector(float3 u) {
return sqrtf(u.x*u.x + u.y*u.y + u.z*u.z);
}
__device__ float dot_vector(float3 u, float3 v) {
return u.x*v.x + u.y*v.y + u.z*v.z;
}
//// Return the next voxel boundary distance, it is used by the standard navigator
__device__ float get_boundary_voxel_by_raycasting(int4 vox, float3 p, float3 d, float3 res) {
float xmin, xmax, ymin, ymax, zmin, zmax;
float3 di = inverse_vector(d);
float tmin, tmax, tymin, tymax, tzmin, tzmax, buf;
// Define the voxel bounding box
xmin = vox.x*res.x;
ymin = vox.y*res.y;
zmin = vox.z*res.z;
xmax = (d.x<0 && p.x==xmin) ? xmin-res.x : xmin+res.x;
ymax = (d.y<0 && p.y==ymin) ? ymin-res.y : ymin+res.y;
zmax = (d.z<0 && p.z==zmin) ? zmin-res.z : zmin+res.z;
tmin = -1e9f;
tmax = 1e9f;
// on x
if (d.x != 0.0f) {
tmin = (xmin - p.x) * di.x;
tmax = (xmax - p.x) * di.x;
if (tmin > tmax) {
buf = tmin;
tmin = tmax;
tmax = buf;
}
}
// on y
if (d.y != 0.0f) {
tymin = (ymin - p.y) * di.y;
tymax = (ymax - p.y) * di.y;
if (tymin > tymax) {
buf = tymin;
tymin = tymax;
tymax = buf;
}
if (tymin > tmin) {tmin = tymin;}
if (tymax < tmax) {tmax = tymax;}
}
// on z
if (d.z != 0.0f) {
tzmin = (zmin - p.z) * di.z;
tzmax = (zmax - p.z) * di.z;
if (tzmin > tzmax) {
buf = tzmin;
tzmin = tzmax;
tzmax = buf;
}
if (tzmin > tmin) {tmin = tzmin;}
if (tzmax < tmax) {tmax = tzmax;}
}
return tmax;
}
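// The routine above is a slab-method ray/box intersection restricted to the
// current voxel: it returns tmax, the distance along d at which the ray
// leaves the voxel, which the navigator kernels use as the distance to the
// next voxel boundary.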
// Binary search
__device__ int binary_search(float *val, float key, int n) {
int min=0, max=n, mid;
while (min < max) {
mid = (min + max) >> 1;
if (key > val[mid]) {
min = mid + 1;
} else {
max = mid;
}
}
return min;
}
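// binary_search() above is a lower-bound search: it returns the first index i
// in [0, n) with key <= val[i], assuming val is sorted in increasing order
// (and n if key is larger than every element). It is used further down to
// invert the cumulative activity distribution act_cdf when sampling the
// emission voxel.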
void dosimetry_dump(Dosimetry dosemap) {
// first write te header
FILE *pfile = fopen("dosemap.mhd", "w");
fprintf(pfile, "ObjectType = Image \n");
fprintf(pfile, "NDims = 3 \n");
fprintf(pfile, "BinaryData = True \n");
fprintf(pfile, "BinaryDataOrderMDB = False \n");
fprintf(pfile, "CompressedData = False \n");
fprintf(pfile, "ElementSpacing = %f %f %f \n", dosemap.voxel_size.x,
dosemap.voxel_size.y,
dosemap.voxel_size.z);
fprintf(pfile, "DimSize = %i %i %i \n", dosemap.size_in_vox.x,
dosemap.size_in_vox.y,
dosemap.size_in_vox.z);
fprintf(pfile, "ElementType = MET_FLOAT \n");
fprintf(pfile, "ElementDataFile = dosemap.raw\n");
fclose(pfile);
// then export data
pfile = fopen("dosemap.raw", "wb");
fwrite(dosemap.edep, dosemap.nb_voxel_volume, sizeof(float), pfile);
fclose(pfile);
}
/***********************************************************
* PRNG Brent xor256
***********************************************************/
// Brent PRNG integer version
__device__ unsigned long weyl;
__device__ unsigned long brent_int(unsigned int index, unsigned long *device_x_brent, unsigned long seed)
{
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define wlen (64*UINT64 + 32*UINT32)
#define r (4*UINT64 + 8*UINT32)
#define s (3*UINT64 + 3*UINT32)
#define a (37*UINT64 + 18*UINT32)
#define b (27*UINT64 + 13*UINT32)
#define c (29*UINT64 + 14*UINT32)
#define d (33*UINT64 + 15*UINT32)
#define ws (27*UINT64 + 16*UINT32)
int z, z_w, z_i_brent;
if (r==4){
z=6; z_w=4; z_i_brent=5;}
else{
z=10; z_w=8; z_i_brent=9;}
unsigned long w = device_x_brent[z*index + z_w];
unsigned long i_brent = device_x_brent[z*index + z_i_brent];
unsigned long zero = 0;
unsigned long t, v;
int k;
if (seed != zero) { // Initialisation necessary
// weyl = odd approximation to 2**wlen*(3-sqrt(5))/2.
if (UINT32)
weyl = 0x61c88647;
else
weyl = ((((unsigned long)0x61c88646)<<16)<<16) + (unsigned long)0x80b583eb;
v = (seed!=zero)? seed:~seed; // v must be nonzero
for (k = wlen; k > 0; k--) { // Avoid correlations for close seeds
v ^= v<<10; v ^= v>>15; // Recurrence has period 2**wlen-1
v ^= v<<4; v ^= v>>13; // for wlen = 32 or 64
}
for (w = v, k = 0; k < r; k++) { // Initialise circular array
v ^= v<<10; v ^= v>>15;
v ^= v<<4; v ^= v>>13;
device_x_brent[k + z*index] = v + (w+=weyl);
}
for (i_brent = r-1, k = 4*r; k > 0; k--) { // Discard first 4*r results
t = device_x_brent[(i_brent = (i_brent+1)&(r-1)) + z*index]; t ^= t<<a; t ^= t>>b;
v = device_x_brent[((i_brent+(r-s))&(r-1)) + z*index]; v ^= v<<c; v ^= v>>d;
device_x_brent[i_brent + z*index] = t^v;
}
}
// Apart from initialisation (above), this is the generator
t = device_x_brent[(i_brent = (i_brent+1)&(r-1)) + z*index]; // Assumes that r is a power of two
v = device_x_brent[((i_brent+(r-s))&(r-1)) + z*index]; // Index is (i-s) mod r
t ^= t<<a; t ^= t>>b; // (I + L^a)(I + R^b)
v ^= v<<c; v ^= v>>d; // (I + L^c)(I + R^d)
device_x_brent[i_brent + z*index] = (v ^= t); // Update circular array
w += weyl; // Update Weyl generator
device_x_brent[z*index + z_w] = w;
device_x_brent[z*index + z_i_brent] = i_brent;
return (v + (w^(w>>ws))); // Return combination
#undef UINT64
#undef UINT32
#undef wlen
#undef r
#undef s
#undef a
#undef b
#undef c
#undef d
#undef ws
}
// Brent PRNG real version
__device__ double Brent_real(int index, unsigned long *device_x_brent, unsigned long seed)
{
#define UINT64 (sizeof(unsigned long)>>3)
#define UINT32 (1 - UINT64)
#define UREAL64 (sizeof(double)>>3)
#define UREAL32 (1 - UREAL64)
// sr = number of bits discarded = 11 for double, 40 or 8 for float
#define sr (11*UREAL64 +(40*UINT64 + 8*UINT32)*UREAL32)
// ss (used for scaling) is 53 or 21 for double, 24 for float
#define ss ((53*UINT64 + 21*UINT32)*UREAL64 + 24*UREAL32)
// SCALE is 0.5**ss, SC32 is 0.5**32
#define SCALE ((double)1/(double)((unsigned long)1<<ss))
#define SC32 ((double)1/((double)65536*(double)65536))
double res;
res = (double)0;
while (res == (double)0) // Loop until nonzero result.
{ // Usually only one iteration.
res = (double)(brent_int(index, device_x_brent, seed)>>sr); // Discard sr random bits.
seed = (unsigned long)0; // Zero seed for next time.
if (UINT32 && UREAL64) // Need another call to xor4096i.
res += SC32*(double)brent_int(index, device_x_brent, seed); // Add low-order 32 bits.
}
return (SCALE*res); // Return result in (0.0, 1.0).
#undef UINT64
#undef UINT32
#undef UREAL64
#undef UREAL32
#undef SCALE
#undef SC32
#undef sr
#undef ss
}
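// brent_int()/Brent_real() above implement a generator from Richard Brent's
// xorgens family (a 256-bit xorshift recurrence combined with a Weyl
// sequence), with one independent state block per thread stored in
// table_x_brent. Brent_real() discards the low-order bits and returns a
// uniform deviate in (0.0, 1.0).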
// Init Brent seed
__global__ void kernel_brent_init(StackParticle stackpart) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id < stackpart.size) {
unsigned int seed = stackpart.seed[id];
float dummy = brent_int(id, stackpart.table_x_brent, seed);
}
}
/***********************************************************
* Particles source
***********************************************************/
// Voxelized back2back source
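// The kernel below samples an emission voxel by inverting the cumulative
// activity distribution (binary_search on act_cdf), draws a uniform position
// inside that voxel and an isotropic direction, and fills the two gamma
// stacks with photons of energy E emitted back-to-back from that point.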
__global__ void kernel_voxelized_source_b2b(StackParticle g1, StackParticle g2, Activities act,
float E, int3 size_in_vox, float3 voxel_size) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= g1.size) return;
float jump = (float)(size_in_vox.x * size_in_vox.y);
float ind, x, y, z;
float rnd = Brent_real(id, g1.table_x_brent, 0);
int pos = binary_search(act.act_cdf, rnd, act.nb_activities);
// get the voxel position (x, y, z)
ind = (float)act.act_index[pos];
z = floor(ind / jump);
ind -= (z * jump);
y = floor(ind / (float)(size_in_vox.x));
x = ind - y*size_in_vox.x;
// random position inside the voxel
x += Brent_real(id, g1.table_x_brent, 0);
y += Brent_real(id, g1.table_x_brent, 0);
z += Brent_real(id, g1.table_x_brent, 0);
// must be in mm
x *= voxel_size.x;
y *= voxel_size.y;
z *= voxel_size.z;
// random orientation
float phi = Brent_real(id, g1.table_x_brent, 0);
float theta = Brent_real(id, g1.table_x_brent, 0);
phi = gpu_twopi * phi;
theta = acosf(1.0f - 2.0f*theta);
// convert to cartesian
float dx = __cosf(phi)*__sinf(theta);
float dy = __sinf(phi)*__sinf(theta);
float dz = __cosf(theta);
// first gamma
g1.dx[id] = dx;
g1.dy[id] = dy;
g1.dz[id] = dz;
g1.E[id] = E;
g1.px[id] = x;
g1.py[id] = y;
g1.pz[id] = z;
g1.t[id] = 0.0f;
g1.active[id] = 1;
g1.endsimu[id] = 0;
g1.type[id] = GAMMA;
// second gamma
g2.dx[id] = -dx;
g2.dy[id] = -dy;
g2.dz[id] = -dz;
g2.E[id] = E;
g2.px[id] = x;
g2.py[id] = y;
g2.pz[id] = z;
g2.t[id] = 0.0f;
g2.active[id] = 1;
g2.endsimu[id] = 0;
g2.type[id] = GAMMA;
}
/***********************************************************
* Photons Physics Effects
***********************************************************/
//// Comptons Standard //////////////////////////////////////
// Compton Cross Section Per Atom (Standard - Klein-Nishina)
__device__ float Compton_CSPA_Standard(float E, unsigned short int Z) {
float CrossSection = 0.0;
if (Z<1 || E < 1e-4f) {return CrossSection;}
float p1Z = Z*(2.7965e-23f + 1.9756e-27f*Z + -3.9178e-29f*Z*Z);
float p2Z = Z*(-1.8300e-23f + -1.0205e-24f*Z + 6.8241e-27f*Z*Z);
float p3Z = Z*(6.7527e-22f + -7.3913e-24f*Z + 6.0480e-27f*Z*Z);
float p4Z = Z*(-1.9798e-21f + 2.7079e-24f*Z + 3.0274e-26f*Z*Z);
float T0 = (Z < 1.5f)? 40.0e-3f : 15.0e-3f;
float d1, d2, d3, d4, d5;
d1 = __fdividef(fmaxf(E, T0), 0.510998910f); // X
CrossSection = __fdividef(p1Z*__logf(1.0f+2.0f*d1), d1) + __fdividef(p2Z + p3Z*d1 + p4Z*d1*d1, 1.0f + 20.0f*d1 + 230.0f*d1*d1 + 440.0f*d1*d1*d1);
if (E < T0) {
d1 = __fdividef(T0+1.0e-3f, 0.510998910f); // X
d2 = __fdividef(p1Z*__logf(1.0f+2.0f*d1), d1) + __fdividef(p2Z + p3Z*d1 + p4Z*d1*d1, 1.0f + 20.0f*d1 + 230.0f*d1*d1 + 440.0f*d1*d1*d1); // sigma
d3 = __fdividef(-T0 * (d2 - CrossSection), CrossSection*1.0e-3f); // c1
d4 = (Z > 1.5f)? 0.375f-0.0556f*__logf(Z) : 0.15f; // c2
d5 = __logf(__fdividef(E, T0)); // y
CrossSection *= __expf(-d5 * (d3 + d4*d5));
}
return CrossSection;
}
// Compute the total Compton cross section for a given material
__device__ float Compton_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] * Compton_CSPA_Standard(E, materials.mixture[index+i]));
}
return CS;
}
// Compton Scatter (Standard - Klein-Nishina) without secondary
__device__ float Compton_Effect_Standard_NoSec(StackParticle photons,
unsigned int id,
int* count_d) {
float gamE0 = photons.E[id];
float E0 = __fdividef(gamE0, 0.510998910f);
float3 gamDir0 = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// sample the energy fraction kept by the scattered gamma
float epszero = __fdividef(1.0f, (1.0f + 2.0f * E0));
float eps02 = epszero*epszero;
float a1 = -__logf(epszero);
float a2 = __fdividef(a1, (a1 + 0.5f*(1.0f-eps02)));
float greject, onecost, eps, eps2, sint2, cosTheta, sinTheta, phi;
do {
if (a2 > Brent_real(id, photons.table_x_brent, 0)) {
eps = __expf(-a1 * Brent_real(id, photons.table_x_brent, 0));
eps2 = eps*eps;
} else {
eps2 = eps02 + (1.0f - eps02) * Brent_real(id, photons.table_x_brent, 0);
eps = sqrt(eps2);
}
onecost = __fdividef(1.0f - eps, eps * E0);
sint2 = onecost * (2.0f - onecost);
greject = 1.0f - eps * __fdividef(sint2, 1.0f + eps2);
} while (greject < Brent_real(id, photons.table_x_brent, 0));
// scattered gamma angles
if (sint2 < 0.0f) {sint2 = 0.0f;}
cosTheta = 1.0f - onecost;
sinTheta = sqrt(sint2);
phi = Brent_real(id, photons.table_x_brent, 0) * gpu_twopi;
// update the scattered gamma
float3 gamDir1 = make_float3(sinTheta*__cosf(phi), sinTheta*__sinf(phi), cosTheta);
gamDir1 = rotateUz(gamDir1, gamDir0);
photons.dx[id] = gamDir1.x;
photons.dy[id] = gamDir1.y;
photons.dz[id] = gamDir1.z;
float gamE1 = gamE0 * eps;
if (gamE1 > 1.0e-06f) {photons.E[id] = gamE1;}
else {
photons.endsimu[id] = 1; // stop this particle
photons.active[id] = 0; // this particle is absorbed
atomicAdd(count_d, 1); // count simulated primaries
return gamE1; // Local energy deposit
}
return 0.0f;
}
// Compton Scatter (Standard - Klein-Nishina) with secondary (e-)
__device__ float Compton_Effect_Standard_WiSec(StackParticle photons,
StackParticle electrons,
float cutE,
unsigned int id,
int* count_d) {
float gamE0 = photons.E[id];
float E0 = __fdividef(gamE0, 0.510998910f);
float3 gamDir0 = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// sample the energy fraction kept by the scattered gamma
float epszero = __fdividef(1.0f, (1.0f + 2.0f * E0));
float eps02 = epszero*epszero;
float a1 = -__logf(epszero);
float a2 = __fdividef(a1, (a1 + 0.5f*(1.0f-eps02)));
float greject, onecost, eps, eps2, sint2, cosTheta, sinTheta, phi;
do {
if (a2 > Brent_real(id, photons.table_x_brent, 0)) {
eps = __expf(-a1 * Brent_real(id, photons.table_x_brent, 0));
eps2 = eps*eps;
} else {
eps2 = eps02 + (1.0f - eps02) * Brent_real(id, photons.table_x_brent, 0);
eps = sqrt(eps2);
}
onecost = __fdividef(1.0f - eps, eps * E0);
sint2 = onecost * (2.0f - onecost);
greject = 1.0f - eps * __fdividef(sint2, 1.0f + eps2);
} while (greject < Brent_real(id, photons.table_x_brent, 0));
// scattered gamma angles
if (sint2 < 0.0f) {sint2 = 0.0f;}
cosTheta = 1.0f - onecost;
sinTheta = sqrt(sint2);
phi = Brent_real(id, photons.table_x_brent, 0) * gpu_twopi;
// update the scattered gamma
float3 gamDir1 = make_float3(sinTheta*__cosf(phi), sinTheta*__sinf(phi), cosTheta);
gamDir1 = rotateUz(gamDir1, gamDir0);
photons.dx[id] = gamDir1.x;
photons.dy[id] = gamDir1.y;
photons.dz[id] = gamDir1.z;
float gamE1 = gamE0 * eps;
if (gamE1 > 1.0e-06f) {photons.E[id] = gamE1;}
else {
//printf("Compton => X\n");
photons.endsimu[id] = 1; // absorbed this particle
photons.active[id] = 0;
atomicAdd(count_d, 1); // count simulated primaries
return gamE1; // Local energy deposit
}
// kinematic of the scattered electron
float eKinE = gamE0 - gamE1;
// DBL_MIN cut production
if (eKinE > 1.0e-38f && eKinE > cutE) {
float3 eDir = sub_vector(scale_vector(gamDir0, gamE0), scale_vector(gamDir1, gamE1));
eDir = unit_vector(eDir);
electrons.dx[id] = eDir.x;
electrons.dy[id] = eDir.y;
electrons.dz[id] = eDir.z;
electrons.E[id] = eKinE;
electrons.px[id] = photons.px[id];
electrons.py[id] = photons.py[id];
electrons.pz[id] = photons.pz[id];
electrons.endsimu[id] = 0;
// Now start to track this electron and freeze the photon tracking
photons.active[id] = 0;
electrons.active[id] = 1;
//printf("Compton => e- cutE %e\n", cutE);
return 0.0f;
}
//printf("Compton => / cutE %e\n", cutE);
return eKinE;
}
//// PhotoElectric Standard //////////////////////////////////////
// Compute Theta distribution of the emitted electron, with respect to the incident Gamma
// The Sauter-Gavrila distribution for the K-shell is used
__device__ float PhotoElec_ElecCosThetaDistribution(StackParticle part,
unsigned int id,
float kineEnergy) {
float costeta = 1.0f;
float gamma = kineEnergy * 1.9569513367f + 1.0f; // 1/electron_mass_c2
if (gamma > 5.0f) {return costeta;}
float beta = __fdividef(sqrtf(gamma*gamma - 1.0f), gamma);
float b = 0.5f*gamma*(gamma - 1.0f)*(gamma - 2.0f);
float rndm, term, greject, grejsup;
if (gamma < 2.0f) {grejsup = gamma*gamma*(1.0f + b - beta*b);}
else {grejsup = gamma*gamma*(1.0f + b + beta*b);}
do {
rndm = 1.0f - 2.0f*Brent_real(id, part.table_x_brent, 0);
costeta = __fdividef(rndm + beta, rndm*beta + 1.0f);
term = 1.0f - beta*costeta;
greject = __fdividef((1.0f - costeta*costeta)*(1.0f + b*term), term*term);
} while(greject < Brent_real(id, part.table_x_brent, 0)*grejsup);
return costeta;
}
// PhotoElectric Cross Section Per Atom (Standard)
__device__ float PhotoElec_CSPA_Standard(float E, unsigned short int Z) {
// from Sandia, the same for all Z
float Emin = fmax(PhotoElec_std_IonizationPotentials[Z]*1e-6f, 0.01e-3f);
if (E < Emin) {return 0.0f;}
int start = PhotoElec_std_CumulIntervals[Z-1];
int stop = start + PhotoElec_std_NbIntervals[Z];
int pos=stop;
while (E < PhotoElec_std_SandiaTable[pos][0]*1.0e-3f){--pos;}
float AoverAvo = 0.0103642688246f * __fdividef((float)Z, PhotoElec_std_ZtoAratio[Z]);
float rE = __fdividef(1.0f, E);
float rE2 = rE*rE;
return rE * PhotoElec_std_SandiaTable[pos][1] * AoverAvo * 0.160217648e-22f
+ rE2 * PhotoElec_std_SandiaTable[pos][2] * AoverAvo * 0.160217648e-25f
+ rE * rE2 * PhotoElec_std_SandiaTable[pos][3] * AoverAvo * 0.160217648e-28f
+ rE2 * rE2 * PhotoElec_std_SandiaTable[pos][4] * AoverAvo * 0.160217648e-31f;
}
// Compute the total PhotoElectric cross section for a given material
__device__ float PhotoElec_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] * PhotoElec_CSPA_Standard(E, materials.mixture[index+i]));
}
return CS;
}
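// ----------------------------------------------------------------------------
// Note: as for Compton above, this is a macroscopic cross section (per-atom
// cross sections weighted by the atomic number densities of the mixture).
// The navigators below turn such a value Sigma into a sampled path length
// d = -ln(u)/Sigma, with u uniform in (0,1]. Minimal sketch (the helper name
// is an assumption, not part of this file):
//
//   __device__ float sample_interaction_distance(float Sigma, float u) {
//       return __fdividef(-__logf(u), Sigma);   // mean free path = 1/Sigma
//   }
// ----------------------------------------------------------------------------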
// PhotoElectric effect (Standard) without secondary
__device__ float PhotoElec_Effect_Standard_NoSec(StackParticle photons,
unsigned int id,
int* count_d) {
// Absorbed the photon
photons.endsimu[id] = 1; // stop the simulation
photons.active[id] = 0; // this particle is absorbed
atomicAdd(count_d, 1); // count simulated primaries
return 0.0f;
}
// PhotoElectric effect (Standard) with secondary (e-)
__device__ float PhotoElec_Effect_Standard_WiSec(StackParticle photons,
StackParticle electrons,
Materials mat,
float cutE,
unsigned int matindex,
unsigned int id,
int* count_d) {
float energy = photons.E[id];
float3 PhotonDirection = make_float3(photons.dx[id], photons.dy[id], photons.dz[id]);
// Select randomly one element constituing the material
unsigned int n = mat.nb_elements[matindex]-1;
unsigned int index = mat.index[matindex];
unsigned int Z = mat.mixture[index+n];
unsigned int i = 0;
if (n > 0) {
float x = Brent_real(id, photons.table_x_brent, 0) *
PhotoElec_CS_Standard(mat, matindex, energy);
float xsec = 0.0f;
for (i=0; i<n; ++i) {
xsec += mat.atom_num_dens[index+i] *
PhotoElec_CSPA_Standard(energy, mat.mixture[index+i]);
if (x <= xsec) {
Z = mat.mixture[index+i];
break;
}
}
}
//// Photo electron
// Select atomic shell
unsigned short int nShells = atom_NumberOfShells[Z];
index = atom_IndexOfShells[Z];
    float bindingEnergy = atom_BindingEnergies[index]*1.0e-06f; // converted from eV to MeV
i=0; while (i < nShells && energy < bindingEnergy) {
++i;
        bindingEnergy = atom_BindingEnergies[index + i]*1.0e-06f; // converted from eV to MeV
}
// no shell available
if (i == nShells) {return 0.0f;}
float ElecKineEnergy = energy - bindingEnergy;
float cosTeta = 0.0f;
// 1 eV cut production
if (ElecKineEnergy > 1.0e-06f && ElecKineEnergy > cutE) {
// direction of the photo electron
cosTeta = PhotoElec_ElecCosThetaDistribution(photons, id, ElecKineEnergy);
float sinTeta = sqrtf(1.0f - cosTeta*cosTeta);
float Phi = gpu_twopi * Brent_real(id, photons.table_x_brent, 0);
float3 ElecDirection = make_float3(sinTeta*cos(Phi), sinTeta*sin(Phi), cosTeta);
ElecDirection = rotateUz(ElecDirection, PhotonDirection);
// Create an electron
electrons.dx[id] = ElecDirection.x;
electrons.dy[id] = ElecDirection.y;
electrons.dz[id] = ElecDirection.z;
electrons.E[id] = ElecKineEnergy;
electrons.px[id] = photons.px[id];
electrons.py[id] = photons.py[id];
electrons.pz[id] = photons.pz[id];
electrons.endsimu[id] = 0;
// Start to track this electron
electrons.active[id] = 1;
//printf("PE => e-\n");
return bindingEnergy;
}
// Absorbed the photon
photons.endsimu[id] = 1; // stop the simulation
photons.active[id] = 0;
atomicAdd(count_d, 1); // count simulated primaries
// LocalEnergy Deposit
return bindingEnergy+ElecKineEnergy;
}
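// ----------------------------------------------------------------------------
// Bookkeeping note for the two photoelectric routines above: the returned
// value is the energy deposited locally at the interaction point. When a
// photo-electron is emitted, only the shell binding energy is returned (the
// electron carries ElecKineEnergy away on the electron stack); when the
// electron would be below the production cut, the photon is absorbed and
// bindingEnergy + ElecKineEnergy is deposited; when no shell can be ionized
// the routine deposits nothing and leaves the photon unchanged.
// ----------------------------------------------------------------------------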
/***********************************************************
* Electrons Physics Effects
***********************************************************/
// eIonisation Cross Section Per Atom (Möller model)
__device__ float eIonisation_CSPA_Standard(float E, unsigned short int Z,
float cutE, float maxE) {
float CS = 0.0f;
float xmin = __fdividef(cutE, E);
float tmax = fmin(maxE, 0.5f*E);
float xmax = __fdividef(tmax, E);
float gam = E * 1.9569513367f + 1.0f; // 1/electron_mass_c2
float igam2 = __fdividef(1.0f, gam*gam);
float ibeta2 = __fdividef(1.0f, 1.0f - igam2);
float g = (2.0f*gam - 1.0f)*igam2;
if (cutE < tmax) {
// Cross Section per e-
CS = ((xmax-xmin) * (1.0f-g + __fdividef(1.0, (xmin*xmax)) + __fdividef(1.0f, (1.0f-xmin)*(1.0f-xmax))) - g*__logf( __fdividef(xmax*(1.0 - xmin), xmin*(1.0 - xmax)))) * ibeta2;
CS *= (__fdividef(2.549549299e-23f, E)); // gpu_twopi_mc2_rcl2
CS *= (float)Z;
}
return CS;
}
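// ----------------------------------------------------------------------------
// Note: this is the restricted Moller (e-,e-) cross section per atom - only
// delta rays with kinetic energy between cutE and tmax = min(maxE, E/2) are
// counted (the E/2 limit because the two outgoing electrons are
// indistinguishable). The constant 2.549549299e-23 is 2*pi*mec2*re^2 in
// MeV*mm^2, the same twopi_mc2_rcl2 factor used in the dE/dx routine below.
// ----------------------------------------------------------------------------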
// Compute the total eIonisation cross section for a given material
__device__ float eIonisation_CS_Standard(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
float cutE = materials.electron_cut_energy[mat];
float maxE = materials.electron_max_energy[mat];
// Model standard
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] *
eIonisation_CSPA_Standard(E, materials.mixture[index+i], cutE, maxE));
}
return CS;
}
// Compute the dE/dx due to the ionization
__device__ float eIonisation_dedx_Standard(Materials materials, unsigned int mat, float E) {
float meanExcitationEnergy = materials.electron_mean_excitation_energy[mat];
float cutE = materials.electron_cut_energy[mat];
float electronDensity = materials.nb_electrons_per_vol[mat];
float Natm = materials.nb_atoms_per_vol[mat];
float Zeff = __fdividef(electronDensity, Natm);
float th = 0.25f*sqrtf(Zeff) * 0.001f; // keV
unsigned short int flag_low_E = 0;
float tkin = E;
if (tkin < th) {tkin = th; flag_low_E = 1;};
float tau = tkin * 1.9569513367f; // 1/electron_mass_c2
float gam = tau + 1.0f;
float gam2 = gam*gam;
float beta2 = 1.0f - __fdividef(1.0f, gam2);
float eexc2 = meanExcitationEnergy * 1.9569513367f; // 1/electron_mass_c2
eexc2 = eexc2 * eexc2;
float d = (cutE < tkin*0.5f)? cutE : tkin*0.5f;
d = d * 1.9569513367f; // 1/electron_mass_c2
float dedx = __logf(2.0f * __fdividef(tau+2.0f, eexc2)) - 1.0f - beta2 + __logf((tau-d)*d) + __fdividef(tau, tau-d) + __fdividef(0.5f*d*d + (2.0f*tau + 1.0f) * __logf(1.0f - __fdividef(d, tau)), gam2);
// Density correction
float twoln10 = 2.0f*__logf(10.0f);
float x = __fdividef(__logf(beta2*gam2), twoln10);
float y = 0.0f;
if (x < materials.fX0[mat]) {
if (materials.fD0[mat] > 0.0f) {
y = materials.fD0[mat]*__powf(10.0f, 2.0f*(x-materials.fX0[mat]));
}
} else if (x >= materials.fX1[mat]) {
y = twoln10*x - materials.fC[mat];
} else {
y = twoln10*x - materials.fC[mat] + materials.fA[mat]
* __powf(materials.fX1[mat]-x, materials.fM[mat]);
}
dedx -= y;
// Total ionization loss
// gpu_twopi_mc2_rcl2
dedx *= __fdividef(2.549549299e-23f*electronDensity, beta2);
if (dedx < 0.0f) {dedx = 0.0f;};
// Low energy extrapolation
if (flag_low_E) {
// 200 eV
if (E >= 200.0e-06f) {dedx *= sqrtf( __fdividef(tkin, E));}
else {dedx *= __fdividef(sqrtf(tkin*E), 200.0e-06f);} // 200 eV
}
return dedx;
}
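// ----------------------------------------------------------------------------
// Note: the routine above is the restricted collision stopping power
// (Berger-Seltzer style) with a Sternheimer-like density-effect correction
// driven by the material parameters fX0, fX1, fC, fA, fM, plus a low-energy
// extrapolation below th = 0.25*sqrt(Zeff) keV. The electron navigator uses
// it for the continuous energy loss along a step; minimal sketch of that use
// (the helper name is an assumption, not part of this file):
//
//   __device__ float continuous_loss_over_step(Materials m, unsigned int mat,
//                                              float E /*MeV*/, float step /*mm*/) {
//       return eIonisation_dedx_Standard(m, mat, E) * step;   // MeV
//   }
// ----------------------------------------------------------------------------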
// Compute the scattering due to the ionization
__device__ float eIonisation_Effect_Standard_NoSec(StackParticle electrons,
StackParticle photons,
float tmin, float maxE, // tmin=cutE
unsigned int id, int *count_d) {
float E = electrons.E[id];
float tmax = E * 0.5f;
if (maxE < tmax) {tmax = maxE;};
    if (tmin >= tmax) { // tmin is the same as cutE
// stop the simulation for this one
electrons.endsimu[id] = 1;
// Unfreeze the photon tracking
electrons.active[id] = 0;
photons.active[id] = 1;
atomicAdd(count_d, 1); // count simulated secondaries
return E;
}
float energy = E + 0.510998910f; // electron_mass_c2
float totalMomentum = sqrtf(E * (energy + 0.510998910f));
float xmin = __fdividef(tmin, E);
float xmax = __fdividef(tmax, E);
float gam = energy * 1.9569513367f; // 1/electron_mass_c2
float gamma2 = gam*gam;
float beta2 = 1.0f - __fdividef(1.0f, gamma2);
// GetMomentumDirection
float3 direction = make_float3(electrons.dx[id], electrons.dy[id], electrons.dz[id]);
// Moller (e-e-) scattering
float g = __fdividef(2.0f*gam - 1.0f, gamma2);
float y = 1.0f - xmax;
float grej = 1.0f - g*xmax + xmax*xmax*(1.0f - g + __fdividef(1.0f - g*y, y*y));
float x, z, q;
do {
q = Brent_real(id, electrons.table_x_brent, 0);
x = __fdividef(xmin*xmax, xmin*(1.0f - q) + xmax*q);
y = 1.0f - x;
z = 1.0f - g*x + x*x*(1.0f - g + __fdividef(1.0f - g*y, y*y));
} while(grej * Brent_real(id, electrons.table_x_brent, 0) > z);
float deltaKinEnergy = x * E;
float deltaMomentum = sqrtf(deltaKinEnergy * (deltaKinEnergy + 2.0f*0.510998910f)); // electron_mass_c2
float cost = deltaKinEnergy * __fdividef(energy + 0.510998910f, deltaMomentum * totalMomentum);
float sint = 1.0f - cost*cost;
if (sint > 0.0f) {sint = sqrtf(sint);};
float phi = gpu_twopi * Brent_real(id, electrons.table_x_brent, 0);
float3 deltaDirection = make_float3(sint*__cosf(phi), sint*__sinf(phi), cost);
deltaDirection = rotateUz(deltaDirection, direction);
electrons.E[id] = E - deltaKinEnergy;
float3 dir = sub_vector(scale_vector(direction, totalMomentum),
scale_vector(deltaDirection, deltaMomentum));
dir = unit_vector(dir);
electrons.dx[id] = dir.x;
electrons.dy[id] = dir.y;
electrons.dz[id] = dir.z;
return deltaKinEnergy;
}
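// ----------------------------------------------------------------------------
// Note: the Moller sampling above returns the delta-ray kinetic energy
// (deltaKinEnergy); since no secondary is pushed on a stack in this "NoSec"
// variant, the caller scores it as a local energy deposit. The primary keeps
// E - deltaKinEnergy and its direction is updated by momentum balance
// (totalMomentum*direction - deltaMomentum*deltaDirection, renormalised).
// ----------------------------------------------------------------------------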
// Multiple Scattering
__device__ float MSC_CSPA(float E, unsigned short int Z) {
float Z23 = __expf( 0.666666666666f*__logf((float)Z) );
float eTotalEnergy = E + 0.51099891f;
float beta2 = E * __fdividef(eTotalEnergy+0.51099891f, eTotalEnergy*eTotalEnergy);
double bg2 = E * __fdividef(eTotalEnergy+0.51099891f, 0.26111988f); // e_mass_c2*e_mass_c2
float eps = 37557.7634f * __fdividef(bg2, Z23); // epsfactor
float epsmin = 1.0e-04f;
float epsmax = 1.0e+10f;
float sigma;
if (eps<epsmin) sigma = 2.0f*eps*eps;
else if(eps<epsmax) sigma = __logf(1.0f+2.0f*eps) - 2.0f*__fdividef(eps, (1.0f+2.0f*eps));
else sigma = __logf(2.0f*eps) - 1.0f+__fdividef(1.0f, eps);
sigma *= __fdividef(Z*Z, (beta2*bg2));
// get bin number in Z
int iZ = 14;
while ((iZ >= 0) && (Zdat[iZ] >= Z)) iZ -= 1;
if (iZ == 14) iZ = 13;
if (iZ == -1) iZ = 0 ;
float Z1 = Zdat[iZ];
float Z2 = Zdat[iZ+1];
float ratZ = __fdividef((Z-Z1)*(Z+Z1), (Z2-Z1)*(Z2+Z1));
float c1, c2;
if(E <= 10.0f) { // Tlim = 10 MeV
// get bin number in T (beta2)
int iT = 21;
while ((iT >= 0) && (Tdat[iT] >= E)) iT -= 1;
if (iT == 21) iT = 20;
if (iT == -1) iT = 0 ;
// calculate betasquare values
float T = Tdat[iT];
float EE = T + 0.51099891f;
float b2small = T * __fdividef(EE + 0.51099891f, EE*EE);
T = Tdat[iT+1];
EE = T + 0.51099891f;
float b2big = T * __fdividef(EE + 0.51099891f, EE*EE);
float ratb2 = __fdividef(beta2-b2small, b2big-b2small);
c1 = celectron[iZ][iT];
c2 = celectron[iZ+1][iT];
float cc1 = c1 + ratZ*(c2-c1);
c1 = celectron[iZ][iT+1];
c2 = celectron[iZ+1][iT+1];
float cc2 = c1 + ratZ*(c2-c1);
sigma *= __fdividef(4.98934390e-23f, cc1 + ratb2*(cc2-cc1)); // sigmafactor
} else {
// bg2lim beta2lim
c1 = 422.104880f*sig0[iZ] * __fdividef(1.0f+hecorr[iZ] *(beta2-0.997636519f), bg2);
c2 = 422.104880f*sig0[iZ+1] * __fdividef(1.0f+hecorr[iZ+1]*(beta2-0.997636519f), bg2);
if ((Z >= Z1) && (Z <= Z2)) {
sigma = c1 + ratZ*(c2-c1);
} else if(Z < Z1) {
sigma = Z*Z*__fdividef(c1, (Z1*Z1));
} else if(Z > Z2) {
sigma = Z*Z*__fdividef(c2, (Z2*Z2));
}
}
return sigma;
}
// Compute the total MSC cross section for a given material
__device__ float MSC_CS(Materials materials, unsigned int mat, float E) {
float CS = 0.0f;
int i;
int index = materials.index[mat];
for (i = 0; i < materials.nb_elements[mat]; ++i) {
CS += (materials.atom_num_dens[index+i] *
MSC_CSPA(E, materials.mixture[index+i]));
}
return CS;
}
// Multiple Scattering effect
__device__ float MSC_Effect(StackParticle electrons, Materials materials, float trueStepLength,
unsigned int mat, unsigned int id) {
// double betacp = sqrt(currentKinEnergy*(currentKinEnergy+2.*mass)*KineticEnergy*(KineticEnergy+2.*mass)/((currentKinEnergy+mass)*(KineticEnergy+mass)));
//E = 1.0f;
float E = electrons.E[id];
// !!!! Approx Seb : currentKinEnergy = KineticEnergy
float betacp = E * __fdividef(E+1.02199782f, E+0.51099891f);
float y = __fdividef(trueStepLength, materials.rad_length[mat]);
float theta = 13.6f * __fdividef(__powf(y, 0.5f), betacp);
y = __logf(y);
// correction in theta formula
float Zeff = __fdividef(materials.nb_electrons_per_vol[mat],
materials.nb_atoms_per_vol[mat]);
float lnZ = __logf(Zeff);
float coeffth1 = (1.0f - __fdividef(8.7780e-2f, Zeff)) * (0.87f + 0.03f*lnZ);
float coeffth2 = (4.0780e-2f + 1.7315e-4f*Zeff) * (0.87f + 0.03f*lnZ);
float corr = coeffth1 + coeffth2 * y;
theta *= corr ;
float phi = gpu_twopi * Brent_real(id, electrons.table_x_brent, 0);
float3 direction = make_float3(electrons.dx[id], electrons.dy[id], electrons.dz[id]);
float3 deltaDirection = make_float3(__cosf(phi)*__sinf(theta),
__sinf(phi)*__sinf(theta),
__cosf(theta));
direction = rotateUz(deltaDirection, direction);
electrons.dx[id] = direction.x;
electrons.dy[id] = direction.y;
electrons.dz[id] = direction.z;
return 0.0f;
}
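// ----------------------------------------------------------------------------
// Note: the deflection above is a Highland-like parameterisation,
// theta ~ (13.6 MeV / (beta*c*p)) * sqrt(t/X0) * corr, where t/X0 is the true
// step length in radiation lengths (materials.rad_length) and corr is a
// Zeff-dependent fit (coeffth1/coeffth2) used instead of the textbook
// 1 + 0.038*ln(t/X0) term. Illustrative textbook version for comparison
// (a sketch, not used here):
//
//   __device__ float highland_theta0(float betacp /*MeV*/, float t_over_X0) {
//       return 13.6f * __fdividef(sqrtf(t_over_X0), betacp)
//              * (1.0f + 0.038f*__logf(t_over_X0));
//   }
//
// Only the direction is changed; the function deposits no energy (returns 0).
// ----------------------------------------------------------------------------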
/***********************************************************
* Navigator
***********************************************************/
// Regular Navigator with voxelized phantom for photons without secondary
__global__ void kernel_NavRegularPhan_Photon_NoSec(StackParticle photons,
Volume phantom,
Materials materials,
int* count_d) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= photons.size) return;
if (photons.endsimu[id]) return;
if (!photons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = photons.px[id];
position.y = photons.py[id];
position.z = photons.pz[id];
//printf("%0.2f %0.2f %0.2f\n", position.x, position.y, position.z);
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
/*
if (index_phantom.w >= phantom.nb_voxel_volume) {
printf(" pos %0.2f %0.2f %0.2f ispc %0.2f %0.2f %0.2f ind %i %i %i = %i max %i\n",
position.x, position.y, position.z, ivoxsize.x, ivoxsize.y, ivoxsize.z,
index_phantom.x, index_phantom.y, index_phantom.z,
index_phantom.w, phantom.nb_voxel_volume);
}
*/
// Read direction
float3 direction;
direction.x = photons.dx[id];
direction.y = photons.dy[id];
direction.z = photons.dz[id];
// Get energy
float energy = photons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
//// Find next discrete interaction ///////////////////////////////////////
    // Find the next discrete interaction and its distance
float next_interaction_distance = FLT_MAX;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
// Photoelectric
cross_section = PhotoElec_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_PHOTOELECTRIC;
}
// Compton
cross_section = Compton_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_COMPTON;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
if (interaction_distance < next_interaction_distance) {
        // overshoot the distance by 1 um so the step ends inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = PHOTON_BOUNDARY_VOXEL;
}
//// Move particle //////////////////////////////////////////////////////
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
photons.t[id] += (3.33564095198e-03f * next_interaction_distance);
photons.px[id] = position.x;
photons.py[id] = position.y;
photons.pz[id] = position.z;
// Stop simulation if out of phantom or no more energy
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
photons.endsimu[id] = 1; // stop the simulation
atomicAdd(count_d, 1); // count simulated primaries
return;
}
    //// Resolve discrete processes //////////////////////////////////////////
if (next_discrete_process == PHOTON_PHOTOELECTRIC) {
float discrete_loss = PhotoElec_Effect_Standard_NoSec(photons, id, count_d);
}
if (next_discrete_process == PHOTON_COMPTON) {
float discrete_loss = Compton_Effect_Standard_NoSec(photons, id, count_d);
}
}
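// ----------------------------------------------------------------------------
// Usage note: this kernel advances every active photon by one step (one free
// flight plus at most one discrete interaction). A host-side driver would
// typically relaunch it until count_d reaches the stack size. Minimal sketch,
// assuming device-resident StackParticle/Volume/Materials and a zeroed
// count_d (this driver is not part of this file):
//
//   void track_photons(StackParticle d_photons, Volume d_phantom,
//                      Materials d_materials, int* d_count, int nb) {
//       dim3 block(512);
//       dim3 grid((nb + block.x - 1) / block.x);
//       int done = 0;
//       while (done < nb) {
//           kernel_NavRegularPhan_Photon_NoSec<<<grid, block>>>(d_photons, d_phantom,
//                                                               d_materials, d_count);
//           cudaMemcpy(&done, d_count, sizeof(int), cudaMemcpyDeviceToHost);
//       }
//   }
// ----------------------------------------------------------------------------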
// Regular Navigator with voxelized phantom for photons with secondary
__global__ void kernel_NavRegularPhan_Photon_WiSec(StackParticle photons,
StackParticle electrons,
Volume phantom,
Materials materials,
Dosimetry dosemap,
int* count_d, float step_limiter) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= photons.size) return;
//printf("ID %i Nav gamma endsimu %i active %i\n",
// id, photons.endsimu[id], photons.active[id]);
if (photons.endsimu[id]) return;
if (!photons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = photons.px[id];
position.y = photons.py[id];
position.z = photons.pz[id];
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
// Read direction
float3 direction;
direction.x = photons.dx[id];
direction.y = photons.dy[id];
direction.z = photons.dz[id];
// Get energy
float energy = photons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
/// Debug ///
//printf("gamma %i E %e pos %.2f %.2f %.2f mat %i\n", id, energy, position.x, position.y, position.z, mat);
//// Find next discrete interaction ///////////////////////////////////////
    // Find the next discrete interaction and its distance
float next_interaction_distance = FLT_MAX;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
// Photoelectric
cross_section = PhotoElec_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
//printf("PE CS %e PIL %e\n", cross_section, interaction_distance);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_PHOTOELECTRIC;
}
// Compton
cross_section = Compton_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, photons.table_x_brent, 0)),
cross_section);
//printf("Cpt CS %e PIL %e\n", cross_section, interaction_distance);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = PHOTON_COMPTON;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
//printf("Boundary PIL %e\n", interaction_distance);
if (interaction_distance < next_interaction_distance) {
        // overshoot the distance by 1 um so the step ends inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = PHOTON_BOUNDARY_VOXEL;
}
// step limiter
if (step_limiter < next_interaction_distance) {
next_interaction_distance = step_limiter;
next_discrete_process = PHOTON_STEP_LIMITER;
}
//// Move particle //////////////////////////////////////////////////////
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
photons.t[id] += (3.33564095198e-03f * next_interaction_distance);
photons.px[id] = position.x;
photons.py[id] = position.y;
photons.pz[id] = position.z;
// Stop simulation if out of phantom
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
photons.endsimu[id] = 1; // stop the simulation
atomicAdd(count_d, 1); // count simulated primaries
return;
}
    //// Resolve discrete processes //////////////////////////////////////////
float discrete_loss = 0.0f;
if (next_discrete_process == PHOTON_BOUNDARY_VOXEL ||
next_discrete_process == PHOTON_STEP_LIMITER) {
//printf("boundary || step limiter\n");
return;
}
if (next_discrete_process == PHOTON_PHOTOELECTRIC) {
//printf("PE\n");
discrete_loss = PhotoElec_Effect_Standard_WiSec(photons, electrons, materials,
materials.electron_cut_energy[mat],
mat, id, count_d);
}
if (next_discrete_process == PHOTON_COMPTON) {
//printf("Compton\n");
discrete_loss = Compton_Effect_Standard_WiSec(photons, electrons,
materials.electron_cut_energy[mat],
id, count_d);
//printf("energy deposit %e\n", discrete_loss);
}
// Dosemap scoring
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
//printf("index dosemap %i\n", index_phantom.w);
atomicAdd(&dosemap.edep[index_phantom.w], discrete_loss);
}
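// ----------------------------------------------------------------------------
// Note on the photon/electron coupling used above: a secondary e- is written
// into the SAME stack slot id as its parent photon (the Compton branch also
// freezes the photon via photons.active[id]=0 while electrons.active[id]=1).
// The electron kernel below then tracks that slot and sets photons.active[id]
// back to 1 once the electron is absorbed, killed by the energy cut, or
// leaves the phantom.
// ----------------------------------------------------------------------------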
// Regular Navigator with voxelized phantom for electrons bound to a photon
__global__ void kernel_NavRegularPhan_Electron_BdPhoton(StackParticle electrons,
StackParticle photons,
Volume phantom,
Materials materials,
Dosimetry dosemap,
int* count_d, float step_limiter) {
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (id >= electrons.size) return;
//printf("\nNav e- endsimu %i active %i\n", electrons.endsimu[id], electrons.active[id]);
if (electrons.endsimu[id]) return;
if (!electrons.active[id]) return;
//// Init ///////////////////////////////////////////////////////////////////
// Read position
float3 position; // mm
position.x = electrons.px[id];
position.y = electrons.py[id];
position.z = electrons.pz[id];
// Defined index phantom
int4 index_phantom;
float3 ivoxsize = inverse_vector(phantom.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*phantom.nb_voxel_slice
+ index_phantom.y*phantom.size_in_vox.x
+ index_phantom.x; // linear index
// Read direction
float3 direction;
direction.x = electrons.dx[id];
direction.y = electrons.dy[id];
direction.z = electrons.dz[id];
// Get energy
float energy = electrons.E[id];
// Get material
unsigned short int mat = phantom.data[index_phantom.w];
/// Debug ///
//printf("e- %i E %e pos %.2f %.2f %.2f\n", id, energy, position.x, position.y, position.z);
//// Find next discrete interaction ///////////////////////////////////////
    // Find the next discrete interaction, total_dedx and the next discrete interaction distance
float next_interaction_distance = FLT_MAX;
float total_dedx = 0.0f;
unsigned char next_discrete_process = 0;
float interaction_distance;
float cross_section;
float probe = 0.0f; // DEBUG
// eIonisation
cross_section = eIonisation_CS_Standard(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, electrons.table_x_brent, 0)),
cross_section);
total_dedx += eIonisation_dedx_Standard(materials, mat, energy);
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = ELECTRON_EIONISATION;
}
// Multiple Scattering
cross_section = MSC_CS(materials, mat, energy);
interaction_distance = __fdividef(-__logf(Brent_real(id, electrons.table_x_brent, 0)),
cross_section);
// dedx = 0.0
if (interaction_distance < next_interaction_distance) {
next_interaction_distance = interaction_distance;
next_discrete_process = ELECTRON_MSC;
}
// Distance to the next voxel boundary (raycasting)
interaction_distance = get_boundary_voxel_by_raycasting(index_phantom, position,
direction, phantom.voxel_size);
//printf("Boundary PIL %e\n", interaction_distance);
if (interaction_distance < next_interaction_distance) {
        // overshoot the distance by 1 um so the step ends inside the next voxel
next_interaction_distance = interaction_distance+1.0e-03f;
next_discrete_process = ELECTRON_BOUNDARY_VOXEL;
}
    // FIXME: the step limiter has not been validated yet!
// step limiter
if (step_limiter < next_interaction_distance) {
next_interaction_distance = step_limiter;
        next_discrete_process = ELECTRON_STEP_LIMITER;
}
//printf("E %e dist %e\n", energy, next_interaction_distance);
//// Resolve continuous processes ///////////////////////////////////////
float safety_distance = __fdividef(energy, total_dedx);
float continuous_loss = 0.0f;
//printf("Safety PIL %e\n", safety_distance);
if (safety_distance < next_interaction_distance) {
next_interaction_distance = safety_distance;
next_discrete_process = ELECTRON_SAFETY;
continuous_loss = energy;
} else {
continuous_loss = total_dedx * next_interaction_distance;
energy -= continuous_loss;
if (energy < 0.0f) energy = 0.0f;
electrons.E[id] = energy;
}
// continuous loss should be at random point along step
float rnd_dist = next_interaction_distance * Brent_real(id, electrons.table_x_brent, 0);
float3 rnd_pos;
rnd_pos.x = position.x - direction.x * rnd_dist;
rnd_pos.y = position.y - direction.y * rnd_dist;
rnd_pos.z = position.z - direction.z * rnd_dist;
if ( rnd_pos.x <= 0 || rnd_pos.x >= dosemap.size_in_mm.x
|| rnd_pos.y <= 0 || rnd_pos.y >= dosemap.size_in_mm.y
|| rnd_pos.z <= 0 || rnd_pos.z >= dosemap.size_in_mm.z ) {
rnd_pos = position;
}
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(rnd_pos.x * ivoxsize.x);
index_phantom.y = int(rnd_pos.y * ivoxsize.y);
index_phantom.z = int(rnd_pos.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
atomicAdd(&dosemap.edep[index_phantom.w], continuous_loss);
//// Move particle //////////////////////////////////////////////////////
//printf("E %e dist %e\n", energy, next_interaction_distance);
position.x += direction.x * next_interaction_distance;
position.y += direction.y * next_interaction_distance;
position.z += direction.z * next_interaction_distance;
electrons.t[id] += (3.33564095198e-03f * next_interaction_distance);
electrons.px[id] = position.x;
electrons.py[id] = position.y;
electrons.pz[id] = position.z;
// Stop simulation if out of phantom
if ( position.x <= 0 || position.x >= phantom.size_in_mm.x
|| position.y <= 0 || position.y >= phantom.size_in_mm.y
|| position.z <= 0 || position.z >= phantom.size_in_mm.z ) {
electrons.endsimu[id] = 1; // stop the simulation
electrons.active[id] = 0;
photons.active[id] = 1; // unfreeze the photon tracking
atomicAdd(count_d, 1); // count simulated secondaries
return;
}
    //// Resolve discrete processes //////////////////////////////////////////
float discrete_loss = 0.0f;
if (next_discrete_process == ELECTRON_BOUNDARY_VOXEL ||
next_discrete_process == ELECTRON_STEP_LIMITER) {
//printf("Boundary || step limiter\n");
return;
}
if (next_discrete_process == ELECTRON_SAFETY) {
//printf("Safety\n");
electrons.endsimu[id] = 1; // stop the simulation
electrons.active[id] = 0;
photons.active[id] = 1; // unfreeze the photon tracking
atomicAdd(count_d, 1); // count simulated secondaries
return;
}
if (next_discrete_process == ELECTRON_EIONISATION) {
//printf("eIonisation\n");
discrete_loss = eIonisation_Effect_Standard_NoSec(electrons, photons,
materials.electron_cut_energy[mat],
materials.electron_max_energy[mat],
id, count_d);
}
if (next_discrete_process == ELECTRON_MSC) {
//printf("MSC\n");
// FIXME trueStepLength = next_interaction_distance?!
discrete_loss = MSC_Effect(electrons, materials, next_interaction_distance, mat, id);
}
// Dosemap scoring
ivoxsize = inverse_vector(dosemap.voxel_size);
index_phantom.x = int(position.x * ivoxsize.x);
index_phantom.y = int(position.y * ivoxsize.y);
index_phantom.z = int(position.z * ivoxsize.z);
index_phantom.w = index_phantom.z*dosemap.nb_voxel_slice
+ index_phantom.y*dosemap.size_in_vox.x
+ index_phantom.x; // linear index
//printf("index dosemap %i\n", index_phantom.w);
atomicAdd(&dosemap.edep[index_phantom.w], discrete_loss);
}
|
5e3f113e17011f53761ccfb5379eddcdcfc5b636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __PROJECTION_CU__
#define __PROJECTION_CU__
#include "ProjCell.h"
#include <math.h>
#include <cutil_inline.h>
//#define EMULATE
#ifdef EMULATE
#define ATOMIC_ADD(a,v) *a+v
#define ATOMIC_MIN(a,v) min(*a,v)
#define ATOMIC_MAX(a,v) max(*a,v)
#else
#define ATOMIC_ADD(a,v) atomicAdd(a,v)
#define ATOMIC_MIN(a,v) atomicMin(a,v)
#define ATOMIC_MAX(a,v) atomicMax(a,v)
#endif
__global__ void gpu_calcProjection_kernel(float* d_voxGrid, int voxSize, int dx, int dy, int dz, float thresh, ProjCell* d_cells) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int z = i / (dx * dy);
int r = i % (dx * dy);
int y = r / (dx);
int x = r % (dx);
if(((i < voxSize) && (x < dx -1)) && ((y < dy -1) && (z < dz - 1))) {
float val = d_voxGrid[i];
if(val > thresh) {
ProjCell *cell = &d_cells[x + (z * dx)];
ATOMIC_MIN(&cell->min, y);
ATOMIC_MAX(&cell->max, y);
ATOMIC_MIN(&cell->min, y);
ATOMIC_ADD(&cell->total, val);
ATOMIC_ADD(&cell->cnt,1);
}
//int vi = validPoint ? (vz * divX * divY) + (vy * divY) + vx : i % (divX*divY*divY);
//float inc = validPoint ? 1.0f : 0.0f;
//ATOMIC_ADD(&voxGrid[vi], inc);
}
};
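// ----------------------------------------------------------------------------
// Note: this kernel projects the voxel grid onto the XZ plane, accumulating
// per-cell min/max Y index, the sum of voxel values above thresh and their
// count. The second ATOMIC_MIN(&cell->min, y) above is redundant (it repeats
// the first). Correct results assume d_cells was initialised before launch,
// e.g. (field layout inferred from the usage above; the init values are an
// assumption, not shown in this file):
//
//   ProjCell init;
//   init.min = INT_MAX;  init.max = INT_MIN;
//   init.total = 0.0f;   init.cnt = 0;
// ----------------------------------------------------------------------------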
__global__ void gpu_conv_kernel(float* d_src, float* d_dst, int w, int h, float *d_conv, int cWidth, int cHeight, bool mirrorBoarder) {
int ind = (blockIdx.x * blockDim.x) + threadIdx.x;
if( ind < (w*h)) {
int y = ind / w;
int x = ind % w;
float origCellVal = d_src[x + y*w];
float sum = 0;
int halfWidth = cWidth/2;
int halfHeight = cHeight/2;
int convX = 0;
for(int i = -halfWidth; i <= halfWidth; i++) {
int convY = 0;
for(int j = -halfHeight; j <= halfHeight; j++) {
int xOff = x + i;
int yOff = y + j;
float cellVal = 0;
if(((xOff<0) || (yOff<0))||((xOff>=w)||(yOff>=h))) {
if(mirrorBoarder)
cellVal = origCellVal;
} else {
cellVal = d_src[xOff + (yOff * w)];
}
sum+= cellVal * d_conv[convX + (convY * cWidth)];
convY++;
}
convX++;
}
d_dst[x+y*w] = sum;
}
}
extern "C" void gpu_calcProjection(float* d_voxGrid, int dx, int dy, int dz, float thresh, ProjCell* d_cells) {
int threadsPerBlock = 256;
int voxSize = dx*dy*dz;
int blocks = (int) ceilf(voxSize/(float) threadsPerBlock);
hipLaunchKernelGGL(( gpu_calcProjection_kernel) , dim3(blocks),dim3(threadsPerBlock), 0, 0, d_voxGrid, voxSize, dx, dy, dz, thresh, d_cells);
}
extern "C" void gpu_conv(float* d_src, float* d_dst, int w, int h, float *d_conv, int cWidth, int cHeight, bool mirrorBoarder) {
int threadsPerBlock = 256;
int voxSize = w*h;
int blocks = (int) ceilf(voxSize/(float) threadsPerBlock);
hipLaunchKernelGGL(( gpu_conv_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_src, d_dst, w, h, d_conv, cWidth, cHeight, mirrorBoarder);
}
#endif | 5e3f113e17011f53761ccfb5379eddcdcfc5b636.cu | #ifndef __PROJECTION_CU__
#define __PROJECTION_CU__
#include "ProjCell.h"
#include <math.h>
#include <cutil_inline.h>
//#define EMULATE
#ifdef EMULATE
#define ATOMIC_ADD(a,v) *a+v
#define ATOMIC_MIN(a,v) min(*a,v)
#define ATOMIC_MAX(a,v) max(*a,v)
#else
#define ATOMIC_ADD(a,v) atomicAdd(a,v)
#define ATOMIC_MIN(a,v) atomicMin(a,v)
#define ATOMIC_MAX(a,v) atomicMax(a,v)
#endif
__global__ void gpu_calcProjection_kernel(float* d_voxGrid, int voxSize, int dx, int dy, int dz, float thresh, ProjCell* d_cells) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int z = i / (dx * dy);
int r = i % (dx * dy);
int y = r / (dx);
int x = r % (dx);
if(((i < voxSize) && (x < dx -1)) && ((y < dy -1) && (z < dz - 1))) {
float val = d_voxGrid[i];
if(val > thresh) {
ProjCell *cell = &d_cells[x + (z * dx)];
ATOMIC_MIN(&cell->min, y);
ATOMIC_MAX(&cell->max, y);
ATOMIC_MIN(&cell->min, y);
ATOMIC_ADD(&cell->total, val);
ATOMIC_ADD(&cell->cnt,1);
}
//int vi = validPoint ? (vz * divX * divY) + (vy * divY) + vx : i % (divX*divY*divY);
//float inc = validPoint ? 1.0f : 0.0f;
//ATOMIC_ADD(&voxGrid[vi], inc);
}
};
__global__ void gpu_conv_kernel(float* d_src, float* d_dst, int w, int h, float *d_conv, int cWidth, int cHeight, bool mirrorBoarder) {
int ind = (blockIdx.x * blockDim.x) + threadIdx.x;
if( ind < (w*h)) {
int y = ind / w;
int x = ind % w;
float origCellVal = d_src[x + y*w];
float sum = 0;
int halfWidth = cWidth/2;
int halfHeight = cHeight/2;
int convX = 0;
for(int i = -halfWidth; i <= halfWidth; i++) {
int convY = 0;
for(int j = -halfHeight; j <= halfHeight; j++) {
int xOff = x + i;
int yOff = y + j;
float cellVal = 0;
if(((xOff<0) || (yOff<0))||((xOff>=w)||(yOff>=h))) {
if(mirrorBoarder)
cellVal = origCellVal;
} else {
cellVal = d_src[xOff + (yOff * w)];
}
sum+= cellVal * d_conv[convX + (convY * cWidth)];
convY++;
}
convX++;
}
d_dst[x+y*w] = sum;
}
}
extern "C" void gpu_calcProjection(float* d_voxGrid, int dx, int dy, int dz, float thresh, ProjCell* d_cells) {
int threadsPerBlock = 256;
int voxSize = dx*dy*dz;
int blocks = (int) ceilf(voxSize/(float) threadsPerBlock);
gpu_calcProjection_kernel <<<blocks,threadsPerBlock>>> (d_voxGrid, voxSize, dx, dy, dz, thresh, d_cells);
}
extern "C" void gpu_conv(float* d_src, float* d_dst, int w, int h, float *d_conv, int cWidth, int cHeight, bool mirrorBoarder) {
int threadsPerBlock = 256;
int voxSize = w*h;
int blocks = (int) ceilf(voxSize/(float) threadsPerBlock);
gpu_conv_kernel<<<blocks, threadsPerBlock>>>(d_src, d_dst, w, h, d_conv, cWidth, cHeight, mirrorBoarder);
}
#endif |
33dabd48e8caeb4a08df934cd9e7dc8fd8cdbaa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/NbodySystem.cuh"
#include <stdio.h>
#include <hip/hip_runtime_api.h>
// data
static size_t numBodies;
static double *delta_time;
static double *cudaPositions;
static double *cudaVelocity;
static double *cudaForces;
static double *cudaMass;
// Constants
__constant__ double cudaG;
__constant__ double cudaDt;
// Kernel prototypes
void __global__ cudaCalculateForce(size_t numBodies, double *cudaPositions, double *cudaForces, double *cudaMass);
void __global__ cudaUpdatePositions(size_t numBodies, double *cudaPositions, double *cudaVelocity, double *cudaForces, double *cudaMass);
void initializeNbodySystem(double G, double *dt, double *positions, double *velocity, double *mass, size_t in_numBodies, Config *config){
if((config->getDebugLevel() & 0x10) == 16){
printf("NbodySystem.cu\t\tInitializing\n");
}
// Init
numBodies = in_numBodies;
delta_time = dt;
// Allocating memory
hipMalloc(&cudaPositions, numBodies*3*sizeof(double));
hipMalloc(&cudaVelocity, numBodies*3*sizeof(double));
hipMalloc(&cudaForces, numBodies*3*sizeof(double));
hipMalloc(&cudaMass, numBodies*sizeof(double));
// Setting initial data
hipMemcpy(cudaPositions, positions, numBodies*3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cudaVelocity, velocity, numBodies*3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(cudaMass, mass, numBodies*sizeof(double), hipMemcpyHostToDevice);
hipMemcpyToSymbol(cudaG, &G, sizeof(double), 0, hipMemcpyHostToDevice);
// Error check
hipError_t error = hipGetLastError();
if(error != 0){
printf("NbodySystem.cu\t\tError: %s\n", hipGetErrorString(error));
}
}
void update(double *newPositions, double *newVelocities){
// CUDA
dim3 grid((numBodies/512) + 1);
dim3 block(512);
hipLaunchKernelGGL(( cudaCalculateForce), dim3(grid), dim3(block), 0, 0, numBodies, cudaPositions, cudaForces, cudaMass);
hipLaunchKernelGGL(( cudaUpdatePositions), dim3(grid), dim3(block), 0, 0, numBodies, cudaPositions, cudaVelocity, cudaForces, cudaMass);
// Copy DT in case it has changed (should check if it has changed)
hipMemcpyToSymbol(cudaDt, delta_time, sizeof(double), 0, hipMemcpyHostToDevice);
// Getting new data
hipMemcpy(newPositions, cudaPositions, numBodies*3*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(newVelocities, cudaVelocity, numBodies*3*sizeof(double), hipMemcpyDeviceToHost);
// Error check
hipError_t error = hipGetLastError();
if(error != 0){
printf("NbodySystem.cu\t\tError: %s\n", hipGetErrorString(error));
}
}
// has speedup potential by using SHARED memory
// 48 KiB can contain the data needed for 614 bodies (double3 + double3 + double3 + double)
void __global__ cudaCalculateForce(size_t numBodies, double *cudaPositions, double *cudaForces, double *cudaMass){
size_t bodyId = (blockIdx.x * blockDim.x) + threadIdx.x;
if(bodyId >= numBodies){return;}
// Initialize force
double3 position;
position.x = cudaPositions[bodyId*3 + 0];
position.y = cudaPositions[bodyId*3 + 1];
position.z = cudaPositions[bodyId*3 + 2];
double3 force;
force.x = 0.0;
force.y = 0.0;
force.z = 0.0;
double mass = cudaMass[bodyId];
// Looping bodies
for(size_t i=0; i<numBodies; i++){
if(i != bodyId){
// Calculating distance between bodies
double dist_x = cudaPositions[i*3 + 0] - position.x;
double dist_y = cudaPositions[i*3 + 1] - position.y;
double dist_z = cudaPositions[i*3 + 2] - position.z;
double abs_dist = sqrt(dist_x*dist_x + dist_y*dist_y + dist_z*dist_z);
abs_dist = abs_dist*abs_dist*abs_dist;
// Updating force
force.x += (cudaG * mass * cudaMass[i])/abs_dist * dist_x;
force.y += (cudaG * mass * cudaMass[i])/abs_dist * dist_y;
force.z += (cudaG * mass * cudaMass[i])/abs_dist * dist_z;
}
}
cudaForces[bodyId*3 + 0] = force.x;
cudaForces[bodyId*3 + 1] = force.y;
cudaForces[bodyId*3 + 2] = force.z;
}
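// ----------------------------------------------------------------------------
// Note: this is a direct O(N^2) all-pairs sum with no softening, so abs_dist
// tends to 0 (and the force diverges) if two bodies coincide. A common guard
// is Plummer softening; illustrative sketch for the loop body above (eps2 is
// an assumed parameter, not part of this file):
//
//   double r2 = dist_x*dist_x + dist_y*dist_y + dist_z*dist_z + eps2;
//   double inv_r3 = 1.0 / (sqrt(r2) * r2);
//   force.x += cudaG * mass * cudaMass[i] * dist_x * inv_r3;
//   // ...and similarly for the y and z components
// ----------------------------------------------------------------------------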
void __global__ cudaUpdatePositions(size_t numBodies, double *cudaPositions, double *cudaVelocity, double *cudaForces, double *cudaMass){
int bodyId = (blockIdx.x * blockDim.x) + threadIdx.x;
if(bodyId >= numBodies){return;}
// Reading body data
double mass;
mass = cudaMass[bodyId];
double3 force;
force.x = cudaForces[bodyId*3 + 0];
force.y = cudaForces[bodyId*3 + 1];
force.z = cudaForces[bodyId*3 + 2];
double3 position;
position.x = cudaPositions[bodyId*3 + 0];
position.y = cudaPositions[bodyId*3 + 1];
position.z = cudaPositions[bodyId*3 + 2];
double3 velocity;
velocity.x = cudaVelocity[bodyId*3 + 0];
velocity.y = cudaVelocity[bodyId*3 + 1];
velocity.z = cudaVelocity[bodyId*3 + 2];
// Calculating delta
double3 delta;
delta.x = cudaDt * velocity.x;
delta.y = cudaDt * velocity.y;
delta.z = cudaDt * velocity.z;
// Updating new position based on delta
position.x += delta.x;
position.y += delta.y;
position.z += delta.z;
cudaPositions[bodyId*3 + 0] = position.x;
cudaPositions[bodyId*3 + 1] = position.y;
cudaPositions[bodyId*3 + 2] = position.z;
// Updating new velocity
velocity.x += cudaDt * force.x/mass;
velocity.y += cudaDt * force.y/mass;
velocity.z += cudaDt * force.z/mass;
cudaVelocity[bodyId*3 + 0] = velocity.x;
cudaVelocity[bodyId*3 + 1] = velocity.y;
cudaVelocity[bodyId*3 + 2] = velocity.z;
}
| 33dabd48e8caeb4a08df934cd9e7dc8fd8cdbaa5.cu | #include "../include/NbodySystem.cuh"
#include <stdio.h>
#include <cuda_runtime_api.h>
// data
static size_t numBodies;
static double *delta_time;
static double *cudaPositions;
static double *cudaVelocity;
static double *cudaForces;
static double *cudaMass;
// Constants
__constant__ double cudaG;
__constant__ double cudaDt;
// Kernel prototypes
void __global__ cudaCalculateForce(size_t numBodies, double *cudaPositions, double *cudaForces, double *cudaMass);
void __global__ cudaUpdatePositions(size_t numBodies, double *cudaPositions, double *cudaVelocity, double *cudaForces, double *cudaMass);
void initializeNbodySystem(double G, double *dt, double *positions, double *velocity, double *mass, size_t in_numBodies, Config *config){
if((config->getDebugLevel() & 0x10) == 16){
printf("NbodySystem.cu\t\tInitializing\n");
}
// Init
numBodies = in_numBodies;
delta_time = dt;
// Allocating memory
cudaMalloc(&cudaPositions, numBodies*3*sizeof(double));
cudaMalloc(&cudaVelocity, numBodies*3*sizeof(double));
cudaMalloc(&cudaForces, numBodies*3*sizeof(double));
cudaMalloc(&cudaMass, numBodies*sizeof(double));
// Setting initial data
cudaMemcpy(cudaPositions, positions, numBodies*3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cudaVelocity, velocity, numBodies*3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(cudaMass, mass, numBodies*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(cudaG, &G, sizeof(double), 0, cudaMemcpyHostToDevice);
// Error check
cudaError_t error = cudaGetLastError();
if(error != 0){
printf("NbodySystem.cu\t\tError: %s\n", cudaGetErrorString(error));
}
}
void update(double *newPositions, double *newVelocities){
// CUDA
dim3 grid((numBodies/512) + 1);
dim3 block(512);
cudaCalculateForce<<<grid, block>>>(numBodies, cudaPositions, cudaForces, cudaMass);
cudaUpdatePositions<<<grid, block>>>(numBodies, cudaPositions, cudaVelocity, cudaForces, cudaMass);
// Copy DT in case it has changed (should check if it has changed)
cudaMemcpyToSymbol(cudaDt, delta_time, sizeof(double), 0, cudaMemcpyHostToDevice);
// Getting new data
cudaMemcpy(newPositions, cudaPositions, numBodies*3*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(newVelocities, cudaVelocity, numBodies*3*sizeof(double), cudaMemcpyDeviceToHost);
// Error check
cudaError_t error = cudaGetLastError();
if(error != 0){
printf("NbodySystem.cu\t\tError: %s\n", cudaGetErrorString(error));
}
}
// has speedup potential by using SHARED memory
// 48 KiB can contain the data needed for 614 bodies (double3 + double3 + double3 + double)
void __global__ cudaCalculateForce(size_t numBodies, double *cudaPositions, double *cudaForces, double *cudaMass){
size_t bodyId = (blockIdx.x * blockDim.x) + threadIdx.x;
if(bodyId >= numBodies){return;}
// Initialize force
double3 position;
position.x = cudaPositions[bodyId*3 + 0];
position.y = cudaPositions[bodyId*3 + 1];
position.z = cudaPositions[bodyId*3 + 2];
double3 force;
force.x = 0.0;
force.y = 0.0;
force.z = 0.0;
double mass = cudaMass[bodyId];
// Looping bodies
for(size_t i=0; i<numBodies; i++){
if(i != bodyId){
// Calculating distance between bodies
double dist_x = cudaPositions[i*3 + 0] - position.x;
double dist_y = cudaPositions[i*3 + 1] - position.y;
double dist_z = cudaPositions[i*3 + 2] - position.z;
double abs_dist = sqrt(dist_x*dist_x + dist_y*dist_y + dist_z*dist_z);
abs_dist = abs_dist*abs_dist*abs_dist;
// Updating force
force.x += (cudaG * mass * cudaMass[i])/abs_dist * dist_x;
force.y += (cudaG * mass * cudaMass[i])/abs_dist * dist_y;
force.z += (cudaG * mass * cudaMass[i])/abs_dist * dist_z;
}
}
cudaForces[bodyId*3 + 0] = force.x;
cudaForces[bodyId*3 + 1] = force.y;
cudaForces[bodyId*3 + 2] = force.z;
}
void __global__ cudaUpdatePositions(size_t numBodies, double *cudaPositions, double *cudaVelocity, double *cudaForces, double *cudaMass){
int bodyId = (blockIdx.x * blockDim.x) + threadIdx.x;
if(bodyId >= numBodies){return;}
// Reading body data
double mass;
mass = cudaMass[bodyId];
double3 force;
force.x = cudaForces[bodyId*3 + 0];
force.y = cudaForces[bodyId*3 + 1];
force.z = cudaForces[bodyId*3 + 2];
double3 position;
position.x = cudaPositions[bodyId*3 + 0];
position.y = cudaPositions[bodyId*3 + 1];
position.z = cudaPositions[bodyId*3 + 2];
double3 velocity;
velocity.x = cudaVelocity[bodyId*3 + 0];
velocity.y = cudaVelocity[bodyId*3 + 1];
velocity.z = cudaVelocity[bodyId*3 + 2];
// Calculating delta
double3 delta;
delta.x = cudaDt * velocity.x;
delta.y = cudaDt * velocity.y;
delta.z = cudaDt * velocity.z;
// Updating new position based on delta
position.x += delta.x;
position.y += delta.y;
position.z += delta.z;
cudaPositions[bodyId*3 + 0] = position.x;
cudaPositions[bodyId*3 + 1] = position.y;
cudaPositions[bodyId*3 + 2] = position.z;
// Updating new velocity
velocity.x += cudaDt * force.x/mass;
velocity.y += cudaDt * force.y/mass;
velocity.z += cudaDt * force.z/mass;
cudaVelocity[bodyId*3 + 0] = velocity.x;
cudaVelocity[bodyId*3 + 1] = velocity.y;
cudaVelocity[bodyId*3 + 2] = velocity.z;
}
|
f321de6375153f5da525213651b895e4c60da214.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "make_and_count_seg.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec = NULL;
hipMalloc(&vec, XSIZE*YSIZE);
int *bin = NULL;
hipMalloc(&bin, XSIZE*YSIZE);
int *segcounter = NULL;
hipMalloc(&segcounter, XSIZE*YSIZE);
const int length = 1;
const int countlength = 1;
const int HighLength = 1;
const int HighSegmentLength = 1;
const int threadsHigh = 1;
const int LowSegmentLength = 1;
const float low = 1;
const float high = 1;
const float slope = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
make_and_count_seg), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
make_and_count_seg), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
make_and_count_seg), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f321de6375153f5da525213651b895e4c60da214.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "make_and_count_seg.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *vec = NULL;
cudaMalloc(&vec, XSIZE*YSIZE);
int *bin = NULL;
cudaMalloc(&bin, XSIZE*YSIZE);
int *segcounter = NULL;
cudaMalloc(&segcounter, XSIZE*YSIZE);
const int length = 1;
const int countlength = 1;
const int HighLength = 1;
const int HighSegmentLength = 1;
const int threadsHigh = 1;
const int LowSegmentLength = 1;
const float low = 1;
const float high = 1;
const float slope = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
make_and_count_seg<<<gridBlock,threadBlock>>>(vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
make_and_count_seg<<<gridBlock,threadBlock>>>(vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
make_and_count_seg<<<gridBlock,threadBlock>>>(vec,bin,segcounter,length,countlength,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength,low,high,slope);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
173fa35e8885df3f969490c3f842779b542d74f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "cuda_kernels.h"
#include "../include/parse-args.h"
#include <hiprand/hiprand_kernel.h>
#define typedef uint unsigned long
//__device__ int dummy = 0;
template<int v>
__global__ void scatter_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
double buf[v];
long idx[v];
for(int i = 0; i < v; i++){
buf[i] = source[gid+i];
}
for(int i = 0; i < v; i++){
idx[i] = ti[gid+i];
}
for(int i = 0; i < v; i++){
target[idx[i]] = buf[i];
}
}
template<int v>
__global__ void gather_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
double buf[v];
for(int i = 0; i < v; i++){
buf[i] = source[si[gid+i]];
}
for(int i = 0; i < v; i++){
target[gid+i] = buf[i];
}
}
//__global__ void gather_new(double *target,
template<int v>
__global__ void sg_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
long sidx[v];
long tidx[v];
for(int i = 0; i < v; i++){
sidx[i] = si[gid+i];
}
for(int i = 0; i < v; i++){
tidx[i] = ti[gid+i];
}
for(int i = 0; i < v; i++){
target[tidx[i]] = source[sidx[i]];
}
}
#define INSTANTIATE(V)\
template __global__ void scatter_t<V>(double* target, double* source, long* ti, long* si);\
template __global__ void gather_t<V>(double* target, double* source, long* ti, long* si); \
template __global__ void sg_t<V>(double* target, double* source, long* ti, long* si);
INSTANTIATE(1);
INSTANTIATE(2);
INSTANTIATE(4);
INSTANTIATE(5);
INSTANTIATE(8);
INSTANTIATE(16);
INSTANTIATE(32);
INSTANTIATE(64);
extern "C" int translate_args(unsigned int dim, unsigned int* grid, unsigned int* block, dim3 *grid_dim, dim3 *block_dim){
if (!grid || !block || dim == 0 || dim > 3) {
return 1;
}
if (dim == 1) {
*grid_dim = dim3(grid[0]);
*block_dim = dim3(block[0]);
}else if (dim == 2) {
*grid_dim = dim3(grid[0], grid[1]);
*block_dim = dim3(block[0], block[1]);
}else if (dim == 3) {
*grid_dim = dim3(grid[0], grid[1], grid[2]);
*block_dim = dim3(block[0], block[1], block[2]);
}
return 0;
}
extern "C" float cuda_sg_wrapper(enum sg_kernel kernel,
size_t vector_len,
uint dim, uint* grid, uint* block,
double* target, double *source,
long* ti, long* si,
unsigned int shmem){
dim3 grid_dim, block_dim;
hipEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
hipEventRecord(start);
if(kernel == SCATTER)
{
if (vector_len == 1)
hipLaunchKernelGGL(( scatter_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( scatter_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( scatter_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( scatter_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( scatter_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( scatter_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( scatter_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( scatter_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == GATHER)
{
if (vector_len == 1)
hipLaunchKernelGGL(( gather_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( gather_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( gather_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( gather_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( gather_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( gather_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( gather_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( gather_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == SG)
{
if (vector_len == 1)
hipLaunchKernelGGL(( sg_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( sg_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( sg_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( sg_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( sg_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( sg_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( sg_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( sg_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else
{
printf("ERROR UNRECOGNIZED KERNEL\n");
exit(1);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_ms = 0;
hipEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
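// ----------------------------------------------------------------------------
// Usage note: the wrapper above dispatches on vector_len to one of the
// template instantiations (1,2,4,5,8,16,32,64) and returns the kernel time in
// milliseconds. Each thread handles vector_len elements, so the launch must
// satisfy grid*block*vector_len == number of elements. Illustrative call
// (buffer allocation and index setup are assumed to happen elsewhere):
//
//   uint grid[1]  = { (uint)(n / (8 * 256)) };
//   uint block[1] = { 256 };
//   float ms = cuda_sg_wrapper(GATHER, 8, 1, grid, block,
//                              d_target, d_source, d_ti, d_si, 0);
// ----------------------------------------------------------------------------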
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void scatter_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
double *src_loc = src + (bid*ngatherperblock+gatherid)*delta;
//for (int i = 0; i < wpb; i++) {
src_loc[idx_shared[tid%V]] = idx_shared[tid%V];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
}
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void scatter_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
unsigned long long offset = gatherid;
hiprandState_t state;
hiprand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc
int random_gatherid = (int)(n * hiprand_uniform(&state));
if (tid < V) {
idx_shared[tid] = idx[tid];
}
double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta;
//for (int i = 0; i < wpb; i++) {
src_loc[idx_shared[tid%V]] = idx_shared[tid%V];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
}
//V2 = 8
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void gather_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
double *src_loc = src + (bid*ngatherperblock+gatherid)*delta;
double x;
//for (int i = 0; i < wpb; i++) {
x = src_loc[idx_shared[tid%V]];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
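// The conditional store below keeps the compiler from optimizing the gather away as dead code.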
if (x==0.5) src[0] = x;
}
template<int V>
__global__ void gather_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
unsigned long long offset = gatherid;
hiprandState_t state;
hiprand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc
int random_gatherid = (int)(n * hiprand_uniform(&state));
if (tid < V) {
idx_shared[tid] = idx[tid];
}
double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta;
double x;
//for (int i = 0; i < wpb; i++) {
x = src_loc[idx_shared[tid%V]];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
if (x==0.5) src[0] = x;
}
//todo -- add WRAP
template<int V>
__global__ void gather_new(double* source,
sgIdx_t* idx, size_t delta, int dummy, int wpt)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
//int bid = blockIdx.x;
//int nblk = blockDim.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
int gid = (blockIdx.x * blockDim.x + threadIdx.x);
double *sl = source + wpt*gid*delta;
double buf[V];
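// Each thread performs wpt passes of V gathers, advancing its base pointer by delta per pass.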
for (int j = 0; j < wpt; j++) {
for (int i = 0; i < V; i++) {
buf[i] = sl[idx_shared[i]];
//source[i+gid*delta] = 8;
//sl[i] = sl[idx[i]];
}
sl = sl + delta;
}
if (dummy) {
sl[idx_shared[0]] = buf[dummy];
}
/*
for (int i = 0; i < V; i++) {
if (buf[i] == 199402) {
printf("oop\n");
}
}
*/
//printf("idx[1]: %d\n", idx[1]);
/*
for (int i = 0; i < V; i++) {
printf("idx %d is %zu", i, idx[i]);
}
printf("\n");
*/
}
#define INSTANTIATE2(V)\
template __global__ void gather_new<V>(double* source, sgIdx_t* idx, size_t delta, int dummy, int wpt); \
template __global__ void gather_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb);\
template __global__ void scatter_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb); \
template __global__ void gather_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n); \
template __global__ void scatter_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n);
//INSTANTIATE2(1);
//INSTANTIATE2(2);
//INSTANTIATE2(4);
//INSTANTIATE2(5);
INSTANTIATE2(8);
INSTANTIATE2(16);
INSTANTIATE2(32);
INSTANTIATE2(64);
INSTANTIATE2(128);
INSTANTIATE2(256);
INSTANTIATE2(512);
INSTANTIATE2(1024);
extern "C" float cuda_block_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt)
{
dim3 grid_dim, block_dim;
hipEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
hipEventRecord(start);
// KERNEL
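// V is a compile-time template parameter, so each supported pattern length needs
// its own kernel instantiation; dispatch on pat_len at runtime.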
if (kernel == GATHER) {
if (pat_len == 8) {
hipLaunchKernelGGL(( gather_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 16) {
hipLaunchKernelGGL(( gather_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 32) {
hipLaunchKernelGGL(( gather_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 64) {
hipLaunchKernelGGL(( gather_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 128) {
hipLaunchKernelGGL(( gather_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len ==256) {
hipLaunchKernelGGL(( gather_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 512) {
hipLaunchKernelGGL(( gather_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 1024) {
hipLaunchKernelGGL(( gather_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
} else if (kernel == SCATTER) {
if (pat_len == 8) {
hipLaunchKernelGGL(( scatter_block<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 16) {
hipLaunchKernelGGL(( scatter_block<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 32) {
hipLaunchKernelGGL(( scatter_block<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 64) {
hipLaunchKernelGGL(( scatter_block<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 128) {
hipLaunchKernelGGL(( scatter_block<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len ==256) {
hipLaunchKernelGGL(( scatter_block<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 512) {
hipLaunchKernelGGL(( scatter_block<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 1024) {
hipLaunchKernelGGL(( scatter_block<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt);
} else {
printf("ERROR NOT SUPPORTED, %zu\n", pat_len);
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_ms = 0;
hipEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
extern "C" float cuda_block_random_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt, size_t seed)
{
dim3 grid_dim, block_dim;
hipEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
hipEventRecord(start);
// KERNEL
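// Same dispatch as cuda_block_wrapper, but the *_random kernels pick a random
// pattern-instance offset per gather group via hiprand.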
if (kernel == GATHER) {
if (pat_len == 8) {
hipLaunchKernelGGL(( gather_block_random<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 16) {
hipLaunchKernelGGL(( gather_block_random<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 32) {
hipLaunchKernelGGL(( gather_block_random<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 64) {
hipLaunchKernelGGL(( gather_block_random<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 128) {
hipLaunchKernelGGL(( gather_block_random<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len ==256) {
hipLaunchKernelGGL(( gather_block_random<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 512) {
hipLaunchKernelGGL(( gather_block_random<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 1024) {
hipLaunchKernelGGL(( gather_block_random<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
} else if (kernel == SCATTER) {
if (pat_len == 8) {
hipLaunchKernelGGL(( scatter_block_random<8>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 16) {
hipLaunchKernelGGL(( scatter_block_random<16>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 32) {
hipLaunchKernelGGL(( scatter_block_random<32>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 64) {
hipLaunchKernelGGL(( scatter_block_random<64>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 128) {
hipLaunchKernelGGL(( scatter_block_random<128>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len ==256) {
hipLaunchKernelGGL(( scatter_block_random<256>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 512) {
hipLaunchKernelGGL(( scatter_block_random<512>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 1024) {
hipLaunchKernelGGL(( scatter_block_random<1024>), dim3(grid_dim), dim3(block_dim), 0, 0, source, pat_dev, pat_len, delta, wpt, seed, n);
} else {
printf("ERROR NOT SUPPORTED, %zu\n", pat_len);
}
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_ms = 0;
hipEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
extern "C" float cuda_new_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt)
{
dim3 grid_dim, block_dim;
hipEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
hipMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, hipMemcpyHostToDevice);
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
hipEventRecord(start);
// KERNEL
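// Only pattern lengths 8, 16, 64, 256 and 512 are dispatched here; other sizes
// fall through to the error message.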
if (pat_len == 8) {
hipLaunchKernelGGL(( gather_new<8>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 16) {
hipLaunchKernelGGL(( gather_new<16>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 64) {
hipLaunchKernelGGL(( gather_new<64>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len ==256) {
hipLaunchKernelGGL(( gather_new<256>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 512) {
hipLaunchKernelGGL(( gather_new<512>), dim3(grid_dim),dim3(block_dim), 0, 0, source, pat_dev, (long)delta, 0, wpt);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_ms = 0;
hipEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
/*
dim3 grid_dim, block_dim;
hipEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
hipEventCreate(&start);
hipEventCreate(&stop);
hipDeviceSynchronize();
hipEventRecord(start);
if(kernel == SCATTER)
{
if (vector_len == 1)
hipLaunchKernelGGL(( scatter_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( scatter_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( scatter_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( scatter_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( scatter_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( scatter_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( scatter_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( scatter_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == GATHER)
{
if (vector_len == 1)
hipLaunchKernelGGL(( gather_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( gather_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( gather_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( gather_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( gather_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( gather_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( gather_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( gather_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == SG)
{
if (vector_len == 1)
hipLaunchKernelGGL(( sg_t<1>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 2)
hipLaunchKernelGGL(( sg_t<2>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 4)
hipLaunchKernelGGL(( sg_t<4>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 5)
hipLaunchKernelGGL(( sg_t<5>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 8)
hipLaunchKernelGGL(( sg_t<8>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 16)
hipLaunchKernelGGL(( sg_t<16>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 32)
hipLaunchKernelGGL(( sg_t<32>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else if (vector_len == 64)
hipLaunchKernelGGL(( sg_t<64>), dim3(grid_dim),dim3(block_dim),shmem, 0, target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else
{
printf("ERROR UNRECOGNIZED KERNEL\n");
exit(1);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float time_ms = 0;
hipEventElapsedTime(&time_ms, start, stop);
return time_ms;
}*/
| 173fa35e8885df3f969490c3f842779b542d74f2.cu | #include <stdio.h>
#include "cuda_kernels.h"
#include "../include/parse-args.h"
#include <curand_kernel.h>
#define typedef uint unsigned long
//__device__ int dummy = 0;
template<int v>
__global__ void scatter_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
double buf[v];
long idx[v];
for(int i = 0; i < v; i++){
buf[i] = source[gid+i];
}
for(int i = 0; i < v; i++){
idx[i] = ti[gid+i];
}
for(int i = 0; i < v; i++){
target[idx[i]] = buf[i];
}
}
template<int v>
__global__ void gather_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
double buf[v];
for(int i = 0; i < v; i++){
buf[i] = source[si[gid+i]];
}
for(int i = 0; i < v; i++){
target[gid+i] = buf[i];
}
}
//__global__ void gather_new(double *target,
template<int v>
__global__ void sg_t(double* target,
double* source,
long* ti,
long* si)
{
extern __shared__ char space[];
int gid = v*(blockIdx.x * blockDim.x + threadIdx.x);
long sidx[v];
long tidx[v];
for(int i = 0; i < v; i++){
sidx[i] = si[gid+i];
}
for(int i = 0; i < v; i++){
tidx[i] = ti[gid+i];
}
for(int i = 0; i < v; i++){
target[tidx[i]] = source[sidx[i]];
}
}
#define INSTANTIATE(V)\
template __global__ void scatter_t<V>(double* target, double* source, long* ti, long* si);\
template __global__ void gather_t<V>(double* target, double* source, long* ti, long* si); \
template __global__ void sg_t<V>(double* target, double* source, long* ti, long* si);
INSTANTIATE(1);
INSTANTIATE(2);
INSTANTIATE(4);
INSTANTIATE(5);
INSTANTIATE(8);
INSTANTIATE(16);
INSTANTIATE(32);
INSTANTIATE(64);
extern "C" int translate_args(unsigned int dim, unsigned int* grid, unsigned int* block, dim3 *grid_dim, dim3 *block_dim){
if (!grid || !block || dim == 0 || dim > 3) {
return 1;
}
if (dim == 1) {
*grid_dim = dim3(grid[0]);
*block_dim = dim3(block[0]);
}else if (dim == 2) {
*grid_dim = dim3(grid[0], grid[1]);
*block_dim = dim3(block[0], block[1]);
}else if (dim == 3) {
*grid_dim = dim3(grid[0], grid[1], grid[2]);
*block_dim = dim3(block[0], block[1], block[2]);
}
return 0;
}
extern "C" float cuda_sg_wrapper(enum sg_kernel kernel,
size_t vector_len,
uint dim, uint* grid, uint* block,
double* target, double *source,
long* ti, long* si,
unsigned int shmem){
dim3 grid_dim, block_dim;
cudaEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
cudaEventRecord(start);
if(kernel == SCATTER)
{
if (vector_len == 1)
scatter_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
scatter_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
scatter_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
scatter_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
scatter_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
scatter_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
scatter_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
scatter_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == GATHER)
{
if (vector_len == 1)
gather_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
gather_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
gather_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
gather_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
gather_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
gather_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
gather_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
gather_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == SG)
{
if (vector_len == 1)
sg_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
sg_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
sg_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
sg_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
sg_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
sg_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
sg_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
sg_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else
{
printf("ERROR UNRECOGNIZED KERNEL\n");
exit(1);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_ms = 0;
cudaEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void scatter_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
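// Each group of V consecutive threads handles one pattern instance; a block
// processes blockDim.x / V instances, each offset by delta elements.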
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
double *src_loc = src + (bid*ngatherperblock+gatherid)*delta;
//for (int i = 0; i < wpb; i++) {
src_loc[idx_shared[tid%V]] = idx_shared[tid%V];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
}
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void scatter_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
unsigned long long offset = gatherid;
curandState_t state;
curand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc
int random_gatherid = (int)(n * curand_uniform(&state));
if (tid < V) {
idx_shared[tid] = idx[tid];
}
double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta;
//for (int i = 0; i < wpb; i++) {
src_loc[idx_shared[tid%V]] = idx_shared[tid%V];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
}
//V2 = 8
//assume block size >= index buffer size
//assume index buffer size divides block size
template<int V>
__global__ void gather_block(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
double *src_loc = src + (bid*ngatherperblock+gatherid)*delta;
double x;
//for (int i = 0; i < wpb; i++) {
x = src_loc[idx_shared[tid%V]];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
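// The conditional store below keeps the compiler from optimizing the gather away as dead code.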
if (x==0.5) src[0] = x;
}
template<int V>
__global__ void gather_block_random(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
int bid = blockIdx.x;
int ngatherperblock = blockDim.x / V;
int gatherid = tid / V;
unsigned long long sequence = blockIdx.x; //all thread blocks can use same sequence
unsigned long long offset = gatherid;
curandState_t state;
curand_init(seed, sequence, offset, &state);//everyone with same gather id should get same src_loc
int random_gatherid = (int)(n * curand_uniform(&state));
if (tid < V) {
idx_shared[tid] = idx[tid];
}
double *src_loc = src + (bid*ngatherperblock+random_gatherid)*delta;
double x;
//for (int i = 0; i < wpb; i++) {
x = src_loc[idx_shared[tid%V]];
//src_loc[idx_shared[tid%V]] = 1337.;
//src_loc += delta;
//}
if (x==0.5) src[0] = x;
}
//todo -- add WRAP
template<int V>
__global__ void gather_new(double* source,
sgIdx_t* idx, size_t delta, int dummy, int wpt)
{
__shared__ int idx_shared[V];
int tid = threadIdx.x;
//int bid = blockIdx.x;
//int nblk = blockDim.x;
if (tid < V) {
idx_shared[tid] = idx[tid];
}
int gid = (blockIdx.x * blockDim.x + threadIdx.x);
double *sl = source + wpt*gid*delta;
double buf[V];
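// Each thread performs wpt passes of V gathers, advancing its base pointer by delta per pass.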
for (int j = 0; j < wpt; j++) {
for (int i = 0; i < V; i++) {
buf[i] = sl[idx_shared[i]];
//source[i+gid*delta] = 8;
//sl[i] = sl[idx[i]];
}
sl = sl + delta;
}
if (dummy) {
sl[idx_shared[0]] = buf[dummy];
}
/*
for (int i = 0; i < V; i++) {
if (buf[i] == 199402) {
printf("oop\n");
}
}
*/
//printf("idx[1]: %d\n", idx[1]);
/*
for (int i = 0; i < V; i++) {
printf("idx %d is %zu", i, idx[i]);
}
printf("\n");
*/
}
#define INSTANTIATE2(V)\
template __global__ void gather_new<V>(double* source, sgIdx_t* idx, size_t delta, int dummy, int wpt); \
template __global__ void gather_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb);\
template __global__ void scatter_block<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb); \
template __global__ void gather_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n); \
template __global__ void scatter_block_random<V>(double *src, sgIdx_t* idx, int idx_len, size_t delta, int wpb, size_t seed, size_t n);
//INSTANTIATE2(1);
//INSTANTIATE2(2);
//INSTANTIATE2(4);
//INSTANTIATE2(5);
INSTANTIATE2(8);
INSTANTIATE2(16);
INSTANTIATE2(32);
INSTANTIATE2(64);
INSTANTIATE2(128);
INSTANTIATE2(256);
INSTANTIATE2(512);
INSTANTIATE2(1024);
extern "C" float cuda_block_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt)
{
dim3 grid_dim, block_dim;
cudaEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
cudaEventRecord(start);
// KERNEL
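// V is a compile-time template parameter, so each supported pattern length needs
// its own kernel instantiation; dispatch on pat_len at runtime.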
if (kernel == GATHER) {
if (pat_len == 8) {
gather_block<8><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 16) {
gather_block<16><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 32) {
gather_block<32><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 64) {
gather_block<64><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 128) {
gather_block<128><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len ==256) {
gather_block<256><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 512) {
gather_block<512><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 1024) {
gather_block<1024><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
} else if (kernel == SCATTER) {
if (pat_len == 8) {
scatter_block<8><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 16) {
scatter_block<16><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 32) {
scatter_block<32><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 64) {
scatter_block<64><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 128) {
scatter_block<128><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len ==256) {
scatter_block<256><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 512) {
scatter_block<512><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
}else if (pat_len == 1024) {
scatter_block<1024><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt);
} else {
printf("ERROR NOT SUPPORTED, %zu\n", pat_len);
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_ms = 0;
cudaEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
extern "C" float cuda_block_random_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt, size_t seed)
{
dim3 grid_dim, block_dim;
cudaEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
cudaEventRecord(start);
// KERNEL
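// Same dispatch as cuda_block_wrapper, but the *_random kernels pick a random
// pattern-instance offset per gather group via curand.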
if (kernel == GATHER) {
if (pat_len == 8) {
gather_block_random<8><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 16) {
gather_block_random<16><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 32) {
gather_block_random<32><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 64) {
gather_block_random<64><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 128) {
gather_block_random<128><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len ==256) {
gather_block_random<256><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 512) {
gather_block_random<512><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 1024) {
gather_block_random<1024><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
} else if (kernel == SCATTER) {
if (pat_len == 8) {
scatter_block_random<8><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 16) {
scatter_block_random<16><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 32) {
scatter_block_random<32><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 64) {
scatter_block_random<64><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 128) {
scatter_block_random<128><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len ==256) {
scatter_block_random<256><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 512) {
scatter_block_random<512><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
}else if (pat_len == 1024) {
scatter_block_random<1024><<<grid_dim, block_dim>>>(source, pat_dev, pat_len, delta, wpt, seed, n);
} else {
printf("ERROR NOT SUPPORTED, %zu\n", pat_len);
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_ms = 0;
cudaEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
extern "C" float cuda_new_wrapper(uint dim, uint* grid, uint* block,
enum sg_kernel kernel,
double *source,
sgIdx_t* pat_dev,
sgIdx_t* pat,
size_t pat_len,
size_t delta,
size_t n,
size_t wrap,
int wpt)
{
dim3 grid_dim, block_dim;
cudaEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
cudaMemcpy(pat_dev, pat, sizeof(sgIdx_t)*pat_len, cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
cudaEventRecord(start);
// KERNEL
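// Only pattern lengths 8, 16, 64, 256 and 512 are dispatched here; other sizes
// fall through to the error message.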
if (pat_len == 8) {
gather_new<8><<<grid_dim,block_dim>>>(source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 16) {
gather_new<16><<<grid_dim,block_dim>>>(source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 64) {
gather_new<64><<<grid_dim,block_dim>>>(source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len ==256) {
gather_new<256><<<grid_dim,block_dim>>>(source, pat_dev, (long)delta, 0, wpt);
}else if (pat_len == 512) {
gather_new<512><<<grid_dim,block_dim>>>(source, pat_dev, (long)delta, 0, wpt);
} else {
printf("ERROR NOT SUPPORTED: %zu\n", pat_len);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_ms = 0;
cudaEventElapsedTime(&time_ms, start, stop);
return time_ms;
}
/*
dim3 grid_dim, block_dim;
cudaEvent_t start, stop;
if(translate_args(dim, grid, block, &grid_dim, &block_dim)) return 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaDeviceSynchronize();
cudaEventRecord(start);
if(kernel == SCATTER)
{
if (vector_len == 1)
scatter_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
scatter_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
scatter_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
scatter_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
scatter_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
scatter_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
scatter_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
scatter_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == GATHER)
{
if (vector_len == 1)
gather_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
gather_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
gather_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
gather_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
gather_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
gather_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
gather_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
gather_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else if(kernel == SG)
{
if (vector_len == 1)
sg_t<1><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 2)
sg_t<2><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 4)
sg_t<4><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 5)
sg_t<5><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 8)
sg_t<8><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 16)
sg_t<16><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 32)
sg_t<32><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else if (vector_len == 64)
sg_t<64><<<grid_dim,block_dim,shmem>>>(target, source, ti, si);
else
{
printf("ERROR: UNSUPPORTED VECTOR LENGTH\n");
exit(1);
}
}
else
{
printf("ERROR UNRECOGNIZED KERNEL\n");
exit(1);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time_ms = 0;
cudaEventElapsedTime(&time_ms, start, stop);
return time_ms;
}*/
|
f7ffe08ce2f516fd1ec6ec4807f91e0e5bf7aeb2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (16)
#define MATRIX_N (16)
#define MATRIX_K (16)
const int WMMA_M =16;
const int WMMA_N =16;
const int WMMA_K =16;
typedef unsigned char atype;
typedef unsigned char btype;
typedef int ctype;
typedef int dtype;
typedef int host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
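// A single CTA with one warp computes one WMMA_M x WMMA_N x WMMA_K tile.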
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//only valid for square matrices
matrix[row*row_size+col]=static_cast<T>(row==col ? 1 : 0);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,blayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
unsigned int start_time=0,end_time=0;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Bounds checking
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
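// Time only the mma_sync itself using the per-SM clock() cycle counter.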
start_time=clock();
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
end_time=clock();
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
printf("Time=%u\n",end_time-start_time);
}
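// Elementwise cast kernel used to convert between host_type and the WMMA operand/result types.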
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
hipEvent_t startWMMA;
hipEvent_t stopWMMA;
cudaErrCheck(hipEventCreate(&startWMMA));
cudaErrCheck(hipEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice));
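// Convert the host_type operands on the device into the types the tensor-core kernel expects.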
hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K);
hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N);
hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(hipEventRecord(startWMMA));
hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(hipEventRecord(stopWMMA));
hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(hipEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost));
printf("WMMA result copied back to host; comparing against the CPU reference below.\n\n");
float wmmaTime;
cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(hipEventDestroy(startWMMA));
cudaErrCheck(hipEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(hipFree(a_htype));
cudaErrCheck(hipFree(b_htype));
cudaErrCheck(hipFree(c_htype));
cudaErrCheck(hipFree(d_htype));
cudaErrCheck(hipFree(a_atype));
cudaErrCheck(hipFree(b_btype));
cudaErrCheck(hipFree(c_ctype));
cudaErrCheck(hipFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(hipDeviceReset());
return 0;
}
| f7ffe08ce2f516fd1ec6ec4807f91e0e5bf7aeb2.cu | #include <stdio.h>
#include <curand.h>
#include <ctime>
#include <assert.h>
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#include <mma.h>
using namespace nvcuda;
//enum MatrixLayout{
#define ROW_MAJOR 0
#define COL_MAJOR 1
//};
//ONLY THE PARAMETER HERE NEEDS TO BE CHANGED
// Must be multiples of 16 for wmma code to work
#define MATRIX_M (16)
#define MATRIX_N (16)
#define MATRIX_K (16)
const int WMMA_M =16;
const int WMMA_N =16;
const int WMMA_K =16;
typedef unsigned char atype;
typedef unsigned char btype;
typedef int ctype;
typedef int dtype;
typedef int host_type;
#define A_LAYOUT ROW_MAJOR
#define B_LAYOUT ROW_MAJOR
#define C_LAYOUT ROW_MAJOR
#define D_LAYOUT ROW_MAJOR
#define NUM_CTA 1
#define WARP_IN_CTA 1
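// A single CTA with one warp computes one WMMA_M x WMMA_N x WMMA_K tile.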
//Don't change anything after here
#define THREAD_IN_WARP 32
#if A_LAYOUT==ROW_MAJOR
#define LAYOUT_A wmma::row_major
#define A_STRIDE MATRIX_K
#else
#define LAYOUT_A wmma::col_major
#define A_STRIDE MATRIX_M
#endif
#if B_LAYOUT==ROW_MAJOR
#define LAYOUT_B wmma::row_major
#define B_STRIDE MATRIX_N
#else
#define LAYOUT_B wmma::col_major
#define B_STRIDE MATRIX_K
#endif
#if C_LAYOUT==ROW_MAJOR
#define LAYOUT_C wmma::mem_row_major
#define C_STRIDE MATRIX_N
#else
#define LAYOUT_C wmma::mem_col_major
#define C_STRIDE MATRIX_M
#endif
#if D_LAYOUT==ROW_MAJOR
#define LAYOUT_D wmma::mem_row_major
#define D_STRIDE MATRIX_N
#else
#define LAYOUT_D wmma::mem_col_major
#define D_STRIDE MATRIX_M
#endif
enum MatrixInitializationType{
ZERO,
ONE,
RANDOM,
IDENTITY,
LINEAR
};
int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){
static int val=0;
switch(init_type){
case ZERO:
break;
case ONE:
val=1;
break;
case RANDOM:
val=rand()%randomRange;
break;
case LINEAR:
val++;
break;
default :
printf("illegal MatrixInitializationType\n");
abort();
break;
}
if(RESET)
val=0;
return val;
}
template <typename T>
void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
T val;
if(layout==ROW_MAJOR)
val=matrix[row*col_size+col];
else
val=matrix[col*row_size+row];
printf("%.2f ",static_cast<float>(val));
}
printf(";\n");
}
}
template <typename T>
void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
if(init_type==IDENTITY){
assert(row_size==col_size);//only valid for square matrices
matrix[row*row_size+col]=static_cast<T>(row==col ? 1 : 0);
}
else{
if(layout==ROW_MAJOR){
matrix[row*col_size+col]=static_cast<T>(get_value(init_type));
}
else{
matrix[col*row_size+row]=static_cast<T>(get_value(init_type));
}
}
}
}
get_value(init_type,10,true);//resetting the val counter
print_matrix<T>(matrix,row_size,col_size,layout);
}
int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){
int index=0;
if(layout==ROW_MAJOR){
index=row*col_size+col;
}
else{
index=col*row_size+row;
}
return index;
}
template <typename T>
void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){
for(int row=0;row<M;row++){
for(int col=0;col<N;col++){
int rindex=get_index(row,col,M,N,resultlayout);
int cindex=get_index(row,col,M,N,clayout);
for(int k=0;k<K;k++){
int aindex=get_index(row,k,M,K,alayout);
int bindex=get_index(k,col,K,N,blayout);
result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex];
}
result_matrix[rindex]+=matrix_c[cindex];
}
}
print_matrix<T>(result_matrix,M,N,resultlayout);
}
template <typename T>
void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){
for(int row=0;row<row_size;row++){
for(int col=0;col<col_size;col++){
int index_a,index_b;
index_a=get_index(row,col,row_size,col_size,alayout);
index_b=get_index(row,col,row_size,col_size,blayout);
if(matrix_a[index_a]!=matrix_b[index_b])
printf("ERROR at index row=%d col=%d\n",row,col);
}
}
}
__global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d)
{
unsigned int start_time=0,end_time=0;
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag;
// Bounds checking
wmma::load_matrix_sync(a_frag, a, A_STRIDE);
wmma::load_matrix_sync(b_frag, b, B_STRIDE);
wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C);
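// Time only the mma_sync itself using the per-SM clock() cycle counter.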
start_time=clock();
wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
end_time=clock();
wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D);
printf("Time=%u\n",end_time-start_time);
}
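// Elementwise cast kernel used to convert between host_type and the WMMA operand/result types.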
template <typename T1,typename T2>
__global__ void convert(T1 *out, T2 *in, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
out[idx] = in[idx];
}
}
int main(int argc, char* argv[]) {
//data on device in host type format
host_type *a_htype;
host_type *b_htype;
host_type *c_htype;
host_type *d_htype;
//data on device in gemm format
atype *a_atype;
btype *b_btype;
ctype *c_ctype;
dtype *d_dtype;
srand(time(NULL));
host_type *a_host_wmma;
host_type *b_host_wmma;
host_type *c_host_wmma;
host_type *d_host_wmma;
host_type *d_cal_host_wmma;
cudaEvent_t startWMMA;
cudaEvent_t stopWMMA;
cudaErrCheck(cudaEventCreate(&startWMMA));
cudaErrCheck(cudaEventCreate(&stopWMMA));
// Use tensor cores
cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type)));
cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype)));
cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype)));
cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype)));
cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype)));
a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type));
b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type));
c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type));
printf("a_host\n");
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM);
printf("b_host\n");
initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM);
printf("c_host\n");
initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO);
printf("d_cal_host\n");
initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO);
printf("d_cal_host\n");
matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT);
cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice));
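// Convert the host_type operands on the device into the types the tensor-core kernel expects.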
convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K);
convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N);
convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N);
printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K);
printf("Running with wmma...\n");
cudaErrCheck(cudaEventRecord(startWMMA));
wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype);
cudaErrCheck(cudaEventRecord(stopWMMA));
convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N);
cudaErrCheck(cudaEventSynchronize(stopWMMA));
// Error checking
printf("\nChecking results...\n");
cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost));
printf("WMMA result copied back to host; comparing against the CPU reference below.\n\n");
float wmmaTime;
cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA));
printf("wmma took %.2fms\n", wmmaTime);
cudaErrCheck(cudaEventDestroy(startWMMA));
cudaErrCheck(cudaEventDestroy(stopWMMA));
printf("D_CALCULATED\n");
print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("D_WMMA\n");
print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT);
printf("CHECKING\n");
compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT);
cudaErrCheck(cudaFree(a_htype));
cudaErrCheck(cudaFree(b_htype));
cudaErrCheck(cudaFree(c_htype));
cudaErrCheck(cudaFree(d_htype));
cudaErrCheck(cudaFree(a_atype));
cudaErrCheck(cudaFree(b_btype));
cudaErrCheck(cudaFree(c_ctype));
cudaErrCheck(cudaFree(d_dtype));
free(a_host_wmma);
free(b_host_wmma);
free(c_host_wmma);
free(d_host_wmma);
free(d_cal_host_wmma);
cudaErrCheck(cudaDeviceReset());
return 0;
}
|
35ab5c82783390de40bff2e25e7d419f53713919.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "image.cuh"
__global__ void mixImages(DATA32* im1data, DATA32* im2data, float k, DATA32* dstData)
{
int n = blockDim.x * blockIdx.x + threadIdx.x;
unsigned char* currentPixel1 = (unsigned char *)(im1data + n);
unsigned char* currentPixel2 = (unsigned char *)(im2data + n);
unsigned char* dstPixel = (unsigned char *)(dstData + n);
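// Per-channel linear blend: dst = k * im1 + (1 - k) * im2.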
dstPixel[0] = currentPixel1[0] * k + currentPixel2[0] * (1.0 - k);
dstPixel[1] = currentPixel1[1] * k + currentPixel2[1] * (1.0 - k);
dstPixel[2] = currentPixel1[2] * k + currentPixel2[2] * (1.0 - k);
}
__global__ void padImage(DATA32* src, int srcW, int srcH, DATA32* dst, int dstW, int dstH)
{
// int xStart = (dstW - srcW) / 2;
// int xEnd = xStart + srcW;
int xStart = 0;
int xEnd = srcW;
int yStart = (dstH - srcH) / 2;
int yEnd = yStart + srcH;
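// Copies the source into a vertically centered band of dst and zero-fills the border.
// Note: the kernel never reads its thread indices, so every launched thread writes the whole image.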
for (int x = 0; x < dstW; x++)
{
for (int y = 0; y < dstH; y++)
{
if (x < xStart || x >= xEnd || y < yStart || y >= yEnd)
{
dst[y * dstW + x] = 0;
}
else
{
dst[y * dstW + x] = src[(y - yStart) * srcW + (x - xStart)];
}
}
}
}
__global__ void bicubicInterpolation(DATA32* src, int srcW, int srcH, DATA32* dst, int dstW, int dstH)
{
int srcX = blockIdx.x * blockDim.x + threadIdx.x;
int srcY = blockIdx.y * blockDim.y + threadIdx.y;
if (srcX >= srcW || srcY >= srcH)
{
return;
}
unsigned char p[4][4][3];
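// Gather the 4x4 neighborhood of source pixels (clamped at the right/bottom borders) for each color channel.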
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
for (int k = 0; k < 3; k++)
{
int x = srcX + i;
if (x >= srcW)
{
x = srcW - 1;
}
int y = srcY + j;
if (y >= srcH)
{
y = srcH - 1;
}
unsigned char* srcPixel = (unsigned char*)(src + y * srcW + x);
p[i][j][k] = srcPixel[k];
}
}
}
float a[4][4][3];
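// Compute the 16 bicubic surface coefficients per channel from the 4x4 neighborhood
// (standard Catmull-Rom-style bicubic interpolation weights).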
for (int i = 0; i < 3; i++)
a[0][0][i] = p[1][1][i];
for (int i = 0; i < 3; i++)
a[0][1][i] = -.5 * p[1][0][i] + .5 * p[1][2][i];
for (int i = 0; i < 3; i++)
a[0][2][i] = p[1][0][i] - 2.5 * p[1][1][i] + 2 * p[1][2][i] - .5 * p[1][3][i];
for (int i = 0; i < 3; i++)
a[0][3][i] = -.5 * p[1][0][i] + 1.5 * p[1][1][i] - 1.5 * p[1][2][i] + .5 * p[1][3][i];
for (int i = 0; i < 3; i++)
a[1][0][i] = -.5 * p[0][1][i] + .5 * p[2][1][i];
for (int i = 0; i < 3; i++)
a[1][1][i] = .25 * p[0][0][i] - .25 * p[0][2][i] - .25 * p[2][0][i] + .25 * p[2][2][i];
for (int i = 0; i < 3; i++)
a[1][2][i] = -.5 * p[0][0][i] + 1.25 * p[0][1][i] - p[0][2][i] + .25 * p[0][3][i]
+ .5 * p[2][0][i] - 1.25 * p[2][1][i] + p[2][2][i] - .25 * p[2][3][i];
for (int i = 0; i < 3; i++)
a[1][3][i] = .25 * p[0][0][i] - .75 * p[0][1][i] + .75 * p[0][2][i] - .25 * p[0][3][i]
- .25 * p[2][0][i] + .75 * p[2][1][i] - .75 * p[2][2][i] + .25 * p[2][3][i];
for (int i = 0; i < 3; i++)
a[2][0][i] = p[0][1][i] - 2.5 * p[1][1][i] + 2 * p[2][1][i] - .5 * p[3][1][i];
for (int i = 0; i < 3; i++)
a[2][1][i] = -.5 * p[0][0][i] + .5 * p[0][2][i] + 1.25 * p[1][0][i] - 1.25 * p[1][2][i]
- p[2][0][i] + p[2][2][i] + .25 * p[3][0][i] - .25 * p[3][2][i];
for (int i = 0; i < 3; i++)
a[2][2][i] = p[0][0][i] - 2.5 * p[0][1][i] + 2 * p[0][2][i] - .5 * p[0][3][i] - 2.5 * p[1][0][i]
+ 6.25 * p[1][1][i] - 5 * p[1][2][i] + 1.25 * p[1][3][i] + 2 * p[2][0][i]
- 5 * p[2][1][i] + 4 * p[2][2][i] - p[2][3][i] - .5 * p[3][0][i]
+ 1.25 * p[3][1][i] - p[3][2][i] + .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[2][3][i] = -.5 * p[0][0][i] + 1.5 * p[0][1][i] - 1.5 * p[0][2][i] + .5 * p[0][3][i]
+ 1.25 * p[1][0][i] - 3.75 * p[1][1][i] + 3.75 * p[1][2][i]
- 1.25 * p[1][3][i] - p[2][0][i] + 3 * p[2][1][i] - 3 * p[2][2][i] + p[2][3][i]
+ .25 * p[3][0][i] - .75 * p[3][1][i] + .75 * p[3][2][i] - .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[3][0][i] = -.5 * p[0][1][i] + 1.5 * p[1][1][i] - 1.5 * p[2][1][i] + .5 * p[3][1][i];
for (int i = 0; i < 3; i++)
a[3][1][i] = .25 * p[0][0][i] - .25 * p[0][2][i] - .75 * p[1][0][i] + .75 * p[1][2][i]
+ .75 * p[2][0][i] - .75 * p[2][2][i] - .25 * p[3][0][i] + .25 * p[3][2][i];
for (int i = 0; i < 3; i++)
a[3][2][i] = -.5 * p[0][0][i] + 1.25 * p[0][1][i] - p[0][2][i] + .25 * p[0][3][i]
+ 1.5 * p[1][0][i] - 3.75 * p[1][1][i] + 3 * p[1][2][i] - .75 * p[1][3][i]
- 1.5 * p[2][0][i] + 3.75 * p[2][1][i] - 3 * p[2][2][i] + .75 * p[2][3][i]
+ .5 * p[3][0][i] - 1.25 * p[3][1][i] + p[3][2][i] - .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[3][3][i] = .25 * p[0][0][i] - .75 * p[0][1][i] + .75 * p[0][2][i] - .25 * p[0][3][i]
- .75 * p[1][0][i] + 2.25 * p[1][1][i] - 2.25 * p[1][2][i] + .75 * p[1][3][i]
+ .75 * p[2][0][i] - 2.25 * p[2][1][i] + 2.25 * p[2][2][i] - .75 * p[2][3][i]
- .25 * p[3][0][i] + .75 * p[3][1][i] - .75 * p[3][2][i] + .25 * p[3][3][i];
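    // Each source pixel produces an hx-by-hy block of destination pixels:
    // evaluate the bicubic polynomial at fractional offsets (_x, _y) and
    // clamp each channel to [0, 255].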
float hx = (float)dstW / (float)srcW;
float hy = (float)dstH / (float)srcH;
for (int x = 0; x < hx; x++)
{
for (int y = 0; y < hy; y++)
{
float _x = (float)x / hx;
float _y = (float)y / hy;
float _x2 = _x * _x;
float _x3 = _x2 * _x;
float _y2 = _y * _y;
float _y3 = _y2 * _y;
int dstX = srcX * hx + x;
int dstY = srcY * hy + y;
unsigned char* dstPixel = (unsigned char*)(dst + dstY * dstW + dstX);
for (int i = 0; i < 3; i++)
{
int value = (a[0][0][i] + a[0][1][i] * _y + a[0][2][i] * _y2 + a[0][3][i] * _y3) +
(a[1][0][i] + a[1][1][i] * _y + a[1][2][i] * _y2 + a[1][3][i] * _y3) * _x +
(a[2][0][i] + a[2][1][i] * _y + a[2][2][i] * _y2 + a[2][3][i] * _y3) * _x2 +
(a[3][0][i] + a[3][1][i] * _y + a[3][2][i] * _y2 + a[3][3][i] * _y3) * _x3;
if (value < 0)
{
value = 0;
}
if (value > 255)
{
value = 255;
}
dstPixel[i] = value;
}
}
}
}
| 35ab5c82783390de40bff2e25e7d419f53713919.cu | #include "image.cuh"
__global__ void mixImages(DATA32* im1data, DATA32* im2data, float k, DATA32* dstData)
{
int n = blockDim.x * blockIdx.x + threadIdx.x;
unsigned char* currentPixel1 = (unsigned char *)(im1data + n);
unsigned char* currentPixel2 = (unsigned char *)(im2data + n);
unsigned char* dstPixel = (unsigned char *)(dstData + n);
dstPixel[0] = currentPixel1[0] * k + currentPixel2[0] * (1.0 - k);
dstPixel[1] = currentPixel1[1] * k + currentPixel2[1] * (1.0 - k);
dstPixel[2] = currentPixel1[2] * k + currentPixel2[2] * (1.0 - k);
}
__global__ void padImage(DATA32* src, int srcW, int srcH, DATA32* dst, int dstW, int dstH)
{
// int xStart = (dstW - srcW) / 2;
// int xEnd = xStart + srcW;
int xStart = 0;
int xEnd = srcW;
int yStart = (dstH - srcH) / 2;
int yEnd = yStart + srcH;
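    // Note: this kernel ignores thread/block indices, so every launched thread
    // writes the entire padded destination image; the source is copied left-aligned
    // into a vertically centered band and everything outside it is zero-filled.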
for (int x = 0; x < dstW; x++)
{
for (int y = 0; y < dstH; y++)
{
            if (x < xStart || x >= xEnd || y < yStart || y >= yEnd)
{
dst[y * dstW + x] = 0;
}
else
{
dst[y * dstW + x] = src[(y - yStart) * srcW + (x - xStart)];
}
}
}
}
__global__ void bicubicInterpolation(DATA32* src, int srcW, int srcH, DATA32* dst, int dstW, int dstH)
{
int srcX = blockIdx.x * blockDim.x + threadIdx.x;
int srcY = blockIdx.y * blockDim.y + threadIdx.y;
if (srcX >= srcW || srcY >= srcH)
{
return;
}
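    // Gather the 4x4 neighborhood of source pixels (three color channels each)
    // starting at (srcX, srcY), clamping reads at the right and bottom borders.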
unsigned char p[4][4][3];
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
for (int k = 0; k < 3; k++)
{
int x = srcX + i;
if (x >= srcW)
{
x = srcW - 1;
}
int y = srcY + j;
if (y >= srcH)
{
y = srcH - 1;
}
unsigned char* srcPixel = (unsigned char*)(src + y * srcW + x);
p[i][j][k] = srcPixel[k];
}
}
}
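    // Expand the sampled neighborhood into the 16 bicubic interpolation
    // polynomial coefficients a[i][j], computed independently per color channel.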
float a[4][4][3];
for (int i = 0; i < 3; i++)
a[0][0][i] = p[1][1][i];
for (int i = 0; i < 3; i++)
a[0][1][i] = -.5 * p[1][0][i] + .5 * p[1][2][i];
for (int i = 0; i < 3; i++)
a[0][2][i] = p[1][0][i] - 2.5 * p[1][1][i] + 2 * p[1][2][i] - .5 * p[1][3][i];
for (int i = 0; i < 3; i++)
a[0][3][i] = -.5 * p[1][0][i] + 1.5 * p[1][1][i] - 1.5 * p[1][2][i] + .5 * p[1][3][i];
for (int i = 0; i < 3; i++)
a[1][0][i] = -.5 * p[0][1][i] + .5 * p[2][1][i];
for (int i = 0; i < 3; i++)
a[1][1][i] = .25 * p[0][0][i] - .25 * p[0][2][i] - .25 * p[2][0][i] + .25 * p[2][2][i];
for (int i = 0; i < 3; i++)
a[1][2][i] = -.5 * p[0][0][i] + 1.25 * p[0][1][i] - p[0][2][i] + .25 * p[0][3][i]
+ .5 * p[2][0][i] - 1.25 * p[2][1][i] + p[2][2][i] - .25 * p[2][3][i];
for (int i = 0; i < 3; i++)
a[1][3][i] = .25 * p[0][0][i] - .75 * p[0][1][i] + .75 * p[0][2][i] - .25 * p[0][3][i]
- .25 * p[2][0][i] + .75 * p[2][1][i] - .75 * p[2][2][i] + .25 * p[2][3][i];
for (int i = 0; i < 3; i++)
a[2][0][i] = p[0][1][i] - 2.5 * p[1][1][i] + 2 * p[2][1][i] - .5 * p[3][1][i];
for (int i = 0; i < 3; i++)
a[2][1][i] = -.5 * p[0][0][i] + .5 * p[0][2][i] + 1.25 * p[1][0][i] - 1.25 * p[1][2][i]
- p[2][0][i] + p[2][2][i] + .25 * p[3][0][i] - .25 * p[3][2][i];
for (int i = 0; i < 3; i++)
a[2][2][i] = p[0][0][i] - 2.5 * p[0][1][i] + 2 * p[0][2][i] - .5 * p[0][3][i] - 2.5 * p[1][0][i]
+ 6.25 * p[1][1][i] - 5 * p[1][2][i] + 1.25 * p[1][3][i] + 2 * p[2][0][i]
- 5 * p[2][1][i] + 4 * p[2][2][i] - p[2][3][i] - .5 * p[3][0][i]
+ 1.25 * p[3][1][i] - p[3][2][i] + .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[2][3][i] = -.5 * p[0][0][i] + 1.5 * p[0][1][i] - 1.5 * p[0][2][i] + .5 * p[0][3][i]
+ 1.25 * p[1][0][i] - 3.75 * p[1][1][i] + 3.75 * p[1][2][i]
- 1.25 * p[1][3][i] - p[2][0][i] + 3 * p[2][1][i] - 3 * p[2][2][i] + p[2][3][i]
+ .25 * p[3][0][i] - .75 * p[3][1][i] + .75 * p[3][2][i] - .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[3][0][i] = -.5 * p[0][1][i] + 1.5 * p[1][1][i] - 1.5 * p[2][1][i] + .5 * p[3][1][i];
for (int i = 0; i < 3; i++)
a[3][1][i] = .25 * p[0][0][i] - .25 * p[0][2][i] - .75 * p[1][0][i] + .75 * p[1][2][i]
+ .75 * p[2][0][i] - .75 * p[2][2][i] - .25 * p[3][0][i] + .25 * p[3][2][i];
for (int i = 0; i < 3; i++)
a[3][2][i] = -.5 * p[0][0][i] + 1.25 * p[0][1][i] - p[0][2][i] + .25 * p[0][3][i]
+ 1.5 * p[1][0][i] - 3.75 * p[1][1][i] + 3 * p[1][2][i] - .75 * p[1][3][i]
- 1.5 * p[2][0][i] + 3.75 * p[2][1][i] - 3 * p[2][2][i] + .75 * p[2][3][i]
+ .5 * p[3][0][i] - 1.25 * p[3][1][i] + p[3][2][i] - .25 * p[3][3][i];
for (int i = 0; i < 3; i++)
a[3][3][i] = .25 * p[0][0][i] - .75 * p[0][1][i] + .75 * p[0][2][i] - .25 * p[0][3][i]
- .75 * p[1][0][i] + 2.25 * p[1][1][i] - 2.25 * p[1][2][i] + .75 * p[1][3][i]
+ .75 * p[2][0][i] - 2.25 * p[2][1][i] + 2.25 * p[2][2][i] - .75 * p[2][3][i]
- .25 * p[3][0][i] + .75 * p[3][1][i] - .75 * p[3][2][i] + .25 * p[3][3][i];
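    // Each source pixel produces an hx-by-hy block of destination pixels:
    // evaluate the bicubic polynomial at fractional offsets (_x, _y) and
    // clamp each channel to [0, 255].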
float hx = (float)dstW / (float)srcW;
float hy = (float)dstH / (float)srcH;
for (int x = 0; x < hx; x++)
{
for (int y = 0; y < hy; y++)
{
float _x = (float)x / hx;
float _y = (float)y / hy;
float _x2 = _x * _x;
float _x3 = _x2 * _x;
float _y2 = _y * _y;
float _y3 = _y2 * _y;
int dstX = srcX * hx + x;
int dstY = srcY * hy + y;
unsigned char* dstPixel = (unsigned char*)(dst + dstY * dstW + dstX);
for (int i = 0; i < 3; i++)
{
int value = (a[0][0][i] + a[0][1][i] * _y + a[0][2][i] * _y2 + a[0][3][i] * _y3) +
(a[1][0][i] + a[1][1][i] * _y + a[1][2][i] * _y2 + a[1][3][i] * _y3) * _x +
(a[2][0][i] + a[2][1][i] * _y + a[2][2][i] * _y2 + a[2][3][i] * _y3) * _x2 +
(a[3][0][i] + a[3][1][i] * _y + a[3][2][i] * _y2 + a[3][3][i] * _y3) * _x3;
if (value < 0)
{
value = 0;
}
if (value > 255)
{
value = 255;
}
dstPixel[i] = value;
}
}
}
}
|
4476956184491d9d5dd20b762401a8268b0d9525.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parrots_cuda_helper.hpp"
#include "roi_pool_kernel.cuh"
void ROIPoolForwardCUDAKernelLauncher(const DArrayLite input,
const DArrayLite rois, DArrayLite output,
DArrayLite argmax, int pooled_height,
int pooled_width, float spatial_scale,
hipStream_t stream) {
int output_size = output.size();
int channels = input.dim(1);
int height = input.dim(2);
int width = input.dim(3);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(input.elemType().prim(), [&] {
hipLaunchKernelGGL(( roi_pool_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.ptr<scalar_t>(), rois.ptr<scalar_t>(),
output.ptr<scalar_t>(), argmax.ptr<int>(), pooled_height,
pooled_width, spatial_scale, channels, height, width);
});
PARROTS_CUDA_CHECK(hipGetLastError());
}
void ROIPoolBackwardCUDAKernelLauncher(const DArrayLite grad_output,
const DArrayLite rois,
const DArrayLite argmax,
DArrayLite grad_input, int pooled_height,
int pooled_width, float spatial_scale,
hipStream_t stream) {
int output_size = grad_output.size();
int channels = grad_output.dim(1);
int height = grad_output.dim(2);
int width = grad_output.dim(3);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(grad_output.elemType().prim(), [&] {
hipLaunchKernelGGL(( roi_pool_backward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, grad_output.ptr<scalar_t>(), rois.ptr<scalar_t>(),
argmax.ptr<int>(), grad_input.ptr<scalar_t>(), pooled_height,
pooled_width, channels, height, width);
});
PARROTS_CUDA_CHECK(hipGetLastError());
}
| 4476956184491d9d5dd20b762401a8268b0d9525.cu | #include "parrots_cuda_helper.hpp"
#include "roi_pool_kernel.cuh"
void ROIPoolForwardCUDAKernelLauncher(const DArrayLite input,
const DArrayLite rois, DArrayLite output,
DArrayLite argmax, int pooled_height,
int pooled_width, float spatial_scale,
cudaStream_t stream) {
int output_size = output.size();
int channels = input.dim(1);
int height = input.dim(2);
int width = input.dim(3);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(input.elemType().prim(), [&] {
roi_pool_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.ptr<scalar_t>(), rois.ptr<scalar_t>(),
output.ptr<scalar_t>(), argmax.ptr<int>(), pooled_height,
pooled_width, spatial_scale, channels, height, width);
});
PARROTS_CUDA_CHECK(cudaGetLastError());
}
void ROIPoolBackwardCUDAKernelLauncher(const DArrayLite grad_output,
const DArrayLite rois,
const DArrayLite argmax,
DArrayLite grad_input, int pooled_height,
int pooled_width, float spatial_scale,
cudaStream_t stream) {
int output_size = grad_output.size();
int channels = grad_output.dim(1);
int height = grad_output.dim(2);
int width = grad_output.dim(3);
PARROTS_DISPATCH_FLOATING_TYPES_AND_HALF(grad_output.elemType().prim(), [&] {
roi_pool_backward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, grad_output.ptr<scalar_t>(), rois.ptr<scalar_t>(),
argmax.ptr<int>(), grad_input.ptr<scalar_t>(), pooled_height,
pooled_width, channels, height, width);
});
PARROTS_CUDA_CHECK(cudaGetLastError());
}
|