hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
43715a9ba272d411dbd716974369392a4ce4f47f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void
zvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix c,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zvjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobi_diagscal(
magma_int_t num_rows,
magma_z_matrix d,
magma_z_matrix b,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
hipLaunchKernelGGL(( zjacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobiupdate_kernel( int num_rows,
int num_cols,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_z_matrix
t = A*x
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobiupdate(
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( zjacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate_bw(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( zjacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x,
magmaDoubleComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaDoubleComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[in]
tmp magma_z_matrix
workspace
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix tmp,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
//magma_z_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
hipLaunchKernelGGL(( zjacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_queue_sync( queue );
//magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
| 43715a9ba272d411dbd716974369392a4ce4f47f.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void
zvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix c,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
zvjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobi_diagscal(
magma_int_t num_rows,
magma_z_matrix d,
magma_z_matrix b,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
zjacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobiupdate_kernel( int num_rows,
int num_cols,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_z_matrix
t = A*x
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobiupdate(
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
zjacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate_bw(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
zjacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x,
magmaDoubleComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaDoubleComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[in]
tmp magma_z_matrix
workspace
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix tmp,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
//magma_z_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
zjacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_queue_sync( queue );
//magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
|
3d8e27c3e2d05139ef3b7299006a4d7552fabcc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#foreach( $degs in $degrees )
// P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / eps + |b_0 + b_1*X + b_2*X^2 + ... + b_n*X^n|
// eps = 0.1
#set( $degs_a = $degs[0] )
#set( $degs_b = $degs[1] )
#set( $coefs_a = $degs_a )
#set( $coefs_b = $degs_b )
#set( $a_counts = $coefs_a + 1 )
#set( $b_counts = $coefs_b + 1 )
#set( $max_x = $degs[2] )
template <typename scalar_t>
__global__ void rational_cuda_forward_C_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) {
#foreach( $idx in [0..$coefs_a] )
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t b_$idx = b[$idx];
#end
scalar_t eps = scalar_t(0.1);
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x){
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx * xp$idx
#end
;
scalar_t Q = eps + abs(b_0
#foreach( $idx in [1..$coefs_b] )
+ b_$idx * xp$idx
#end
);
result[index] = P/Q;
}
}
at::Tensor rational_cuda_forward_C_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){
auto result = at::empty_like(x);
const auto x_size = x.numel();
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (x_size + blockSize - 1) / blockSize;
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_forward_C_$degs[0]_$degs[1]", ([&] {
rational_cuda_forward_C_kernel_$degs[0]_$degshipLaunchKernelGGL(([1]<scalar_t>)
, dim3(numBlocks), dim3(blockSize), 0, 0,
x.data_ptr<scalar_t>(),
n.data_ptr<scalar_t>(),
d.data_ptr<scalar_t>(),
result.data_ptr<scalar_t>(),
x_size);
}));
return result;
}
template <typename scalar_t>
__global__ void rational_cuda_backward_C_kernel_$degs[0]_$degs[1](
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ x,
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ d_x,
double* __restrict__ d_a,
double* __restrict__ d_b,
size_t x_size) {
__shared__ double sda[$a_counts];
__shared__ double sdb[$b_counts];
scalar_t eps = scalar_t(0.1);
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
sda[$idx] = 0;
#end
#foreach( $idx in [0..$coefs_b] )
sdb[$idx] = 0;
#end
}
__syncthreads();
#foreach( $idx in [0..$coefs_a] )
scalar_t d_a$idx = 0;
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t d_b$idx = 0;
scalar_t b_$idx = b[$idx];
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x)
{
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx*xp$idx
#end
;
scalar_t A = b_0
#foreach( $idx in [1..$coefs_b] )
+ b_$idx * xp$idx
#end
;
scalar_t Q = eps + abs(A);
scalar_t R = a_1
#foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * a_$idx * xp$value
#end
;
scalar_t S = copysign( scalar_t(1.0), A ) * (b_1
#foreach( $idx in [2..$coefs_b] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * b_$idx * xp$value
#end
);
scalar_t mpq2 = -P/(Q*Q);
scalar_t grad_o = grad_output[index];
scalar_t d_i_x = (R/Q + S*mpq2);
d_x[index] = d_i_x * grad_o;
scalar_t d_i_b0 = mpq2 * copysign( scalar_t(1.0), A );
d_b0 += d_i_b0 * grad_o;
#foreach( $idx in [1..$coefs_b] )
scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), A ) * xp$idx;
d_b$idx += d_i_b$idx * grad_o;
#end
scalar_t d_i_a0 = scalar_t(1.0)/Q;
d_a0 += d_i_a0 * grad_o;
#foreach( $idx in [1..$coefs_a] )#set( $value = $idx - 1 )
scalar_t d_i_a$idx = xp$idx/Q;
d_a$idx += d_i_a$idx * grad_o;
#end
}
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&sda[$idx], d_a$idx);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&sdb[$idx], d_b$idx);
#end
__syncthreads();
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&d_a[$idx], sda[$idx]);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&d_b[$idx], sdb[$idx]);
#end
}
}
std::vector<torch::Tensor> rational_cuda_backward_C_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){
const auto x_size = x.numel();
auto d_x = at::empty_like(x);
auto d_n = at::zeros_like(n).toType(at::kDouble);
auto d_d = at::zeros_like(d).toType(at::kDouble);
int blockSize = THREADS_PER_BLOCK;
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_backward_C_$degs[0]_$degs[1]", ([&] {
rational_cuda_backward_C_kernel_$degs[0]_$degshipLaunchKernelGGL(([1]<scalar_t>)
, dim3(16), dim3(blockSize), 0, 0,
grad_output.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
n.data_ptr<scalar_t>(),
d.data_ptr<scalar_t>(),
d_x.data_ptr<scalar_t>(),
d_n.data_ptr<double>(),
d_d.data_ptr<double>(),
x_size);
}));
return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)};
}
#end
| 3d8e27c3e2d05139ef3b7299006a4d7552fabcc6.cu | #foreach( $degs in $degrees )
// P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / eps + |b_0 + b_1*X + b_2*X^2 + ... + b_n*X^n|
// eps = 0.1
#set( $degs_a = $degs[0] )
#set( $degs_b = $degs[1] )
#set( $coefs_a = $degs_a )
#set( $coefs_b = $degs_b )
#set( $a_counts = $coefs_a + 1 )
#set( $b_counts = $coefs_b + 1 )
#set( $max_x = $degs[2] )
template <typename scalar_t>
__global__ void rational_cuda_forward_C_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) {
#foreach( $idx in [0..$coefs_a] )
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t b_$idx = b[$idx];
#end
scalar_t eps = scalar_t(0.1);
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x){
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx * xp$idx
#end
;
scalar_t Q = eps + abs(b_0
#foreach( $idx in [1..$coefs_b] )
+ b_$idx * xp$idx
#end
);
result[index] = P/Q;
}
}
at::Tensor rational_cuda_forward_C_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){
auto result = at::empty_like(x);
const auto x_size = x.numel();
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (x_size + blockSize - 1) / blockSize;
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_forward_C_$degs[0]_$degs[1]", ([&] {
rational_cuda_forward_C_kernel_$degs[0]_$degs[1]<scalar_t>
<<<numBlocks, blockSize>>>(
x.data_ptr<scalar_t>(),
n.data_ptr<scalar_t>(),
d.data_ptr<scalar_t>(),
result.data_ptr<scalar_t>(),
x_size);
}));
return result;
}
template <typename scalar_t>
__global__ void rational_cuda_backward_C_kernel_$degs[0]_$degs[1](
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ x,
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ d_x,
double* __restrict__ d_a,
double* __restrict__ d_b,
size_t x_size) {
__shared__ double sda[$a_counts];
__shared__ double sdb[$b_counts];
scalar_t eps = scalar_t(0.1);
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
sda[$idx] = 0;
#end
#foreach( $idx in [0..$coefs_b] )
sdb[$idx] = 0;
#end
}
__syncthreads();
#foreach( $idx in [0..$coefs_a] )
scalar_t d_a$idx = 0;
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t d_b$idx = 0;
scalar_t b_$idx = b[$idx];
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x)
{
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx*xp$idx
#end
;
scalar_t A = b_0
#foreach( $idx in [1..$coefs_b] )
+ b_$idx * xp$idx
#end
;
scalar_t Q = eps + abs(A);
scalar_t R = a_1
#foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * a_$idx * xp$value
#end
;
scalar_t S = copysign( scalar_t(1.0), A ) * (b_1
#foreach( $idx in [2..$coefs_b] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * b_$idx * xp$value
#end
);
scalar_t mpq2 = -P/(Q*Q);
scalar_t grad_o = grad_output[index];
scalar_t d_i_x = (R/Q + S*mpq2);
d_x[index] = d_i_x * grad_o;
scalar_t d_i_b0 = mpq2 * copysign( scalar_t(1.0), A );
d_b0 += d_i_b0 * grad_o;
#foreach( $idx in [1..$coefs_b] )
scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), A ) * xp$idx;
d_b$idx += d_i_b$idx * grad_o;
#end
scalar_t d_i_a0 = scalar_t(1.0)/Q;
d_a0 += d_i_a0 * grad_o;
#foreach( $idx in [1..$coefs_a] )#set( $value = $idx - 1 )
scalar_t d_i_a$idx = xp$idx/Q;
d_a$idx += d_i_a$idx * grad_o;
#end
}
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&sda[$idx], d_a$idx);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&sdb[$idx], d_b$idx);
#end
__syncthreads();
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&d_a[$idx], sda[$idx]);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&d_b[$idx], sdb[$idx]);
#end
}
}
std::vector<torch::Tensor> rational_cuda_backward_C_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){
const auto x_size = x.numel();
auto d_x = at::empty_like(x);
auto d_n = at::zeros_like(n).toType(at::kDouble);
auto d_d = at::zeros_like(d).toType(at::kDouble);
int blockSize = THREADS_PER_BLOCK;
AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "rational_cuda_backward_C_$degs[0]_$degs[1]", ([&] {
rational_cuda_backward_C_kernel_$degs[0]_$degs[1]<scalar_t>
<<<16, blockSize>>>(
grad_output.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
n.data_ptr<scalar_t>(),
d.data_ptr<scalar_t>(),
d_x.data_ptr<scalar_t>(),
d_n.data_ptr<double>(),
d_d.data_ptr<double>(),
x_size);
}));
return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)};
}
#end
|
456390b3fdc149af4f71e4d13611a01139d70aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
#define M 1
#define N 10
__global__ void multiply( const int a[] ,const int b[], int c[] , const int dim,const int thread_number)
{
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
if(index<dim){
if(dim<=thread_number){ //if more threads than array size
printf("Thread %i; Modifying value of index %i\n ", index, index);
c[index]=a[index]+b[index];
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
printf("Thread %i; Modifying value of index %i \n", index, i);
c[i]=a[i]+b[i];
}
}
else{ //if last thread deal with all remaining array entries
for(int i=index*(int)(dim/thread_number); i< dim; i++){
printf("Thread %i; Modifying value of index %i\n",index, i );
c[i]=a[i]+b[i];
}
}
}
}
}
int main(int argc, char *argv[]){
//Measure time
clock_t time_begin;
time_begin=clock();
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
int size_array=9; //here, size_array =L hqs to be a square
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (int*)malloc( size_array * sizeof(int));
for(int i=0; i<size_array; i++){
h_array1[i]=rand()%10;
h_array2[i]=rand()%10;
printf("%i|%i\t", h_array1[i], h_array2[i]);
if((i+1)%(int)sqrt(size_array)==0)
printf("\n");
}
printf("\n");
// hipMalloc a device array
hipMalloc(&d_array1,size_array * sizeof(int));
hipMalloc(&d_array2,size_array * sizeof(int));
hipMalloc(&d_array3,size_array * sizeof(int));
// download and inspect the result on the host:
hipMemcpy(d_array1, h_array1, sizeof(int)*size_array, hipMemcpyHostToDevice);
hipMemcpy(d_array2, h_array2, sizeof(int)*size_array, hipMemcpyHostToDevice);
dim3 bloque(N,N); //Two-dimensional block of N*N threads
dim3 grid(M,M); //Two-dimensional grid of M*M blocks
int thread_number= N*N*M*M;
hipLaunchKernelGGL(( multiply), dim3(grid), dim3(bloque), 0, 0, d_array1, d_array2 , d_array3,size_array, thread_number);
hipDeviceSynchronize();
// download and inspect the result on the host:
hipMemcpy(h_array3, d_array3, sizeof(int)*size_array, hipMemcpyDeviceToHost);
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt(size_array))==0)
printf("\n");
}
printf("\n");
// deallocate memory
free(h_array3); free(h_array2); free(h_array1);
hipFree(d_array3);hipFree(d_array2);hipFree(d_array1);
printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
}
| 456390b3fdc149af4f71e4d13611a01139d70aa4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
#define M 1
#define N 10
__global__ void multiply( const int a[] ,const int b[], int c[] , const int dim,const int thread_number)
{
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
if(index<dim){
if(dim<=thread_number){ //if more threads than array size
printf("Thread %i; Modifying value of index %i\n ", index, index);
c[index]=a[index]+b[index];
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
printf("Thread %i; Modifying value of index %i \n", index, i);
c[i]=a[i]+b[i];
}
}
else{ //if last thread deal with all remaining array entries
for(int i=index*(int)(dim/thread_number); i< dim; i++){
printf("Thread %i; Modifying value of index %i\n",index, i );
c[i]=a[i]+b[i];
}
}
}
}
}
int main(int argc, char *argv[]){
//Measure time
clock_t time_begin;
time_begin=clock();
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
int size_array=9; //here, size_array =L hqs to be a square
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (int*)malloc( size_array * sizeof(int));
for(int i=0; i<size_array; i++){
h_array1[i]=rand()%10;
h_array2[i]=rand()%10;
printf("%i|%i\t", h_array1[i], h_array2[i]);
if((i+1)%(int)sqrt(size_array)==0)
printf("\n");
}
printf("\n");
// cudaMalloc a device array
cudaMalloc(&d_array1,size_array * sizeof(int));
cudaMalloc(&d_array2,size_array * sizeof(int));
cudaMalloc(&d_array3,size_array * sizeof(int));
// download and inspect the result on the host:
cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
dim3 bloque(N,N); //Two-dimensional block of N*N threads
dim3 grid(M,M); //Two-dimensional grid of M*M blocks
int thread_number= N*N*M*M;
multiply<<<grid, bloque>>>(d_array1, d_array2 , d_array3,size_array, thread_number);
cudaThreadSynchronize();
// download and inspect the result on the host:
cudaMemcpy(h_array3, d_array3, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt(size_array))==0)
printf("\n");
}
printf("\n");
// deallocate memory
free(h_array3); free(h_array2); free(h_array1);
cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
}
|
cacc15547fd75abebcfbc98e4dd48f3a07209cee.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| cacc15547fd75abebcfbc98e4dd48f3a07209cee.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
e7095e642d8b818e7929d2ff5f04f43149fe895a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_multi_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_multi_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::multi_cta_search {
#define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t block_size, \
uint32_t result_buffer_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
uint32_t num_cta_per_query, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_widthh, \
size_t min_iterations, \
size_t max_iterations, \
hipStream_t stream);
instantiate_kernel_selection(32, 1024, uint8_t, uint32_t, float);
#undef instantiate_kernel_selection
} // namespace raft::neighbors::cagra::detail::multi_cta_search
| e7095e642d8b818e7929d2ff5f04f43149fe895a.cu |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_multi_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_multi_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::multi_cta_search {
#define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t block_size, \
uint32_t result_buffer_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
uint32_t num_cta_per_query, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_widthh, \
size_t min_iterations, \
size_t max_iterations, \
cudaStream_t stream);
instantiate_kernel_selection(32, 1024, uint8_t, uint32_t, float);
#undef instantiate_kernel_selection
} // namespace raft::neighbors::cagra::detail::multi_cta_search
|
fd830b8240e926179beac4e11073deb812b9425d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <linalg/eltwise2d.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace MLCommon {
namespace LinAlg {
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
hipStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
hipLaunchKernelGGL(( naiveEltwise2DAddKernel<Type>)
, dim3(nblks), dim3(TPB), 0, stream, rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
RAFT_CUDA_TRY(hipPeekAtLastError());
}
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
Eltwise2dTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
RAFT_CUDA_TRY(hipStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
rmm::device_uvector<T> in1(h, stream);
rmm::device_uvector<T> in2(w, stream);
out_ref.resize(len, stream);
out.resize(len, stream);
r.uniform(in1.data(), h, T(-1.0), T(1.0), stream);
r.uniform(in2.data(), w, T(-1.0), T(1.0), stream);
naiveEltwise2DAdd(
h, w, in1.data(), in2.data(), out_ref.data(), out_ref.data(), (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1.data(), in2.data(), out.data(), out.data(), (T)1, (T)1);
RAFT_CUDA_TRY(hipStreamDestroy(stream));
}
protected:
hipStream_t stream = 0;
Eltwise2dInputs<T> params;
rmm::device_uvector<T> out_ref, out;
};
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
MLCommon::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
MLCommon::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
| fd830b8240e926179beac4e11073deb812b9425d.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <gtest/gtest.h>
#include <linalg/eltwise2d.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace MLCommon {
namespace LinAlg {
template <typename Type>
__global__ void naiveEltwise2DAddKernel(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < cols * rows) {
const auto x = tid % cols;
const auto y = tid / cols;
const auto d = dPtr[tid];
const auto a = aPtr[y];
const auto b = bPtr[x];
Type accm = alpha * (a + b + d);
if (beta) { accm += beta * cPtr[tid]; }
dPtr[tid] = accm;
}
}
template <typename Type>
void naiveEltwise2DAdd(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta,
cudaStream_t stream)
{
static const int TPB = 64;
int nblks = raft::ceildiv(rows * cols, TPB);
naiveEltwise2DAddKernel<Type>
<<<nblks, TPB, 0, stream>>>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename T>
struct Eltwise2dInputs {
T tolerance;
int w;
int h;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const Eltwise2dInputs<T>& dims)
{
return os;
}
template <typename Type>
void WrapperEltwise2d(int rows,
int cols,
const Type* aPtr,
const Type* bPtr,
const Type* cPtr,
Type* dPtr,
Type alpha,
Type beta)
{
auto op_ = [] __device__(Type a, Type b, Type c) { return a + b + c; };
eltwise2D<Type>(rows, cols, aPtr, bPtr, cPtr, dPtr, alpha, beta, op_, 0);
}
template <typename T>
class Eltwise2dTest : public ::testing::TestWithParam<Eltwise2dInputs<T>> {
protected:
Eltwise2dTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<Eltwise2dInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
RAFT_CUDA_TRY(cudaStreamCreate(&stream));
auto w = params.w;
auto h = params.h;
auto len = w * h;
rmm::device_uvector<T> in1(h, stream);
rmm::device_uvector<T> in2(w, stream);
out_ref.resize(len, stream);
out.resize(len, stream);
r.uniform(in1.data(), h, T(-1.0), T(1.0), stream);
r.uniform(in2.data(), w, T(-1.0), T(1.0), stream);
naiveEltwise2DAdd(
h, w, in1.data(), in2.data(), out_ref.data(), out_ref.data(), (T)1, (T)1, stream);
WrapperEltwise2d<T>(h, w, in1.data(), in2.data(), out.data(), out.data(), (T)1, (T)1);
RAFT_CUDA_TRY(cudaStreamDestroy(stream));
}
protected:
cudaStream_t stream = 0;
Eltwise2dInputs<T> params;
rmm::device_uvector<T> out_ref, out;
};
const std::vector<Eltwise2dInputs<float>> inputsf2 = {{0.000001f, 1024, 1024, 1234ULL}};
const std::vector<Eltwise2dInputs<double>> inputsd2 = {{0.00000001, 1024, 1024, 1234ULL}};
typedef Eltwise2dTest<float> Eltwise2dTestF;
TEST_P(Eltwise2dTestF, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
MLCommon::CompareApprox<float>(params.tolerance)));
}
typedef Eltwise2dTest<double> Eltwise2dTestD;
TEST_P(Eltwise2dTestD, Result)
{
ASSERT_TRUE(MLCommon::devArrMatch(out_ref.data(),
out.data(),
params.w * params.h,
MLCommon::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(Eltwise2dTests, Eltwise2dTestD, ::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
a0a88da545c2c2030dbe513a6d0d6a4c172c7e08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*#############################################################################
******************************************************************************
* <name> hydro_calcFlux2d_cuda </name>
******************************************************************************
*
* <purpose>
* This file provides CUDA kernels to compute the fluxes for the low-order
* scheme in 2D using different types if artificial viscosities.
* </purpose>
*
*#############################################################################
*/
#include <stdio.h>
#include <cmath>
#include <cfloat>
#include <iostream>
#include <coproc_core.h>
#include <coproc_storage_cuda.h>
#include "cudaGatherScatter.h"
#ifdef HAS_INLINE_PTX
#include "cudaDMA.h"
#endif
#include "flagship.h"
#include "cudaMacros.h"
#include "models/hydro/hydro.h"
#include "kernel/System/fmath.h"
// Define CUDA kernel which does not make use of the CUDADMA library
// and is applied to the remaining edges which are not processed in groups
// #define BASELINE_KERNEL hydro_calcFlux2d_shmem
#define BASELINE_KERNEL hydro_calcFlux2d_baseline
// Defines for baseline implementation
#define BASELINE_THREADS_PER_CTA 32*2
#define BASELINE_NEDGE_PER_THREAD 1
// Defines for shared memory implementation
#define SHMEM_DATA_TRANSPOSE true
#define SHMEM_DATA_IDX3 IDX3T
#define SHMEM_NEDGE_PER_THREAD BASELINE_NEDGE_PER_THREAD
#ifdef HAS_INLINE_PTX
// Define CUDA kernel which makes use of the CUDADMA library to achive
// higher throughput between global and shared memory on the device
// #define CUDADMA_PREFETCH_SINGLE
#endif
// Defines for cudaDMA implementation without warp specialisation
#ifdef CUDADMA_NOSPEC
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_nospec
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 0
#define CUDADMA_NEDGE_PER_THREAD 1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 0
#define CUDADMA_DMA_LDS_DEST 0
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS 0
#endif
// Defines for cudaDMA single buffer implementation with prefetching of indices
#ifdef CUDADMA_PREFETCH_SINGLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_prefetch_single
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*4
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 1*1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for cudaDMA double buffer implementation with prefetching of indices
#ifdef CUDADMA_PREFETCH_DOUBLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_prefetch_double
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*4
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 4*1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for cudaDMA double buffer implementation
#ifdef CUDADMA_DOUBLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_double
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 6*1
#define CUDADMA_DMA_LDS_IND 1
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS (3*CUDADMA_DMA_LDS_IND + \
2*CUDADMA_DMA_LDS_SRC + \
2*CUDADMA_DMA_LDS_DEST)
#endif
// Defines for cudaDMA manual buffer implementation
#ifdef CUDADMA_MANUAL
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_manual
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 6*1
#define CUDADMA_DMA_LDS_IND 1
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for empty cudaDMA implementation
#ifndef CUDADMA_KERNEL
#define CUDADMA_COMPUTE_THREADS_PER_CTA 0
#define CUDADMA_THREADS_PER_LD 0
#define CUDADMA_NEDGE_PER_THREAD 0
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 0
#define CUDADMA_DMA_LDS_DEST 0
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS 0
#endif
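// Note (an assumption about how the launcher uses these defines, not stated
// explicitly here): each CTA presumably processes
// *_THREADS_PER_CTA x *_NEDGE_PER_THREAD edges per pass, e.g. the baseline
// kernel with 32*2 = 64 threads and 1 edge per thread covers 64 edges per
// block, whereas the CUDADMA_DOUBLE configuration with 32*2 compute threads
// and 6 edges per thread covers 384 edges per block.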
using namespace std;
namespace hydro2d_cuda
{
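  /*****************************************************************************
   * Background sketch (the exact macro semantics are assumed here, not
   * verified): the INVISCIDFLUX[1-4]_[XY]DIR3_2D macros used below should
   * evaluate the components of the standard inviscid flux vectors of the 2D
   * compressible Euler equations. For conservative variables
   * U = (rho, rho*u, rho*v, E) and pressure p these read
   *
   *   Fx(U) = ( rho*u, rho*u^2 + p, rho*u*v,     u*(E + p) )
   *   Fy(U) = ( rho*v, rho*u*v,     rho*v^2 + p, v*(E + p) )
   *
   * e.g. INVISCIDFLUX2_XDIR3_2D(...,ui,pi) would correspond to the second
   * x-flux component rho_i*ui^2 + pi at the first endpoint of the edge.
   ****************************************************************************/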
/*****************************************************************************
* FluxBase: Compute inviscid fluxes for nedgesim edges
****************************************************************************/
struct InviscidFluxBase
{
/*
* Calculate the inviscid flux for x-direction (not skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxXdir(Td *Fxi,
Td *Fxj,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
}
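    /*
     * Usage sketch (an assumed call pattern, not taken from this file): one
     * edge processed by one thread with non-transposed storage and overwrite
     * semantics, where Fxi/Fxj are NVAR2D-sized thread-local buffers and the
     * ipos arguments are 1-based like the IDX macros:
     *
     *   InviscidFluxBase::calcFluxXdir<1,1,false,false,true>
     *     (Fxi, Fxj, DataAtEdge, ui, uj, pi, pj, 1, 1);
     */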
/*
* Calculate the inviscid flux for y-direction (not skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxYdir(Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
}
/*
* Calculate the inviscid flux for x-direction (skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxXdir(Td *Fx_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
            INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
            INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
}
/*
* Calculate the inviscid flux for y-direction (skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxYdir(Td *Fy_ij,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
// Compute the Galerkin fluxes for x-direction
InviscidFluxBase::
calcFluxXdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fxi,Fxj,DataAtEdge,ui,uj,pi,pj,iposDest,iposSrc);
      // Compute the Galerkin fluxes for y-direction
InviscidFluxBase::
calcFluxYdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fyi,Fyj,DataAtEdge,vi,vj,pi,pj,iposDest,iposSrc);
}
/*
* Calculate the inviscid fluxes in all directions (skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *Fx_ij,
Td *Fy_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
// Compute Galerkin flux difference for x-direction
InviscidFluxBase::
calcFluxXdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fx_ij,DataAtEdge,ui,uj,pi,pj,iposDest,iposSrc);
// Compute Galerkin flux difference for y-direction
InviscidFluxBase::
calcFluxYdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fy_ij,DataAtEdge,vi,vj,pi,pj,iposDest,iposSrc);
}
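    /*
     * Note: unlike the non-skew-symmetric variant above, this overload stores
     * only the flux differences Fx_ij = Fx(U_i) - Fx(U_j) and
     * Fy_ij = Fy(U_i) - Fy(U_j), which presumably halves the per-edge
     * register/shared-memory footprint at the cost of no longer having the
     * individual endpoint fluxes available.
     */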
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
* and multiply them by the precomputed finite element coefficients
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td aux;
#ifdef HYDRO_USE_IBP
// Calculate skew-symmetric inviscid fluxes
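      // What the IBP branch below computes for each flux component k = 1..4
      // (a summary inferred from the code; C(d,e) denotes the edge coefficient
      // for space dimension d at endpoint e):
      //
      //   aux_k = scale * ( C(1,2)*Fk_x(U_j) + C(2,2)*Fk_y(U_j)
      //                   - C(1,1)*Fk_x(U_i) - C(2,1)*Fk_y(U_i) )
      //
      // The contribution is then distributed antisymmetrically: endpoint 1
      // receives +aux_k and endpoint 2 receives -aux_k, keeping the flux
      // assembly conservative.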
// Flux component 1
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 2
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 3
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 4
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
#else
// Calculate inviscid fluxes (not skew-symmetric)
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Destination vector is transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Both vectors are not transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Destination vector is transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Both vectors are not transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
}
#endif
}
};
/*****************************************************************************
* InviscidFlux
****************************************************************************/
struct InviscidFlux : public InviscidFluxBase
{
// Enable use of inherited functions
using InviscidFluxBase::calcFluxXdir;
using InviscidFluxBase::calcFluxYdir;
using InviscidFluxBase::calcEdgeData;
/**************************************************************************
     * Wrapper routines for processing a single edge
*************************************************************************/
/*
* Calculate the inviscid flux for x-direction (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxXdir(Td *Fxi,
Td *Fxj,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxXdir<1,1,false,false,boverwrite>
(Fxi,Fxj,DataAtEdge,ui,uj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for y-direction (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxYdir(Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxYdir<1,1,false,false,boverwrite>
(Fyi,Fyj,DataAtEdge,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for x-direction (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxXdir(Td *Fx_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxXdir<1,1,false,false,boverwrite>
(Fx_ij,DataAtEdge,ui,uj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for y-direction (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxYdir(Td *Fy_ij,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxYdir<1,1,false,false,boverwrite>
(Fy_ij,DataAtEdge,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcEdgeData(Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(Fxi,Fxj,Fyi,Fyj,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcEdgeData(Td *Fx_ij,
Td *Fy_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(Fx_ij,Fy_ij,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
* and multiply them by the precomputed finite element coefficients
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,
scale,1,1,iedge,nedge,ncoeff);
}
};
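  // Note: these wrappers simply delegate to InviscidFluxBase with a
  // compile-time block size of one edge and non-transposed storage
  // (template arguments <1,1,false,false,boverwrite> and positions 1,1),
  // so a single edge can be processed without explicit position arguments.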
/*****************************************************************************
   * InviscidFluxDissipationBase (basic functionality for individual specialisations)
****************************************************************************/
template <int idissipationtype>
struct InviscidFluxDissipationBase
{
};
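  // The primary template is deliberately empty: each dissipation type
  // (DISSIPATION_ZERO, DISSIPATION_SCALAR, DISSIPATION_ROE, DISSIPATION_RUSANOV
  // and their dimensional-split variants) supplies its own calcEdgeData()
  // in one of the specialisations below.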
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing zero
   * artificial dissipation, i.e. the standard Galerkin scheme
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ZERO>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing scalar
* artificial dissipation proportional to the spectral radius
   * (largest eigenvalue in modulus) of the cumulative Roe matrix.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_SCALAR>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
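      // Note: ROE_MEAN_RATIO/ROE_MEAN_VALUE are assumed to implement the
      // density-weighted Roe average, e.g. aux = sqrt(ri/rj) and
      // u_ij = (aux*ui+uj)/(aux+DCONST(1.0)); the exact definition is
      // provided by the macros included elsewhere.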
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
// Compute auxiliary variables
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
Td vel_ij = u_ij * a[0] + v_ij * a[1];
// Compute the speed of sound
Td c_ij = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON));
// Compute scalar dissipation
Td d_ij = abs(vel_ij) + sqrt(a[0] * a[0] + a[1] * a[1])*c_ij;
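      // d_ij is the spectral radius of the Roe flux Jacobian projected onto
      // the skew-symmetric coefficient: |u_ij*a[0]+v_ij*a[1]| + ||a||*c_ij.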
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing scalar
* artificial dissipation proportional to the spectral radius
   * (largest eigenvalue in modulus) of the dimensional-split Roe matrix.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_SCALAR_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variables
Td q_ij = DCONST(0.5) *(u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c_ij = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON));
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
// Compute scalar dissipation
Td d_ij = ( abs(a[0]*u_ij) + abs(a[0])*c_ij +
abs(a[1]*v_ij) + abs(a[1])*c_ij );
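      // Dimensional splitting treats the x- and y-contributions separately:
      // d_ij = |a[0]*u_ij| + |a[0]|*c_ij + |a[1]*v_ij| + |a[1]|*c_ij, which by
      // the triangle inequality bounds the unsplit spectral radius from above.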
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* tensorial artificial dissipation of Roe-type.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ROE>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
Td anorm = sqrt(a[0] * a[0] + a[1] * a[1]);
if (anorm > DBL_EPSILON) {
// Normalise the skew-symmetric coefficient
a[0] = a[0]/anorm;
a[1] = a[1]/anorm;
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variables
Td vel_ij = u_ij * a[0] + v_ij * a[1];
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c2_ij = max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON);
Td c_ij = sqrt(c2_ij);
// Compute eigenvalues
Td l1 = abs(vel_ij-c_ij);
Td l2 = abs(vel_ij);
Td l3 = abs(vel_ij+c_ij);
Td l4 = abs(vel_ij);
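        // These are the moduli of the eigenvalues of the projected Roe matrix:
        // vel_ij-c_ij and vel_ij+c_ij for the acoustic waves and the double
        // eigenvalue vel_ij for the entropy and shear waves.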
// Compute solution difference U_j-U_i
Td Diff[NVAR2D];
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
Td aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
Td aux2 = (vel_ij*Diff[0]
-a[0]*Diff[1]
-a[1]*Diff[2])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
Td w1 = l1 * (aux1 + aux2);
Td w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
Td w3 = l3 * (aux1 - aux2);
Td w4 = l4 * ((a[0]*v_ij-a[1]*u_ij)*Diff[0]
+a[1]*Diff[1]
-a[0]*Diff[2]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = anorm * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = anorm * ( (u_ij-c_ij*a[0])*w1 + u_ij*w2 +
(u_ij+c_ij*a[0])*w3 + a[1]*w4 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = anorm * ( (v_ij-c_ij*a[1])*w1 + v_ij*w2 +
(v_ij+c_ij*a[1])*w3 - a[0]*w4 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = anorm * ( (H_ij-c_ij*vel_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*vel_ij)*w3 + (u_ij*a[1]-v_ij*a[0])*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = anorm * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = anorm * ( (u_ij-c_ij*a[0])*w1 + u_ij*w2 +
(u_ij+c_ij*a[0])*w3 + a[1]*w4 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = anorm * ( (v_ij-c_ij*a[1])*w1 + v_ij*w2 +
(v_ij+c_ij*a[1])*w3 - a[0]*w4 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = anorm * ( (H_ij-c_ij*vel_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*vel_ij)*w3 + (u_ij*a[1]-v_ij*a[0])*w4 );
}
} else {
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* tensorial artificial dissipation of Roe-type using dimensional splitting.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ROE_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
Td anorm = sqrt(a[0] * a[0] + a[1] * a[1]);
if (anorm > DBL_EPSILON) {
        // Compute the absolute values of the coefficients
a[0] = abs(a[0]);
a[1] = abs(a[1]);
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variable
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c2_ij = max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON);
Td c_ij = sqrt(c2_ij);
//----------------------------------------------------------------------
// Dimensional splitting: x-direction
//----------------------------------------------------------------------
// Compute eigenvalues
Td l1 = abs(u_ij-c_ij);
Td l2 = abs(u_ij);
Td l3 = abs(u_ij+c_ij);
Td l4 = abs(u_ij);
// Compute solution difference U_j-U_i
Td Diff[NVAR2D];
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
Td aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
Td aux2 = (u_ij*Diff[0]
-Diff[1])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
Td w1 = l1 * (aux1 + aux2);
Td w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
Td w3 = l3 * (aux1 - aux2);
Td w4 = l4 * (v_ij*Diff[0]-Diff[2]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = a[0] * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (u_ij-c_ij)*w1 + u_ij*w2 + (u_ij+c_ij)*w3 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = a[0] * ( v_ij*w1 + v_ij*w2 + v_ij*w3 - w4 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (H_ij-c_ij*u_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*u_ij)*w3 - v_ij*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = a[0] * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (u_ij-c_ij)*w1 + u_ij*w2 + (u_ij+c_ij)*w3 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = a[0] * ( v_ij*w1 + v_ij*w2 + v_ij*w3 - w4 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (H_ij-c_ij*u_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*u_ij)*w3 - v_ij*w4 );
}
//----------------------------------------------------------------------
// Dimensional splitting: y-direction
//----------------------------------------------------------------------
// Compute eigenvalues
l1 = abs(v_ij-c_ij);
l2 = abs(v_ij);
l3 = abs(v_ij+c_ij);
l4 = abs(v_ij);
// Compute solution difference U_j-U_i
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
aux2 = (v_ij*Diff[0]-Diff[2])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
w1 = l1 * (aux1 + aux2);
w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
w3 = l3 * (aux1 - aux2);
w4 = l4 * (-u_ij*Diff[0]+Diff[1]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) += a[1] * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) += a[1] * ( u_ij*w1 + u_ij*w2 + u_ij*w3 + w4 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (v_ij-c_ij)*w1 + v_ij*w2 + (v_ij+c_ij)*w3 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (H_ij-c_ij*v_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*v_ij)*w3 + u_ij*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) += a[1] * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) += a[1] * ( u_ij*w1 + u_ij*w2 + u_ij*w3 + w4 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (v_ij-c_ij)*w1 + v_ij*w2 + (v_ij+c_ij)*w3 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (H_ij-c_ij*v_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*v_ij)*w3 + u_ij*w4 );
}
} else {
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* scalar artificial dissipation of Rusanov-type.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_RUSANOV>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td Ei,Ej;
if (btransposeSrc) {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute the speed of sound
Td ci = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ei-DCONST(0.5)*(ui*ui+vi*vi)), DBL_EPSILON));
Td cj = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ej-DCONST(0.5)*(uj*uj+vj*vj)), DBL_EPSILON));
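      // The sound speed follows from the ideal-gas relations
      // p = (gamma-1)*rho*(E-|v|^2/2) and c^2 = gamma*p/rho; the max() with
      // DBL_EPSILON guards the sqrt() against non-physical states.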
#ifdef HYDRO_USE_IBP
// Compute scalar dissipation based on the skew-symmetric part
// which does not include the symmetric boundary contribution
Td d_ij = max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*uj+
DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*vj)+
DCONST(0.5)*sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge),2))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*ui+
DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*vi)+
DCONST(0.5)*sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge),2))*ci );
#else
// Compute scalar dissipation
Td d_ij = max( abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*uj+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*vj)+
sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge),2))*cj,
abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*ui+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*vi)+
sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge),2))*ci );
#endif
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* scalar artificial dissipation of Rusanov-type using dimensional splitting.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_RUSANOV_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td Ei,Ej;
if (btransposeSrc) {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute the speed of sound
Td ci = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ei-DCONST(0.5)*(ui*ui+vi*vi)), DBL_EPSILON));
Td cj = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ej-DCONST(0.5)*(uj*uj+vj*vj)), DBL_EPSILON));
#ifdef HYDRO_USE_IBP
// Compute scalar dissipation with dimensional splitting based on
// the skew-symmetric part which does not include the symmetric
// boundary contribution
Td d_ij = max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*uj)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*ui)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)))*ci )
+ max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*vj)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*vi)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)))*ci );
#else
// Compute scalar dissipation with dimensional splitting
Td d_ij = max( abs(IDX3(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*uj)+
abs(IDX3(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*cj,
abs(IDX3(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*ui)+
abs(IDX3(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*ci )
+ max( abs(IDX3(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*vj)+
abs(IDX3(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*cj,
abs(IDX3(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*vi)+
abs(IDX3(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*ci );
#endif
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipation: Artificial dissipation
****************************************************************************/
template <int idissipationtype>
struct InviscidFluxDissipation : public InviscidFluxDissipationBase<idissipationtype>
{
// Enable use of inherited functions
using InviscidFluxDissipationBase<idissipationtype>::calcEdgeData;
/***************************************************************************
* Wrapper routine for processing a single edge
**************************************************************************/
template <typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
InviscidFluxDissipationBase<idissipationtype>::calcEdgeData<1,1,false,false>
(VectorAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1,iedge,nedge,ncoeff);
}
};
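  // Usage sketch (cf. the baseline kernel below): for a single edge the
  // dissipative flux difference is typically obtained as, e.g.,
  //   InviscidFluxDissipation<idissipationtype>::calcEdgeData
  //     (Diff,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,iedge,nedge,ncoeff);
  // where Diff is a local array with NVAR2D entries.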
/*****************************************************************************
* FluxBase
****************************************************************************/
struct FluxBase
{
/*
* Combine inviscid fluxes (not skew-symmetric) and artificial diffusion
*/
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
}
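    // The flux is accumulated antisymmetrically: the contribution added at the
    // first edge endpoint is subtracted at the second one, so the edge-based
    // assembly remains conservative.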
/*
* Combine inviscid fluxes (skew-symmetric) and artificial diffusion
*/
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fx_ij,
Td *Fy_ij,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
}
/*
* Combine inviscid fluxes with artificial diffusion
*/
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
}
}
};
/*****************************************************************************
* Flux
****************************************************************************/
struct Flux : public FluxBase
{
// Enable use of inherited functions
using FluxBase::combineEdgeData;
/***************************************************************************
* Wrapper routines for processing a single edge
**************************************************************************/
/*
* Combine inviscid fluxes (not skew-symmetric) and artificial diffusion
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *Diff,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,Fxi,Fxj,Fyi,Fyj,Diff,scale,1,1,iedge,nedge,ncoeff);
}
/*
* Combine inviscid fluxes (skew-symmetric) and artificial diffusion
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fx_ij,
Td *Fy_ij,
Td *Diff,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,Fx_ij,Fy_ij,Diff,scale,1,1,iedge,nedge,ncoeff);
}
/*
* Combine inviscid fluxes with artificial diffusion
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Td *Diff,
Td scale)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,Diff,scale,1,1);
}
};
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (baseline implementation).
****************************************************************************/
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int threads_per_cta>
__launch_bounds__(threads_per_cta)
__global__ void hydro_calcFlux2d_baseline(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Loop over all items per thread
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
// Global edge ID
Ti idx = (ipt*gridDim.x+blockIdx.x)*threads_per_cta+nedge_offset+threadIdx.x;
if (threadIdx.x<threads_per_cta && idx<nedge_last)
{
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2_EDGELIST(IedgeList,1,idx+1,6,nedge);
Ti j = IDX2_EDGELIST(IedgeList,2,idx+1,6,nedge);
// Local variables
TdDest DataAtEdge[2*NVAR2D];
// Get solution values at edge endpoints
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
gatherEdgeData<true>(DataAtEdge,vecSrc,i,j,neq);
// Compute velocities
TdDest ui = XVELOCITY2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest vi = YVELOCITY2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest uj = XVELOCITY2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
TdDest vj = YVELOCITY2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
// Compute pressures
TdDest pi = PRESSURE2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest pj = PRESSURE2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::calcEdgeData
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<false>
(FluxAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,scale,idx+1,nedge,ncoeff);
// Build fluxes into nodal vector
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
scatterEdgeData<false>(vecDest,FluxAtEdge,i,j,neq);
}
}
};
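/*
 * Host-side usage sketch for the baseline kernel above. This is illustrative
 * only: the scalar types, the SYSTEM_SCALAR/DISSIPATION_SCALAR constants and
 * the concrete block size are assumptions, not values prescribed by this
 * file. The block size must equal the threads_per_cta template parameter so
 * that each thread handles one edge, and the last three arguments are
 * nedge_last, nedge_per_thread and nedge_offset:
 *
 *   const int tpc = 128;
 *   dim3 block(tpc);
 *   dim3 grid((nedge + tpc - 1) / tpc);
 *   hydro_calcFlux2d_baseline
 *     <double,double,double,int,SYSTEM_SCALAR,DISSIPATION_SCALAR,tpc>
 *     <<<grid,block>>>(CoeffsAtEdge, IedgeList, vecSrc, vecDest,
 *                      scale, neq, nedge, ncoeff, nedge, 1, 0);
 */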
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (shared memory implementation).
****************************************************************************/
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int threads_per_cta>
__launch_bounds__(threads_per_cta)
__global__ void hydro_calcFlux2d_shmem(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ TdSrc s_DataAtEdge[2*NVAR2D*threads_per_cta];
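// Note: the static shared-memory footprint of this buffer is
// 2*NVAR2D*threads_per_cta*sizeof(TdSrc) bytes; for illustration, with
// NVAR2D=4, threads_per_cta=128 and double-precision data this amounts to
// 8 KiB per thread block (these concrete numbers are assumptions, not
// values fixed by this file).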
// Loop over all items per thread
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
// Global edge ID
Ti idx = (ipt*gridDim.x+blockIdx.x)*threads_per_cta+nedge_offset+threadIdx.x;
if (threadIdx.x<threads_per_cta && idx<nedge_last)
{
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2_EDGELIST(IedgeList,1,idx+1,6,nedge);
Ti j = IDX2_EDGELIST(IedgeList,2,idx+1,6,nedge);
// Get solution values at edge endpoints
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
gatherEdgeData<threads_per_cta,SHMEM_DATA_TRANSPOSE,true>
(s_DataAtEdge,vecSrc,(int)threadIdx.x+1,i,j,neq);
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest pj = PRESSURE3_3D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,threads_per_cta,false,SHMEM_DATA_TRANSPOSE>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,threads_per_cta,false,SHMEM_DATA_TRANSPOSE,false>
(FluxAtEdge,CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
scale,1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Build fluxes into nodal vector
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
scatterEdgeData<false>(vecDest,FluxAtEdge,i,j,neq);
}
}
};
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation
* without warp specialisation).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)
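// For illustration only: with compute_threads_per_cta=128,
// dma_threads_per_ld=32 and each CUDADMA_DMA_LDS_* constant equal to 1
// (all of these are assumptions, the constants are defined elsewhere),
// the macro above yields 128+32*(1+1+1) = 224 threads per CTA. Since this
// variant performs the transfers without warp specialisation, all of
// these threads cooperate in the DMA transfers; there are no dedicated
// DMA warps.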
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_nospec(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[2*compute_threads_per_cta];
__shared__ TdSrc s_DataAtEdge[NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
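// Illustration with hypothetical values: for nedge=3 edges (1,2), (2,3)
// and (1,3), the SoA storage begins
//   IedgeList = [ 1, 2, 1,   2, 3, 3,   <matrix-position entries>, ... ]
// i.e. all first endpoints, then all second endpoints, followed by the
// four matrix-position subarrays, which are not needed here.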
cudaDMASequential<false, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
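// Illustration with hypothetical values: for the same three edges the
// AoS storage begins
//   IedgeList = [ 1, 2, <4 matrix positions>,   2, 3, <4 matrix positions>, ... ]
// so consecutive (i,j) pairs start 6*sizeof(Ti) bytes apart, which is the
// source stride passed to the strided descriptor below.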
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_DataAtEdge, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
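// Illustration (assuming NVAR2D=4 conservative variables per node):
//   interleaved format: vecSrc = [ U1_1,U2_1,U3_1,U4_1, U1_2,U2_2,U3_2,U4_2, ... ]
//   block format:       vecSrc = [ U1_1..U1_neq, U2_1..U2_neq, ... ]
// In the interleaved case one indirect DMA element carries all NVAR2D
// values of a node, in the block case only a single value, which is why
// the element size in the descriptor below switches on isystemformat.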
cudaDMAIndirect<true, false,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
TOTAL_THREADS_PER_CTA, 2*compute_threads_per_cta>dma_vec;
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
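// Illustration: with this layout the first four subarrays are laid out as
//   CoeffsAtEdge = [ a_x^ij (nedge values) | a_x^ji | a_y^ij | a_y^ji | ... ]
// so, following the element-size/element-count convention of the cudaDMA
// descriptors used in this file, the strided descriptor below reads 2*2
// chunks of compute_threads_per_cta values each, with a source stride of
// nedge*sizeof(Tc) between consecutive subarrays.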
cudaDMAStrided<false, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
TOTAL_THREADS_PER_CTA,
2*2>dma_coeff(nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as an array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<false, sizeof(Tc), 2*sizeof(Tc),
TOTAL_THREADS_PER_CTA,
2*compute_threads_per_cta>dma_coeff(ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
if (nedge_per_thread>1)
__syncthreads();
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)], s_IedgeList);
__syncthreads();
dma_vec.execute_dma(s_IedgeList, vecSrc-NVAR2D, s_DataAtEdge);
dma_coeff.execute_dma(&CoeffsAtEdge[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge);
__syncthreads();
//--------------------------------------------------------------------------
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
dma_vec.execute_dma(s_IedgeList, vecDest-NVAR2D, s_DataAtEdge);
__syncthreads();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList,1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_DataAtEdge, ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList,2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_DataAtEdge, ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation with
* manual single buffering strategy with prefetching of indices).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_prefetch_single(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[1][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[1][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[1][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[1][2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<false, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(0, compute_threads_per_cta, compute_threads_per_cta);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
// stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(1, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as an array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
#endif
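// For illustration only: assuming compute_threads_per_cta=128,
// dma_threads_per_ld=32 and CUDADMA_DMA_LDS_SRC=CUDADMA_DMA_LDS_DEST=
// CUDADMA_DMA_LDS_COEFF=1 (none of these values are fixed by this file),
// the descriptors above split the CTA as follows: threads 0-127 act as
// compute warps, threads 128-159 stage vecSrc, threads 160-191 stage
// vecDest, and threads 192-223 stage the edge coefficients.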
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
if (nedge_per_thread>1)
ptx_cudaDMA_barrier_blocking(5, TOTAL_THREADS_PER_CTA);
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
ptx_cudaDMA_barrier_blocking(5, TOTAL_THREADS_PER_CTA);
//------------------------------------------------------------------------
// Warp specialisation
//------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
}
//------------------------------------------------------------------------
// DMA transfer warps
//------------------------------------------------------------------------
else if(dma_vecSrc0.owns_this_thread()) {
dma_vecSrc0.execute_dma(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
}
else if(dma_vecDest0.owns_this_thread()) {
dma_vecDest0.execute_dma(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
}
else if(dma_coeff0.owns_this_thread()) {
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation with
* manual double buffering strategy with prefetching of indices).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_prefetch_double(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[4][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2][2*2*compute_threads_per_cta];
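// Note on the static shared-memory budget of this kernel: for
// illustration, with compute_threads_per_cta=128, NVAR2D=4, 4-byte Ti and
// double-precision Tc/TdSrc/TdDest (all of these are assumptions), the
// four buffers above occupy 4 KiB + 16 KiB + 16 KiB + 8 KiB = 44 KiB per
// thread block, i.e. close to the classic 48 KiB shared-memory limit.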
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<false, 2*sizeof(Ti),
4*2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
4*compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(1, compute_threads_per_cta, compute_threads_per_cta);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
// stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(3, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(4, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff1(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as an array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(4, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff1(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=4) {
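// The body of this loop is a manually unrolled four-stage pipeline: the
// index DMA a few lines below fetches all four edge groups of this
// iteration into s_IedgeList[0..3] at once, and the four compute stages
// that follow cycle IBUF through 0..3 while alternating the two data
// buffers (DBUF 0,1,0,1), so that the DMA warps can refill one data
// buffer while the compute warps work on the other.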
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
ptx_cudaDMA_barrier_blocking(11, TOTAL_THREADS_PER_CTA);
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
ptx_cudaDMA_barrier_blocking(11, TOTAL_THREADS_PER_CTA);
//------------------------------------------------------------------------
// Warp specialisation
//------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
dma_coeff1.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Start DMA transfer of coefficients
dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
#define IBUF 3
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
}
//------------------------------------------------------------------------
// DMA transfer warps
//------------------------------------------------------------------------
else if(dma_vecSrc0.owns_this_thread()) {
dma_vecSrc0.execute_dma(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc1.execute_dma(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc0.execute_dma(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc1.execute_dma(s_IedgeList[3], vecSrc-NVAR2D, s_VecSrc[1]);
}
else if(dma_vecDest0.owns_this_thread()) {
dma_vecDest0.execute_dma(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest1.execute_dma(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest0.execute_dma(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest1.execute_dma(s_IedgeList[3], vecDest-NVAR2D, s_VecDest[1]);
}
else if(dma_coeff0.owns_this_thread()) {
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
0*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
dma_coeff1.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
1*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[1]);
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
2*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
dma_coeff1.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
3*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[1]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation with
* double buffering strategy).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC+2*CUDADMA_DMA_LDS_DEST)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_double(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[3][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind1(1, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind2(2, compute_threads_per_cta,
compute_threads_per_cta+2*CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind0(0, compute_threads_per_cta,
compute_threads_per_cta,
6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind1(1, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind2(2, compute_threads_per_cta,
compute_threads_per_cta+2*CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(3, compute_threads_per_cta,
compute_threads_per_cta+3*CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(4, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+1*CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
// stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(5, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(6, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC
+1*CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
// cudaDMAStrided<false, sizeof(Tc),
// compute_threads_per_cta*sizeof(Tc),
// TOTAL_THREADS_PER_CTA,
// 2*2>dma_coeff(nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as an array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
// cudaDMAStrided<false, sizeof(Tc), 2*sizeof(Tc),
// TOTAL_THREADS_PER_CTA,
// 2*compute_threads_per_cta>dma_coeff(ncoeff*sizeof(Tc));
#endif
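// Note: in this variant the edge coefficients are deliberately not staged
// through shared memory (the descriptors above are left commented out);
// the compute warps below pass the global CoeffsAtEdge pointer, together
// with the global edge index, directly to the
// InviscidFluxDissipation/InviscidFlux::calcEdgeData calls.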
//--------------------------------------------------------------------------
// Warp specialisation
//--------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of indices
dma_ind0.start_async_dma();
dma_ind1.start_async_dma();
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
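// The loop body below is a manually unrolled six-stage pipeline: the
// three index buffers (IBUF cycling 0,1,2) are kept filled by dedicated
// index-DMA warps, the two data buffers (DBUF alternating 0,1) are
// double-buffered, and, broadly, each stage restarts the transfers into
// the buffers it has just consumed before waiting on the buffers needed
// by the next stage.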
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
Ti idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 0
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 1
#define DBUF 0
#define IOFF 4
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 2
#define DBUF 1
#define IOFF 5
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
}
}
//--------------------------------------------------------------------------
// DMA transfer warps
//--------------------------------------------------------------------------
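// Note on the warp specialisation below: every cudaDMA object claims a
// disjoint set of DMA threads via owns_this_thread(), so exactly one of
// the following branches is taken per thread. The index loops run
// slightly past nedge_per_thread (+1/+2/+3 with starts 0/1/2), which
// matches the index buffers that are prefetched before the compute
// pipeline above starts.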
else if(dma_ind0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread+1; ipt+=3) {
dma_ind0.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
}
}
else if(dma_ind1.owns_this_thread()) {
for (int ipt=1; ipt<nedge_per_thread+2; ipt+=3) {
dma_ind1.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[1]);
}
}
else if(dma_ind2.owns_this_thread()) {
for (int ipt=2; ipt<nedge_per_thread+3; ipt+=3) {
dma_ind2.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[2]);
}
}
else if(dma_vecSrc0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
}
}
else if(dma_vecSrc1.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
}
}
else if(dma_vecDest0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
}
}
else if(dma_vecDest1.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation with
* manual buffering strategy).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
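// Thread layout of this kernel: the first compute_threads_per_cta
// threads perform the flux computation, followed by one group of
// dma_threads_per_ld threads for each loader type (edge indices,
// source vector, destination vector, edge coefficients). With the
// CUDADMA_MANUAL settings defined elsewhere in this file (64 compute
// threads, 32 threads per loader, one loader of each type) this would
// amount to 64 + 4*32 = 192 threads per CTA.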
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_manual(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[3][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2][2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
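// The three cudaDMASequential instances below are identical except for
// their dmaID (0,1,2), i.e. the named barrier used for the handshake
// with the compute warps; each instance fills one of the three
// s_IedgeList buffers of the software pipeline.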
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind1(1, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind2(2, compute_threads_per_cta, compute_threads_per_cta);
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
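// Each cudaDMAStrided instance below gathers compute_threads_per_cta
// elements of 2*sizeof(Ti) bytes (the endpoint pair (i,j)) with a
// source stride of 6*sizeof(Ti) between consecutive edges, so only
// the first two entries of each edge record reach shared memory.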
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind1(1, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind2(2, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc; we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
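// Note: the endpoint numbers stored in s_IedgeList are 1-based, which
// is presumably why the DMA warps below gather from the shifted base
// address vecSrc-NVAR2D (interleaved format) in their
// execute_dma_no_sync calls.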
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(3, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(4, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest; we need to distinguish between vecDest
// stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(6, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
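// For SOA storage dma_coeff0 gathers, per buffer, four blocks (the
// ij/ji coefficients in x- and y-direction) of compute_threads_per_cta
// values each, with a stride of nedge*sizeof(Tc) between the
// subarrays. Note that the compute part of this kernel passes the
// global CoeffsAtEdge array to the flux routines, so the
// s_CoeffsAtEdge buffer does not appear to be read at present.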
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(7, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
// cudaDMAStrided<true, sizeof(Tc),
// compute_threads_per_cta*sizeof(Tc),
// CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
// 2*2>
// dma_coeff1(8, compute_threads_per_cta, compute_threads_per_cta+
// (CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
// nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(7, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
// cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
// CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
// 2*compute_threads_per_cta>
// dma_coeff1(8, compute_threads_per_cta, compute_threads_per_cta+
// (CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
// ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Warp specialisation
//--------------------------------------------------------------------------
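// Threads 0..compute_threads_per_cta-1 act as compute warps and run
// the software-pipelined flux computation below; every remaining
// thread belongs to exactly one cudaDMA object and only moves data
// between global and shared memory.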
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of indices
dma_ind0.start_async_dma();
dma_ind1.start_async_dma();
dma_ind2.start_async_dma();
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// dma_coeff1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
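// The loop body is unrolled six times by hand. In each stage the
// macros IBUF (index buffer 0..2), DBUF (nodal data buffer 0..1) and
// IOFF (edge-group offset 0..5) select which shared-memory buffers
// are consumed while the DMA warps refill the others.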
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
Ti idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 0
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 1
#define DBUF 0
#define IOFF 4
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 2
#define DBUF 1
#define IOFF 5
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
}
}
//--------------------------------------------------------------------------
// DMA transfer warps
//--------------------------------------------------------------------------
else if(dma_ind0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=3) {
dma_ind0.execute_dma(&IedgeList[ (((ipt+0)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
dma_ind1.execute_dma(&IedgeList[ (((ipt+1)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[1]);
dma_ind2.execute_dma(&IedgeList[ (((ipt+2)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[2]);
}
dma_ind0.finish_async_dma();
dma_ind1.finish_async_dma();
}
else if(dma_vecSrc0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
}
}
else if(dma_vecDest0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
}
}
else if(dma_coeff0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=2) {
dma_coeff0.execute_dma(&CoeffsAtEdge[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
// dma_coeff1.execute_dma(&CoeffsAtEdge[ (((ipt+1)*gridDim.x+blockIdx.x)*
// compute_threads_per_cta+nedge_offset)*
// (COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
// s_CoeffsAtEdge[1]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
/*****************************************************************************
* Internal C++ functions which invoke the CUDA kernels
****************************************************************************/
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int idissipationtype>
inline
int hydro_calcFlux2d_cuda(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
TdDest scale,
Ti nblocks,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedgeset,
Ti iedgeset,
hipStream_t stream=0)
{
const hipDeviceProp_t *devProp = coproc_getCurrentDeviceProp();
// Strategy: run the largest possible number of blocks with a
// predefined number of compute/dma threads per block and let each
// compute thread process the minimal number of edges
const int compute_threads_per_cta = CUDADMA_COMPUTE_THREADS_PER_CTA;
const int dma_threads_per_ld = CUDADMA_THREADS_PER_LD;
const int dma_lds = CUDADMA_DMA_LDS;
int nedge_per_thread_cudaDMA = CUDADMA_NEDGE_PER_THREAD;
const int threads_per_cta_baseline = BASELINE_THREADS_PER_CTA;
int nedge_per_thread_baseline = BASELINE_NEDGE_PER_THREAD;
int blocks, threads, nedge_cudaDMA, nedge_baseline;
prepare_cudaDMA(devProp, nedgeset,
&nedge_per_thread_cudaDMA,
compute_threads_per_cta, dma_threads_per_ld,
dma_lds, &blocks, &threads, &nedge_cudaDMA);
dim3 grid_cudaDMA(blocks, 1, 1);
dim3 block_cudaDMA(threads, 1, 1);
prepare_baseline(devProp, nedgeset-nedge_cudaDMA,
&nedge_per_thread_baseline, threads_per_cta_baseline,
&blocks, &threads, &nedge_baseline);
dim3 grid_baseline(blocks, 1, 1);
dim3 block_baseline(threads, 1, 1);
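// The edge set is split in two parts: the first nedge_cudaDMA edges
// are processed in groups by the cudaDMA kernel, the remaining
// nedgeset-nedge_cudaDMA edges by the baseline kernel; this is
// reflected in the offsets nedge_cudaDMA+iedgeset-1 passed to the
// kernel launches below.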
TdSrc *vecSrc = (TdSrc*)(*d_vecSrc);
TdDest *vecDest = (TdDest*)(*d_vecDest);
Tc *CoeffsAtEdge = (Tc*)(*d_CoeffsAtEdge);
Ti *IedgeList = (Ti*)(*d_IedgeList);
if (nblocks == 1) {
#ifdef CUDADMA_KERNEL
if (grid_cudaDMA.x>0) {
// CudaDMA implementation
hipLaunchKernelGGL(( CUDADMA_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_SCALAR,idissipationtype,
MAX(32,compute_threads_per_cta),MAX(32,dma_threads_per_ld)>)
, dim3(grid_cudaDMA), dim3(block_cudaDMA), 0, stream, CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedge_cudaDMA+iedgeset-1,
nedge_per_thread_cudaDMA,
iedgeset-1);
}
#endif
#ifdef BASELINE_KERNEL
if (grid_baseline.x>0) {
// Baseline implementation
hipLaunchKernelGGL(( BASELINE_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_SCALAR,idissipationtype,
threads_per_cta_baseline>)
, dim3(grid_baseline), dim3(block_baseline), 0, stream, CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedgeset+iedgeset-1,
nedge_per_thread_baseline,
nedge_cudaDMA+iedgeset-1);
}
#endif
} else {
#ifdef CUDADMA_KERNEL
if (grid_cudaDMA.x>0) {
// CudaDMA implementation
hipLaunchKernelGGL(( CUDADMA_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_BLOCK,idissipationtype,
MAX(32,compute_threads_per_cta),MAX(32,dma_threads_per_ld)>)
, dim3(grid_cudaDMA), dim3(block_cudaDMA), 0, stream, CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedge_cudaDMA+iedgeset-1,
nedge_per_thread_cudaDMA,
iedgeset-1);
}
#endif
#ifdef BASELINE_KERNEL
if (grid_baseline.x>0) {
// Baseline implementation
hipLaunchKernelGGL(( BASELINE_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_BLOCK,idissipationtype,
threads_per_cta_baseline>)
, dim3(grid_baseline), dim3(block_baseline), 0, stream, CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedgeset+iedgeset-1,
nedge_per_thread_baseline,
nedge_cudaDMA+iedgeset-1);
}
#endif
}
coproc_checkError("hydro_calcFlux2d_cuda");
return 0;
};
/*****************************************************************************
* External C functions which can be called from the Fortran code
****************************************************************************/
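// All wrappers below follow the Fortran calling convention: every
// argument is passed by reference, device memory arrives as opaque
// __SIZET handles that are cast back to typed pointers, and the
// stream is passed as a 64-bit integer. The FNAME macro is assumed to
// map the routine name to the compiler-specific Fortran symbol.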
extern "C"
{
__INT FNAME(hydro_calcfluxgalerkin2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ZERO>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxscdiss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_SCALAR>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxscdissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_SCALAR_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxroediss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ROE>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)(*stream));
}
/***************************************************************************/
__INT FNAME(hydro_calcfluxroedissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ROE_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)*stream);
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxrusdiss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT)hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_RUSANOV>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)*stream);
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxrusdissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_RUSANOV_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(hipStream_t)*stream);
}
};
}
| a0a88da545c2c2030dbe513a6d0d6a4c172c7e08.cu | /*#############################################################################
******************************************************************************
* <name> hydro_calcFlux2d_cuda </name>
******************************************************************************
*
* <purpose>
* This file provides CUDA kernels to compute the fluxes for the low-order
* scheme in 2D using different types if artificial viscosities.
* </purpose>
*
*#############################################################################
*/
#include <stdio.h>
#include <cmath>
#include <cfloat>
#include <iostream>
#include <coproc_core.h>
#include <coproc_storage_cuda.h>
#include "cudaGatherScatter.h"
#ifdef HAS_INLINE_PTX
#include "cudaDMA.h"
#endif
#include "flagship.h"
#include "cudaMacros.h"
#include "models/hydro/hydro.h"
#include "kernel/System/fmath.h"
// Define CUDA kernel which does not make use of the CUDADMA library
// and is applied to the remaining edges which are not processed in groups
// #define BASELINE_KERNEL hydro_calcFlux2d_shmem
#define BASELINE_KERNEL hydro_calcFlux2d_baseline
// Defines for baseline implementation
#define BASELINE_THREADS_PER_CTA 32*2
#define BASELINE_NEDGE_PER_THREAD 1
// Defines for shared memory implementation
#define SHMEM_DATA_TRANSPOSE true
#define SHMEM_DATA_IDX3 IDX3T
#define SHMEM_NEDGE_PER_THREAD BASELINE_NEDGE_PER_THREAD
#ifdef HAS_INLINE_PTX
// Define CUDA kernel which makes use of the CUDADMA library to achieve
// higher throughput between global and shared memory on the device
// #define CUDADMA_PREFETCH_SINGLE
#endif
// Defines for cudaDMA implementation without warp specialisation
#ifdef CUDADMA_NOSPEC
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_nospec
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 0
#define CUDADMA_NEDGE_PER_THREAD 1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 0
#define CUDADMA_DMA_LDS_DEST 0
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS 0
#endif
// Defines for cudaDMA single buffer implementation with prefetching of indices
#ifdef CUDADMA_PREFETCH_SINGLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_prefetch_single
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*4
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 1*1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for cudaDMA double buffer implementation with prefetching of indices
#ifdef CUDADMA_PREFETCH_DOUBLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_prefetch_double
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*4
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 4*1
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for cudaDMA double buffer implementation
#ifdef CUDADMA_DOUBLE
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_double
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 6*1
#define CUDADMA_DMA_LDS_IND 1
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS (3*CUDADMA_DMA_LDS_IND + \
2*CUDADMA_DMA_LDS_SRC + \
2*CUDADMA_DMA_LDS_DEST)
#endif
// Defines for cudaDMA manual buffer implementation
#ifdef CUDADMA_MANUAL
#define CUDADMA_KERNEL hydro_calcFlux2d_cudaDMA_manual
#define CUDADMA_COMPUTE_THREADS_PER_CTA 32*2
#define CUDADMA_THREADS_PER_LD 32*1
#define CUDADMA_NEDGE_PER_THREAD 6*1
#define CUDADMA_DMA_LDS_IND 1
#define CUDADMA_DMA_LDS_SRC 1
#define CUDADMA_DMA_LDS_DEST 1
#define CUDADMA_DMA_LDS_COEFF 1
#define CUDADMA_DMA_LDS (CUDADMA_DMA_LDS_IND + \
CUDADMA_DMA_LDS_SRC + \
CUDADMA_DMA_LDS_DEST + \
CUDADMA_DMA_LDS_COEFF)
#endif
// Defines for empty cudaDMA implementation
#ifndef CUDADMA_KERNEL
#define CUDADMA_COMPUTE_THREADS_PER_CTA 0
#define CUDADMA_THREADS_PER_LD 0
#define CUDADMA_NEDGE_PER_THREAD 0
#define CUDADMA_DMA_LDS_IND 0
#define CUDADMA_DMA_LDS_SRC 0
#define CUDADMA_DMA_LDS_DEST 0
#define CUDADMA_DMA_LDS_COEFF 0
#define CUDADMA_DMA_LDS 0
#endif
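// Summary of the tuning parameters above: *_COMPUTE_THREADS_PER_CTA
// sets the number of compute threads per block, *_THREADS_PER_LD the
// size of one DMA loader group, *_NEDGE_PER_THREAD the number of edges
// assigned to each compute thread, and the *_DMA_LDS_* counters the
// number of loader groups reserved for index, source, destination and
// coefficient transfers.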
using namespace std;
namespace hydro2d_cuda
{
/*****************************************************************************
* InviscidFluxBase: Compute inviscid fluxes for nedgesim edges
****************************************************************************/
struct InviscidFluxBase
{
/*
* Calculate the inviscid flux for x-direction (not skew-symmetric)
*/
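/*
 * For reference (2D Euler equations with conservative variables
 * U = (rho, rho*u, rho*v, E) and pressure p): the inviscid flux in
 * x-direction is
 *
 *   Fx(U) = ( rho*u, rho*u^2 + p, rho*u*v, u*(E + p) ),
 *
 * which is what the INVISCIDFLUX*_XDIR3_2D macros below are expected
 * to evaluate component-wise for the two endpoints i and j.
 */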
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxXdir(Td *Fxi,
Td *Fxj,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2T(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fxi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi);
IDX2(Fxj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fxj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
}
/*
* Calculate the inviscid flux for y-direction (not skew-symmetric)
*/
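/*
 * For reference: the corresponding inviscid flux in y-direction is
 *
 *   Fy(U) = ( rho*v, rho*u*v, rho*v^2 + p, v*(E + p) ),
 *
 * evaluated below by the INVISCIDFLUX*_YDIR3_2D macros for both
 * endpoints.
 */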
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxYdir(Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) = INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2T(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fyi,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyi,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi);
IDX2(Fyj,1,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,2,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,3,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fyj,4,iposDest,NVAR2D,nedgesimDest) += INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
}
/*
* Calculate the inviscid flux for x-direction (skew-symmetric)
*/
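    /*
     * Background sketch: in the skew-symmetric variant only the flux
     * difference between the two endpoints of an edge is kept,
     *
     *   Fx_ij = Fx(U_i) - Fx(U_j),
     *
     * where Fx(U) = ( rho*u, rho*u^2 + p, rho*u*v, u*(rho*E + p) )^T is
     * presumably the x-direction Euler flux evaluated by the
     * INVISCIDFLUXk_XDIR3_2D macros (usual conservative ordering assumed).
     */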
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxXdir(Td *Fx_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
          INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
          INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2T(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fx_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
IDX2(Fx_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj);
}
}
}
}
/*
* Calculate the inviscid flux for y-direction (skew-symmetric)
*/
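    /*
     * Background sketch: analogous to calcFluxXdir above, only the
     * difference Fy_ij = Fy(U_i) - Fy(U_j) of the y-direction fluxes is
     * kept, with Fy(U) = ( rho*v, rho*u*v, rho*v^2 + p, v*(rho*E + p) )^T
     * (usual conservative ordering assumed).
     */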
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcFluxYdir(Td *Fy_ij,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) =
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Destination vector is transposed
IDX2T(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2T(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
else {
// Both vectors are not transposed
IDX2(Fy_ij,1,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,2,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,3,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
IDX2(Fy_ij,4,iposDest,NVAR2D,nedgesimDest) +=
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj);
}
}
}
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
*/
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
// Compute the Galerkin fluxes for x-direction
InviscidFluxBase::
calcFluxXdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fxi,Fxj,DataAtEdge,ui,uj,pi,pj,iposDest,iposSrc);
// Compute Galerkin fluxes for y-direction
InviscidFluxBase::
calcFluxYdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fyi,Fyj,DataAtEdge,vi,vj,pi,pj,iposDest,iposSrc);
}
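    /*
     * Illustrative usage sketch (the flux buffer declarations below are
     * hypothetical and not taken from this file): a kernel that processes
     * one edge per thread, keeps all temporaries untransposed in registers
     * and overwrites the destination buffers could call
     *
     *   Td Fxi[NVAR2D], Fxj[NVAR2D], Fyi[NVAR2D], Fyj[NVAR2D];
     *   InviscidFluxBase::calcEdgeData<1,1,false,false,true>
     *     (Fxi,Fxj,Fyi,Fyj,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1);
     *
     * where iposDest = iposSrc = 1 addresses the single edge processed
     * simultaneously (the IDX macros are used with 1-based indices).
     */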
/*
* Calculate the inviscid fluxes in all directions (skew-symmetric)
*/
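    /*
     * Background sketch: this overload delegates to the skew-symmetric
     * calcFluxXdir/calcFluxYdir above and therefore keeps only the two
     * difference vectors Fx_ij and Fy_ij, i.e. two NVAR2D-sized temporaries
     * per edge instead of the four used by the non-skew-symmetric variant.
     */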
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *Fx_ij,
Td *Fy_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc)
{
// Compute Galerkin flux difference for x-direction
InviscidFluxBase::
calcFluxXdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fx_ij,DataAtEdge,ui,uj,pi,pj,iposDest,iposSrc);
// Compute Galerkin flux difference for y-direction
InviscidFluxBase::
calcFluxYdir<nedgesimDest,nedgesimSrc,btransposeDest,btransposeSrc,boverwrite>
(Fy_ij,DataAtEdge,vi,vj,pi,pj,iposDest,iposSrc);
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
* and multiply them by the precomputed finite element coefficients
*/
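    /*
     * Background sketch: CoeffsAtEdge provides, for each edge, two
     * coefficient vectors addressed as IDX3_COEFFSATEDGE(.,d,m,iedge,..)
     * with d = 1 (x) or 2 (y) and m = 1 or 2; these presumably hold the
     * precomputed finite element coefficients c_ij and c_ji of the discrete
     * divergence operator. Depending on HYDRO_USE_IBP the Galerkin flux is
     * assembled either in integration-by-parts form (one skew-symmetric
     * contribution per component, added to node i and subtracted from
     * node j) or in divergence form (separate contributions weighted by
     * the two coefficient vectors); both branches are multiplied by scale.
     */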
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td aux;
#ifdef HYDRO_USE_IBP
// Calculate skew-symmetric inviscid fluxes
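      // Sketch of the assembled quantity per component k, writing C(d,m)
      // for IDX3_COEFFSATEDGE(CoeffsAtEdge,d,m,iedge,2,ncoeff,nedge):
      //   aux = scale * ( C(1,2)*Fk_x(U_j) + C(2,2)*Fk_y(U_j)
      //                 - C(1,1)*Fk_x(U_i) - C(2,1)*Fk_y(U_i) )
      // which is then added to the flux of node i and subtracted from the
      // flux of node j (assigned directly if boverwrite is set).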
// Flux component 1
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 2
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 3
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
// Flux component 4
if (btransposeSrc) {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
else {
aux = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj)
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)
-IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi));
}
if (boverwrite) {
if (btransposeDest) {
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
else {
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = aux;
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -aux;
}
}
else {
if (btransposeDest) {
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
else {
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += aux;
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= aux;
}
}
#else
// Calculate inviscid fluxes (not skew-symmetric)
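      // Sketch per component k, with the same C(d,m) shorthand for
      // IDX3_COEFFSATEDGE(CoeffsAtEdge,d,m,iedge,2,ncoeff,nedge): the flux
      // difference between the two nodes is weighted separately,
      //   node i:  scale*( C(1,1)*(Fk_x(U_i)-Fk_x(U_j)) + C(2,1)*(Fk_y(U_i)-Fk_y(U_j)) )
      //   node j: -scale*( C(1,2)*(Fk_x(U_i)-Fk_x(U_j)) + C(2,2)*(Fk_y(U_i)-Fk_y(U_j)) )
      // and either assigned (boverwrite) or accumulated into FluxesAtEdge.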
if (boverwrite) {
// Overwrite destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Destination vector is transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Both vectors are not transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
}
else {
// Keep content of destination vector
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Destination vector is transposed
IDX3T(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3T(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
else {
// Both vectors are not transposed
IDX3(FluxesAtEdge,1,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,1,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX1_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX1_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,2,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX2_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX2_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,3,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX3_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX3_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
IDX3(FluxesAtEdge,4,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,ui,pi)-
INVISCIDFLUX4_XDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,uj,pj))
+IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*
(INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc,vi,pi)-
INVISCIDFLUX4_YDIR3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc,vj,pj)));
}
}
}
#endif
}
};
/*****************************************************************************
* InviscidFlux
****************************************************************************/
struct InviscidFlux : public InviscidFluxBase
{
// Enable use of inherited functions
using InviscidFluxBase::calcFluxXdir;
using InviscidFluxBase::calcFluxYdir;
using InviscidFluxBase::calcEdgeData;
/**************************************************************************
* Wrapper routines for processing a single edge
*************************************************************************/
/*
* Calculate the inviscid flux for x-direction (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxXdir(Td *Fxi,
Td *Fxj,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxXdir<1,1,false,false,boverwrite>
(Fxi,Fxj,DataAtEdge,ui,uj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for y-direction (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxYdir(Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxYdir<1,1,false,false,boverwrite>
(Fyi,Fyj,DataAtEdge,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for x-direction (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxXdir(Td *Fx_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxXdir<1,1,false,false,boverwrite>
(Fx_ij,DataAtEdge,ui,uj,pi,pj,1,1);
}
/*
* Calculate the inviscid flux for y-direction (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcFluxYdir(Td *Fy_ij,
Td *DataAtEdge,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcFluxYdir<1,1,false,false,boverwrite>
(Fy_ij,DataAtEdge,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcEdgeData(Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(Fxi,Fxj,Fyi,Fyj,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (skew-symmetric)
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void calcEdgeData(Td *Fx_ij,
Td *Fy_ij,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(Fx_ij,Fy_ij,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1);
}
/*
* Calculate the inviscid fluxes in all directions (not skew-symmetric)
* and multiply them by the precomputed finite element coefficients
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
InviscidFluxBase::calcEdgeData<1,1,false,false,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,
scale,1,1,iedge,nedge,ncoeff);
}
};
/*****************************************************************************
* InviscidFluxDissipationBase (basic functionality for individual specialisations)
****************************************************************************/
template <int idissipationtype>
struct InviscidFluxDissipationBase
{
};
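  // Note: the primary template is deliberately left empty; only the
  // specialisations below provide calcEdgeData, so instantiating an
  // unsupported dissipation type fails at compile time rather than
  // silently computing nothing.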
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing zero
* artificial dissipation, aka standard Galerkin
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ZERO>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing scalar
* artificial dissipation proportional to the spectral radius
* (largest eigenvalue) of the cumulative Roe matrix.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_SCALAR>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
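      // ROE_MEAN_RATIO/ROE_MEAN_VALUE are assumed here to realise the usual
      // density-weighted Roe average (sketch only, not the macro definitions):
      //   aux  = sqrt(ri/rj),   u_ij = (aux*ui+uj)/(aux+1)
      // and analogously for v_ij and H_ij.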
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
// Compute auxiliary variables
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
Td vel_ij = u_ij * a[0] + v_ij * a[1];
// Compute the speed of sound
Td c_ij = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON));
// Compute scalar dissipation
Td d_ij = abs(vel_ij) + sqrt(a[0] * a[0] + a[1] * a[1])*c_ij;
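      // In compact form: d_ij = |v_ij.a| + ||a||_2*c_ij with v_ij=(u_ij,v_ij),
      // i.e. the spectral radius of the Roe matrix projected onto the
      // skew-symmetric coefficient a.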
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing scalar
* artificial dissipation proportional to the spectral radius
* (largest eigenvalue) of the dimensional-split Roe matrix.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_SCALAR_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variables
Td q_ij = DCONST(0.5) *(u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c_ij = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON));
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
// Compute scalar dissipation
Td d_ij = ( abs(a[0]*u_ij) + abs(a[0])*c_ij +
abs(a[1]*v_ij) + abs(a[1])*c_ij );
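      // In compact form: d_ij = |a_1*u_ij| + |a_1|*c_ij + |a_2*v_ij| + |a_2|*c_ij,
      // i.e. the sum of the spectral radii of the two directional Roe matrices
      // rather than that of the single projected Roe matrix used above.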
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* tensorial artificial dissipation of Roe-type.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ROE>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
Td anorm = sqrt(a[0] * a[0] + a[1] * a[1]);
if (anorm > DBL_EPSILON) {
// Normalise the skew-symmetric coefficient
a[0] = a[0]/anorm;
a[1] = a[1]/anorm;
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variables
Td vel_ij = u_ij * a[0] + v_ij * a[1];
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c2_ij = max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON);
Td c_ij = sqrt(c2_ij);
// Compute eigenvalues
Td l1 = abs(vel_ij-c_ij);
Td l2 = abs(vel_ij);
Td l3 = abs(vel_ij+c_ij);
Td l4 = abs(vel_ij);
// Compute solution difference U_j-U_i
Td Diff[NVAR2D];
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
Td aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
Td aux2 = (vel_ij*Diff[0]
-a[0]*Diff[1]
-a[1]*Diff[2])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
Td w1 = l1 * (aux1 + aux2);
Td w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
Td w3 = l3 * (aux1 - aux2);
Td w4 = l4 * ((a[0]*v_ij-a[1]*u_ij)*Diff[0]
+a[1]*Diff[1]
-a[0]*Diff[2]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = anorm * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = anorm * ( (u_ij-c_ij*a[0])*w1 + u_ij*w2 +
(u_ij+c_ij*a[0])*w3 + a[1]*w4 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = anorm * ( (v_ij-c_ij*a[1])*w1 + v_ij*w2 +
(v_ij+c_ij*a[1])*w3 - a[0]*w4 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = anorm * ( (H_ij-c_ij*vel_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*vel_ij)*w3 + (u_ij*a[1]-v_ij*a[0])*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = anorm * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = anorm * ( (u_ij-c_ij*a[0])*w1 + u_ij*w2 +
(u_ij+c_ij*a[0])*w3 + a[1]*w4 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = anorm * ( (v_ij-c_ij*a[1])*w1 + v_ij*w2 +
(v_ij+c_ij*a[1])*w3 - a[0]*w4 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = anorm * ( (H_ij-c_ij*vel_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*vel_ij)*w3 + (u_ij*a[1]-v_ij*a[0])*w4 );
}
} else {
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* tensorial artificial dissipation of Roe-type using dimensional splitting.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_ROE_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
// Compute skew-symmetric coefficient
Td a[2];
a[0] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge));
a[1] = DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge));
Td anorm = sqrt(a[0] * a[0] + a[1] * a[1]);
if (anorm > DBL_EPSILON) {
// Compute the absolute value
a[0] = abs(a[0]);
a[1] = abs(a[1]);
Td ri,rj,hi,hj;
if (btransposeSrc) {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
else {
// Compute densities
ri = DENSITY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
rj = DENSITY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
// Compute enthalpies
hi = (TOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc)+pi)/ri;
hj = (TOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc)+pj)/rj;
}
// Compute Roe mean values
Td aux = ROE_MEAN_RATIO(ri,rj);
Td u_ij = ROE_MEAN_VALUE(ui,uj,aux);
Td v_ij = ROE_MEAN_VALUE(vi,vj,aux);
Td H_ij = ROE_MEAN_VALUE(hi,hj,aux);
// Compute auxiliary variable
Td q_ij = DCONST(0.5) * (u_ij * u_ij + v_ij * v_ij);
// Compute the speed of sound
Td c2_ij = max(((HYDRO_GAMMA)-DCONST(1.0))*(H_ij-q_ij), DBL_EPSILON);
Td c_ij = sqrt(c2_ij);
//----------------------------------------------------------------------
// Dimensional splitting: x-direction
//----------------------------------------------------------------------
// Compute eigenvalues
Td l1 = abs(u_ij-c_ij);
Td l2 = abs(u_ij);
Td l3 = abs(u_ij+c_ij);
Td l4 = abs(u_ij);
// Compute solution difference U_j-U_i
Td Diff[NVAR2D];
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
Td aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
Td aux2 = (u_ij*Diff[0]
-Diff[1])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
Td w1 = l1 * (aux1 + aux2);
Td w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
Td w3 = l3 * (aux1 - aux2);
Td w4 = l4 * (v_ij*Diff[0]-Diff[2]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = a[0] * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (u_ij-c_ij)*w1 + u_ij*w2 + (u_ij+c_ij)*w3 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = a[0] * ( v_ij*w1 + v_ij*w2 + v_ij*w3 - w4 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (H_ij-c_ij*u_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*u_ij)*w3 - v_ij*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) = a[0] * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (u_ij-c_ij)*w1 + u_ij*w2 + (u_ij+c_ij)*w3 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) = a[0] * ( v_ij*w1 + v_ij*w2 + v_ij*w3 - w4 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) = a[0] * ( (H_ij-c_ij*u_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*u_ij)*w3 - v_ij*w4 );
}
//----------------------------------------------------------------------
// Dimensional splitting: y-direction
//----------------------------------------------------------------------
// Compute eigenvalues
l1 = abs(v_ij-c_ij);
l2 = abs(v_ij);
l3 = abs(v_ij+c_ij);
l4 = abs(v_ij);
// Compute solution difference U_j-U_i
if (btransposeSrc) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
Diff[i-1] = IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute auxiliary quantities for characteristic variables
aux1 = ((HYDRO_GAMMA)-DCONST(1.0))*(q_ij*Diff[0]
-u_ij*Diff[1]
-v_ij*Diff[2]
+Diff[3])/DCONST(2.0)/c2_ij;
aux2 = (v_ij*Diff[0]-Diff[2])/DCONST(2.0)/c_ij;
// Compute characteristic variables multiplied by the corresponding eigenvalue
w1 = l1 * (aux1 + aux2);
w2 = l2 * ((DCONST(1.0)-((HYDRO_GAMMA)-DCONST(1.0))*q_ij/c2_ij)*Diff[0]
+((HYDRO_GAMMA)-DCONST(1.0))*(u_ij*Diff[1]
+v_ij*Diff[2]
-Diff[3])/c2_ij);
w3 = l3 * (aux1 - aux2);
w4 = l4 * (-u_ij*Diff[0]+Diff[1]);
// Compute "R_ij * |Lbd_ij| * L_ij * dU"
if (btransposeDest) {
IDX2T(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) += a[1] * ( w1 + w2 + w3 );
IDX2T(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) += a[1] * ( u_ij*w1 + u_ij*w2 + u_ij*w3 + w4 );
IDX2T(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (v_ij-c_ij)*w1 + v_ij*w2 + (v_ij+c_ij)*w3 );
IDX2T(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (H_ij-c_ij*v_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*v_ij)*w3 + u_ij*w4 );
}
else {
IDX2(VectorAtEdge,1,iposDest,NVAR2D,nedgesimDest) += a[1] * ( w1 + w2 + w3 );
IDX2(VectorAtEdge,2,iposDest,NVAR2D,nedgesimDest) += a[1] * ( u_ij*w1 + u_ij*w2 + u_ij*w3 + w4 );
IDX2(VectorAtEdge,3,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (v_ij-c_ij)*w1 + v_ij*w2 + (v_ij+c_ij)*w3 );
IDX2(VectorAtEdge,4,iposDest,NVAR2D,nedgesimDest) += a[1] * ( (H_ij-c_ij*v_ij)*w1 + q_ij*w2 +
(H_ij+c_ij*v_ij)*w3 + u_ij*w4 );
}
} else {
if (btransposeDest) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) = 0.0;
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* scalar artificial dissipation of Rusanov-type.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_RUSANOV>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td Ei,Ej;
if (btransposeSrc) {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute the speed of sound
Td ci = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ei-DCONST(0.5)*(ui*ui+vi*vi)), DBL_EPSILON));
Td cj = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ej-DCONST(0.5)*(uj*uj+vj*vj)), DBL_EPSILON));
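      // The Rusanov-type dissipation below is the maximum of the two one-sided
      // wave speeds, d_ij = max(|v_j.a_ij| + ||a_ij||*c_j, |v_i.a_ji| + ||a_ji||*c_i),
      // evaluated either with the skew-symmetric coefficient part only
      // (HYDRO_USE_IBP) or with the full edge coefficients.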
#ifdef HYDRO_USE_IBP
// Compute scalar dissipation based on the skew-symmetric part
// which does not include the symmetric boundary contribution
Td d_ij = max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*uj+
DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*vj)+
DCONST(0.5)*sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge),2))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*ui+
DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*vi)+
DCONST(0.5)*sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge),2))*ci );
#else
// Compute scalar dissipation
Td d_ij = max( abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*uj+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*vj)+
sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge),2))*cj,
abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*ui+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*vi)+
sqrt(POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge),2)+
POW(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge),2))*ci );
#endif
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipationBase: Specialisation for computing
* scalar artificial dissipation of Rusanov-type using dimensional splitting.
****************************************************************************/
template <>
struct InviscidFluxDissipationBase<DISSIPATION_RUSANOV_DSPLIT>
{
template <int nedgesimDest, int nedgesimSrc,
bool btransposeDest, bool btransposeSrc,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
Td Ei,Ej;
if (btransposeSrc) {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3T,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
else {
// Compute specific energies
Ei = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,1,iposSrc,NVAR2D,2,nedgesimSrc);
Ej = SPECIFICTOTALENERGY3_2D(DataAtEdge,IDX3,2,iposSrc,NVAR2D,2,nedgesimSrc);
}
// Compute the speed of sound
Td ci = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ei-DCONST(0.5)*(ui*ui+vi*vi)), DBL_EPSILON));
Td cj = sqrt(max(((HYDRO_GAMMA)-DCONST(1.0))*
(HYDRO_GAMMA)*(Ej-DCONST(0.5)*(uj*uj+vj*vj)), DBL_EPSILON));
#ifdef HYDRO_USE_IBP
// Compute scalar dissipation with dimensional splitting based on
// the skew-symmetric part which does not include the symmetric
// boundary contribution
Td d_ij = max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*uj)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*ui)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)))*ci )
+ max( abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*vj)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)))*cj,
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*vi)+
abs(DCONST(0.5)*(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)))*ci );
#else
// Compute scalar dissipation with dimensional splitting
      Td d_ij = max( abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*uj)+
                     abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge))*cj,
                     abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*ui)+
                     abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge))*ci )
                + max( abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*vj)+
                       abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge))*cj,
                       abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*vi)+
                       abs(IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge))*ci );
#endif
// Multiply the solution difference by the scalar dissipation
if (btransposeDest) {
if (btransposeSrc) {
// Both source and destination vector are transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Destination vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2T(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
else {
if (btransposeSrc) {
// Source vector is transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3T(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3T(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
else {
// Both vectors are not transposed
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX2(VectorAtEdge,i,iposDest,NVAR2D,nedgesimDest) =
d_ij*(IDX3(DataAtEdge,i,2,iposSrc,NVAR2D,2,nedgesimSrc)
-IDX3(DataAtEdge,i,1,iposSrc,NVAR2D,2,nedgesimSrc));
}
}
}
};
/*****************************************************************************
* InviscidFluxDissipation: Artificial dissipation
****************************************************************************/
template <int idissipationtype>
struct InviscidFluxDissipation : public InviscidFluxDissipationBase<idissipationtype>
{
// Enable use of inherited functions
using InviscidFluxDissipationBase<idissipationtype>::calcEdgeData;
/***************************************************************************
* Wrapper routine for processing a single edge
**************************************************************************/
template <typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void calcEdgeData(Td *VectorAtEdge,
Tc *CoeffsAtEdge,
Td *DataAtEdge,
Td ui,
Td uj,
Td vi,
Td vj,
Td pi,
Td pj,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
InviscidFluxDissipationBase<idissipationtype>::calcEdgeData<1,1,false,false>
(VectorAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,1,1,iedge,nedge,ncoeff);
}
};
/*****************************************************************************
* FluxBase
****************************************************************************/
struct FluxBase
{
/*
* Combine inviscid fluxes (not skew-symmetric) and artificial diffusion
*/
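    /*
     * For each conservative variable the code below realises
     *
     *   F_i += scale*( C_ji.F(U_j) - C_ij.F(U_i) + Diff )
     *   F_j -= scale*( C_ji.F(U_j) - C_ij.F(U_i) + Diff )
     *
     * where C_ij/C_ji denote the precomputed edge coefficients and Diff the
     * artificial diffusion contribution assembled beforehand (in overwrite
     * mode the fluxes are initialised instead of updated).
     */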
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fxj,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fyj,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fxi,i,iposSrc,NVAR2D,nedgesimSrc)-
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fyi,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
}
/*
* Combine inviscid fluxes (skew-symmetric) and artificial diffusion
*/
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fx_ij,
Td *Fy_ij,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,1,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,1,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale *
(IDX3_COEFFSATEDGE(CoeffsAtEdge,1,2,iedge,2,ncoeff,nedge)*IDX2(Fx_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX3_COEFFSATEDGE(CoeffsAtEdge,2,2,iedge,2,ncoeff,nedge)*IDX2(Fy_ij,i,iposSrc,NVAR2D,nedgesimSrc)+
IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc));
}
}
/*
* Combine inviscid fluxes with artificial diffusion
*/
template <int nedgesimDest, int nedgesimSrc, bool boverwrite,
typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Td *Diff,
Td scale,
Ti iposDest,
Ti iposSrc)
{
if (boverwrite) {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) = scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) = -scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
}
else {
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,1,iposDest,NVAR2D,2,nedgesimDest) += scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
#pragma unroll
for (int i=1; i<=NVAR2D; i++)
IDX3(FluxesAtEdge,i,2,iposDest,NVAR2D,2,nedgesimDest) -= scale * IDX2(Diff,i,iposSrc,NVAR2D,nedgesimSrc);
}
}
};
/*****************************************************************************
* Flux
****************************************************************************/
struct Flux : public FluxBase
{
// Enable use of inherited functions
using FluxBase::combineEdgeData;
/***************************************************************************
* Wrapper routines for processing a single edge
**************************************************************************/
/*
* Combine inviscid fluxes (not skew-symmetric) and artificial diffusion
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fxi,
Td *Fxj,
Td *Fyi,
Td *Fyj,
Td *Diff,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,Fxi,Fxj,Fyi,Fyj,Diff,scale,1,1,iedge,nedge,ncoeff);
}
/*
* Combine inviscid fluxes (skew-symmetric) and artificial diffusion
*/
template <bool boverwrite, typename Tc, typename Td, typename Ti>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Tc *CoeffsAtEdge,
Td *Fx_ij,
Td *Fy_ij,
Td *Diff,
Td scale,
Ti iedge,
Ti nedge,
Ti ncoeff)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,CoeffsAtEdge,Fx_ij,Fy_ij,Diff,scale,1,1,iedge,nedge,ncoeff);
}
/*
* Combine inviscid fluxes with artificial diffusion
*/
template <bool boverwrite, typename Td>
__device__ __forceinline__
static void combineEdgeData(Td *FluxesAtEdge,
Td *Diff,
Td scale)
{
FluxBase::combineEdgeData<1,1,boverwrite>
(FluxesAtEdge,Diff,scale,1,1);
}
};
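  /*
   * Typical per-edge assembly sequence, as realised by the kernels below:
   *   1. gather the solution values U_i, U_j at the edge endpoints,
   *   2. compute the primitive quantities ui,uj,vi,vj,pi,pj,
   *   3. InviscidFluxDissipation<...>::calcEdgeData -> artificial diffusion,
   *   4. Flux::combineEdgeData<true>                -> initialise edge fluxes,
   *   5. InviscidFlux::calcEdgeData<false>          -> add Galerkin fluxes,
   *   6. scatter the edge fluxes into the destination vector.
   */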
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (baseline implementation).
****************************************************************************/
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int threads_per_cta>
__launch_bounds__(threads_per_cta)
__global__ void hydro_calcFlux2d_baseline(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Loop over all items per thread
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
// Global edge ID
Ti idx = (ipt*gridDim.x+blockIdx.x)*threads_per_cta+nedge_offset+threadIdx.x;
if (threadIdx.x<threads_per_cta && idx<nedge_last)
{
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2_EDGELIST(IedgeList,1,idx+1,6,nedge);
Ti j = IDX2_EDGELIST(IedgeList,2,idx+1,6,nedge);
// Local variables
TdDest DataAtEdge[2*NVAR2D];
// Get solution values at edge endpoints
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
gatherEdgeData<true>(DataAtEdge,vecSrc,i,j,neq);
// Compute velocities
TdDest ui = XVELOCITY2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest vi = YVELOCITY2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest uj = XVELOCITY2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
TdDest vj = YVELOCITY2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
// Compute pressures
TdDest pi = PRESSURE2_2D(DataAtEdge,IDX2,1,NVAR2D,2);
TdDest pj = PRESSURE2_2D(DataAtEdge,IDX2,2,NVAR2D,2);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::calcEdgeData
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<false>
(FluxAtEdge,CoeffsAtEdge,DataAtEdge,ui,uj,vi,vj,pi,pj,scale,idx+1,nedge,ncoeff);
// Build fluxes into nodal vector
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
scatterEdgeData<false>(vecDest,FluxAtEdge,i,j,neq);
}
}
};
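  // A minimal host-side launch sketch for the baseline kernel (illustration
  // only: the scalar types, the SYSTEM_*/DISSIPATION_* constants, the block
  // size and the d_* device buffers are assumptions, not actual driver code):
  //
  //   const int threads = 128;                        // must equal threads_per_cta
  //   const int blocks  = (nedge + threads - 1) / threads;
  //   hydro_calcFlux2d_baseline
  //     <double,double,double,int,SYSTEM_BLOCK,DISSIPATION_SCALAR,128>
  //     <<<blocks,threads>>>(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
  //                          scale, neq, nedge, ncoeff, nedge);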
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (shared memory implementation).
****************************************************************************/
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int threads_per_cta>
__launch_bounds__(threads_per_cta)
__global__ void hydro_calcFlux2d_shmem(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ TdSrc s_DataAtEdge[2*NVAR2D*threads_per_cta];
// Loop over all items per thread
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
// Global edge ID
Ti idx = (ipt*gridDim.x+blockIdx.x)*threads_per_cta+nedge_offset+threadIdx.x;
if (threadIdx.x<threads_per_cta && idx<nedge_last)
{
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2_EDGELIST(IedgeList,1,idx+1,6,nedge);
Ti j = IDX2_EDGELIST(IedgeList,2,idx+1,6,nedge);
// Get solution values at edge endpoints
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
gatherEdgeData<threads_per_cta,SHMEM_DATA_TRANSPOSE,true>
(s_DataAtEdge,vecSrc,(int)threadIdx.x+1,i,j,neq);
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,1,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
TdDest pj = PRESSURE3_2D(s_DataAtEdge,SHMEM_DATA_IDX3,2,(int)threadIdx.x+1,NVAR2D,2,threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,threads_per_cta,false,SHMEM_DATA_TRANSPOSE>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,threads_per_cta,false,SHMEM_DATA_TRANSPOSE,false>
(FluxAtEdge,CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
scale,1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Build fluxes into nodal vector
Vector<NVAR2D,isystemformat==SYSTEM_BLOCK>::
scatterEdgeData<false>(vecDest,FluxAtEdge,i,j,neq);
}
}
};
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
* artificial dissipation (if required) (cudaDMA implementation
* without warp specialisation).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_nospec(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[2*compute_threads_per_cta];
__shared__ TdSrc s_DataAtEdge[NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<false, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
// (i,j,ij,ji,ii,jj) for each edge iedge
//
// For the flux assembly, only the two endpoints (i,j) are
// required. Therefore, only the first two entries of each edge
// are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_DataAtEdge, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, false,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
TOTAL_THREADS_PER_CTA, 2*compute_threads_per_cta>dma_vec;
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<false, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
TOTAL_THREADS_PER_CTA,
2*2>dma_coeff(nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<false, sizeof(Tc), 2*sizeof(Tc),
TOTAL_THREADS_PER_CTA,
2*compute_threads_per_cta>dma_coeff(ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
if (nedge_per_thread>1)
__syncthreads();
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)], s_IedgeList);
__syncthreads();
dma_vec.execute_dma(s_IedgeList, vecSrc-NVAR2D, s_DataAtEdge);
dma_coeff.execute_dma(&CoeffsAtEdge[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge);
__syncthreads();
//--------------------------------------------------------------------------
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_DataAtEdge,
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_DataAtEdge,
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
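    // Note: FluxAtEdge serves as an NVAR2D x 2 scratch array; its
    // second column is first used to assemble the artificial
    // dissipation before columns 1 and 2 finally hold the edge
    // contributions that are added to nodes i and j below.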
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge,s_DataAtEdge,ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
dma_vec.execute_dma(s_IedgeList, vecDest-NVAR2D, s_DataAtEdge);
__syncthreads();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList,1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_DataAtEdge, ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList,2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_DataAtEdge, ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
 * artificial dissipation (if required) (cudaDMA implementation with
 * manual single buffering strategy and prefetching of indices).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_prefetch_single(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[1][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[1][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[1][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[1][2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<false, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
    // (i,j,ij,ji,ii,jj) for each edge iedge
  //
  // For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only the first two entries of each edge
  // are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(0, compute_threads_per_cta, compute_threads_per_cta);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
  // stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(1, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
#else
  // Coefficients at edges are stored as array of structures, that
  // is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+0*CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ++ipt) {
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
if (nedge_per_thread>1)
ptx_cudaDMA_barrier_blocking(5, TOTAL_THREADS_PER_CTA);
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
ptx_cudaDMA_barrier_blocking(5, TOTAL_THREADS_PER_CTA);
//------------------------------------------------------------------------
// Warp specialisation
//------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
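      // The IBUF/DBUF/IOFF macros select the shared index buffer,
      // the shared data buffer and the edge-group offset used by the
      // following code section; in this single-buffered kernel all
      // three are zero.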
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
}
//------------------------------------------------------------------------
// DMA transfer warps
//------------------------------------------------------------------------
else if(dma_vecSrc0.owns_this_thread()) {
dma_vecSrc0.execute_dma(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
}
else if(dma_vecDest0.owns_this_thread()) {
dma_vecDest0.execute_dma(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
}
else if(dma_coeff0.owns_this_thread()) {
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
 * artificial dissipation (if required) (cudaDMA implementation with
 * manual double buffering strategy and prefetching of indices).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_prefetch_double(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[4][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2][2*2*compute_threads_per_cta];
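  // Note: four index buffers are prefetched at once, whereas the
  // nodal data and the coefficients are double buffered; the loop
  // below therefore processes four edge groups per iteration,
  // alternating between buffers 0 and 1.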
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<false, 2*sizeof(Ti),
4*2*compute_threads_per_cta*sizeof(Ti),
TOTAL_THREADS_PER_CTA>dma_ind;
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
    // (i,j,ij,ji,ii,jj) for each edge iedge
  //
  // For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only the first two entries of each edge
  // are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<false, 2*sizeof(Ti), 2*sizeof(Ti),
TOTAL_THREADS_PER_CTA,
4*compute_threads_per_cta>dma_ind(6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(1, compute_threads_per_cta, compute_threads_per_cta);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
  // stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(2, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(3, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(4, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff1(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
#else
  // Coefficients at edges are stored as array of structures, that
  // is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(4, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff1(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=4) {
//------------------------------------------------------------------------
// Load the indices with all threads - no warp specialisation
//------------------------------------------------------------------------
ptx_cudaDMA_barrier_blocking(11, TOTAL_THREADS_PER_CTA);
dma_ind.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
ptx_cudaDMA_barrier_blocking(11, TOTAL_THREADS_PER_CTA);
//------------------------------------------------------------------------
// Warp specialisation
//------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
dma_coeff1.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
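      // Process the four edge groups of this iteration; IBUF selects
      // the index buffer and DBUF the double buffer used for the
      // nodal data and the coefficients.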
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Start DMA transfer of coefficients
dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[0],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
#define IBUF 3
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Wait for coefficients to be ready
dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,s_CoeffsAtEdge[1],s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,(int)threadIdx.x+1,compute_threads_per_cta,2);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
}
//------------------------------------------------------------------------
// DMA transfer warps
//------------------------------------------------------------------------
else if(dma_vecSrc0.owns_this_thread()) {
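      // This DMA warp group serves the even edge groups (index
      // buffers 0 and 2); its counterpart dma_vecSrc1 serves the odd
      // ones (index buffers 1 and 3).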
dma_vecSrc0.execute_dma(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc1.execute_dma(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc0.execute_dma(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc1.execute_dma(s_IedgeList[3], vecSrc-NVAR2D, s_VecSrc[1]);
}
else if(dma_vecDest0.owns_this_thread()) {
dma_vecDest0.execute_dma(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest1.execute_dma(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest0.execute_dma(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest1.execute_dma(s_IedgeList[3], vecDest-NVAR2D, s_VecDest[1]);
}
else if(dma_coeff0.owns_this_thread()) {
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
0*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
dma_coeff1.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
1*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[1]);
dma_coeff0.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
2*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
dma_coeff1.execute_dma(&CoeffsAtEdge[((ipt*gridDim.x+blockIdx.x)*
4*compute_threads_per_cta +
3*compute_threads_per_cta +
nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[1]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
 * artificial dissipation (if required) (cudaDMA implementation with
* double buffering strategy).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC+2*CUDADMA_DMA_LDS_DEST)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_double(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[3][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
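  // Note: three index buffers and two data buffers are cycled below,
  // so each iteration of the compute loop processes six edge groups
  // (index buffers 0,1,2,0,1,2 paired with data buffers 0,1,0,1,0,1).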
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind1(1, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind2(2, compute_threads_per_cta,
compute_threads_per_cta+2*CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
    // (i,j,ij,ji,ii,jj) for each edge iedge
  //
  // For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only the first two entries of each edge
  // are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind0(0, compute_threads_per_cta,
compute_threads_per_cta,
6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind1(1, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind2(2, compute_threads_per_cta,
compute_threads_per_cta+2*CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(3, compute_threads_per_cta,
compute_threads_per_cta+3*CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(4, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+1*CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
  // stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(5, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(6, compute_threads_per_cta, compute_threads_per_cta+
(3*CUDADMA_DMA_LDS_IND+2*CUDADMA_DMA_LDS_SRC
+1*CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
// cudaDMAStrided<false, sizeof(Tc),
// compute_threads_per_cta*sizeof(Tc),
// TOTAL_THREADS_PER_CTA,
// 2*2>dma_coeff(nedge*sizeof(Tc));
#else
  // Coefficients at edges are stored as array of structures, that
  // is, we have nedge real-valued subarrays of length 2*ncoeff
// cudaDMAStrided<false, sizeof(Tc), 2*sizeof(Tc),
// TOTAL_THREADS_PER_CTA,
// 2*compute_threads_per_cta>dma_coeff(ncoeff*sizeof(Tc));
#endif
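  // In this kernel variant the coefficients are not staged through
  // shared memory; the compute threads read CoeffsAtEdge directly
  // from global memory (see the calcEdgeData calls below), which is
  // why the coefficient DMA objects above are commented out.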
//--------------------------------------------------------------------------
// Warp specialisation
//--------------------------------------------------------------------------
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of indices
dma_ind0.start_async_dma();
dma_ind1.start_async_dma();
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
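    // At this point the transfers for the first two edge groups are
    // already in flight, so the pipeline below can start computing on
    // buffer 0 while buffer 1 is still being filled.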
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
Ti idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 0
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 1
#define DBUF 0
#define IOFF 4
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 2
#define DBUF 1
#define IOFF 5
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
}
}
//--------------------------------------------------------------------------
// DMA transfer warps
//--------------------------------------------------------------------------
else if(dma_ind0.owns_this_thread()) {
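    // Each of the three index DMA warp groups prefetches every third
    // edge group into its own s_IedgeList buffer; the loop bounds
    // extend beyond nedge_per_thread to match the extra
    // start_async_dma requests issued at the end of the compute loop.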
for (int ipt=0; ipt<nedge_per_thread+1; ipt+=3) {
dma_ind0.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
}
}
else if(dma_ind1.owns_this_thread()) {
for (int ipt=1; ipt<nedge_per_thread+2; ipt+=3) {
dma_ind1.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[1]);
}
}
else if(dma_ind2.owns_this_thread()) {
for (int ipt=2; ipt<nedge_per_thread+3; ipt+=3) {
dma_ind2.execute_dma(&IedgeList[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[2]);
}
}
else if(dma_vecSrc0.owns_this_thread()) {
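    // The order of the index buffers (0,2,1 here, 1,0,2 for
    // dma_vecSrc1) mirrors the order in which the compute warps
    // consume the corresponding data buffer in the loop above.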
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
}
}
else if(dma_vecSrc1.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
}
}
else if(dma_vecDest0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
}
}
else if(dma_vecDest1.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
#ifdef HAS_INLINE_PTX
/*****************************************************************************
* This CUDA kernel calculates the inviscid fluxes and applies
 * artificial dissipation (if required) (cudaDMA implementation with
* manual buffering strategy).
****************************************************************************/
#define TOTAL_THREADS_PER_CTA compute_threads_per_cta+dma_threads_per_ld* \
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST+CUDADMA_DMA_LDS_COEFF)
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int isystemformat,
int idissipationtype,
int compute_threads_per_cta,
int dma_threads_per_ld>
__launch_bounds__(TOTAL_THREADS_PER_CTA)
__global__ void hydro_calcFlux2d_cudaDMA_manual(Tc *CoeffsAtEdge,
Ti *IedgeList,
TdSrc *vecSrc,
TdDest *vecDest,
TdDest scale,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedge_last,
Ti nedge_per_thread=1,
Ti nedge_offset=0)
{
// Shared memory
__shared__ Ti s_IedgeList[3][2*compute_threads_per_cta];
__shared__ TdSrc s_VecSrc[2][NVAR2D*2*compute_threads_per_cta];
__shared__ TdDest s_VecDest[2][NVAR2D*2*compute_threads_per_cta];
__shared__ Tc s_CoeffsAtEdge[2][2*2*compute_threads_per_cta];
//--------------------------------------------------------------------------
#if EDGELIST_DEVICE == SOA
// List of edges is stored as structure of arrays, that is, we
// have 6 integer subarrays of length nedge which store:
//
// 0-subarray: first end point i,
// 1-subarray: second end point j,
// 2-subarray: matrix entry ij,
// 3-subarray: matrix entry ji,
// 4-subarray: matrix entry ii,
// 5-subarray: matrix entry jj.
//
// For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only subarrays 0 and 1 are transferred.
// Sequential cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind1(1, compute_threads_per_cta, compute_threads_per_cta);
cudaDMASequential<true, 2*sizeof(Ti),
2*compute_threads_per_cta*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld>
dma_ind2(2, compute_threads_per_cta, compute_threads_per_cta);
#else
// List of edges is stored as array of structures, that is, we
// have nedge integer subarrays of length 6 which store:
//
    // (i,j,ij,ji,ii,jj) for each edge iedge
  //
  // For the flux assembly, only the two endpoints (i,j) are
  // required. Therefore, only the first two entries of each edge
  // are transferred using strided DMA.
// Strided cudaDMA thread to transfer edge list from integer
// array IedgeList into shared memory s_IedgeList
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind0(0, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind1(1, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
cudaDMAStrided<true, 2*sizeof(Ti), 2*sizeof(Ti),
CUDADMA_DMA_LDS_IND*dma_threads_per_ld,
compute_threads_per_cta>
dma_ind2(2, compute_threads_per_cta, compute_threads_per_cta, 6*sizeof(Ti));
#endif
//--------------------------------------------------------------------------
// Indirect cudaDMA thread to transfer nodal data from vecSrc into
// shared memory s_VecSrc, we need to distinguish between vecSrc
// stored in interleaved format and vecSrc stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc0(3, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_SRC*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecSrc1(4, compute_threads_per_cta,
compute_threads_per_cta+CUDADMA_DMA_LDS_IND*dma_threads_per_ld);
// Indirect cudaDMA thread to transfer nodal data from vecDest into
// shared memory s_VecDest, we need to distinguish between vecDest
// stored in interleaved format and vecDest stored in block format
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest0(5, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
cudaDMAIndirect<true, true,
MAXALIGN((isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc)),
(isystemformat==SYSTEM_BLOCK ? 1 : NVAR2D)*sizeof(TdSrc),
CUDADMA_DMA_LDS_DEST*dma_threads_per_ld, 2*compute_threads_per_cta>
dma_vecDest1(6, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC)*dma_threads_per_ld);
//--------------------------------------------------------------------------
#if COEFFSATEDGE_DEVICE == SOA
// Coefficients at edges are stored as structure of arrays, that
// is, we have 2*ncoeff subarrays of length nedge which store:
//
// 0-subarray: ij-coefficients for x-direction,
// 1-subarray: ji-coefficients for x-direction,
// 2-subarray: ij-coefficients for y-direction,
// 3-subarray: ji-coefficients for y-direction,
// ...
// n-subarray: further coefficients not required here
// Strided cudaDMA thread to transfer precomputed coefficients
// CoeffsAtEdge into shared memory s_CoeffsAtEdge
cudaDMAStrided<true, sizeof(Tc),
compute_threads_per_cta*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*2>
dma_coeff0(7, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
nedge*sizeof(Tc));
// cudaDMAStrided<true, sizeof(Tc),
// compute_threads_per_cta*sizeof(Tc),
// CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
// 2*2>
// dma_coeff1(8, compute_threads_per_cta, compute_threads_per_cta+
// (CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
// nedge*sizeof(Tc));
#else
// Coefficients at edges are stored as array of structures, that
// is, we have nedge real-valued subarrays of length 2*ncoeff
cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
2*compute_threads_per_cta>
dma_coeff0(7, compute_threads_per_cta, compute_threads_per_cta+
(CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
ncoeff*sizeof(Tc));
// cudaDMAStrided<true, sizeof(Tc), 2*sizeof(Tc),
// CUDADMA_DMA_LDS_COEFF*dma_threads_per_ld,
// 2*compute_threads_per_cta>
// dma_coeff1(8, compute_threads_per_cta, compute_threads_per_cta+
// (CUDADMA_DMA_LDS_IND+CUDADMA_DMA_LDS_SRC+CUDADMA_DMA_LDS_DEST)*dma_threads_per_ld,
// ncoeff*sizeof(Tc));
#endif
//--------------------------------------------------------------------------
// Warp specialisation
//--------------------------------------------------------------------------
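// The first compute_threads_per_cta threads act as compute warps; all
// remaining threads form specialised DMA warps that only stage data
// (edge indices, source/destination vectors and edge coefficients) into
// shared memory, see the owns_this_thread() branches below.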
if (threadIdx.x<compute_threads_per_cta) {
// Start DMA transfer of indices
dma_ind0.start_async_dma();
dma_ind1.start_async_dma();
dma_ind2.start_async_dma();
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// dma_coeff1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
// Loop over all edge-groups to be processed by this block
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
#define IBUF 0
#define DBUF 0
#define IOFF 0
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
TdDest ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
TdDest pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
TdDest pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
Ti idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Local variables
TdDest FluxAtEdge[2*NVAR2D];
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
Ti i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
Ti j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 1
#define DBUF 1
#define IOFF 1
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 2
#define DBUF 0
#define IOFF 2
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 0
#define DBUF 1
#define IOFF 3
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind0.start_async_dma();
// Wait for indices to be ready
dma_ind2.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
#define IBUF 1
#define DBUF 0
#define IOFF 4
// Wait for source vector to be ready
dma_vecSrc0.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
dma_coeff0.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
dma_coeff0.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest0.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind1.start_async_dma();
// Wait for indices to be ready
dma_ind0.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc0.start_async_dma();
dma_vecDest0.start_async_dma();
#define IBUF 2
#define DBUF 1
#define IOFF 5
// Wait for source vector to be ready
dma_vecSrc1.wait_for_dma_finish();
// Compute velocities
ui = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vi = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
uj = XVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
vj = YVELOCITY3_2D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Compute pressures
pi = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,1,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
pj = PRESSURE3_3D(s_VecSrc[DBUF],
IDX3,2,(int)threadIdx.x+1,
NVAR2D,2,compute_threads_per_cta);
// Global edge ID
idx = ((ipt+IOFF)*gridDim.x+blockIdx.x)*compute_threads_per_cta
+ nedge_offset + threadIdx.x;
// Wait for coefficients to be ready
// dma_coeff1.wait_for_dma_finish();
// Compute the artificial viscosities
InviscidFluxDissipation<idissipationtype>::
calcEdgeData<1,compute_threads_per_cta,false,false>
(&FluxAtEdge[NVAR2D],CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
Flux::combineEdgeData<true>(FluxAtEdge,&FluxAtEdge[NVAR2D],scale);
// Compute inviscid fluxes
InviscidFlux::calcEdgeData<1,compute_threads_per_cta,false,false,false>
(FluxAtEdge,CoeffsAtEdge,s_VecSrc[DBUF],ui,uj,vi,vj,pi,pj,scale,
1,(int)threadIdx.x+1,idx+1,nedge,ncoeff);
// Start DMA transfer of coefficients
// dma_coeff1.start_async_dma();
// Wait for destination vector to be ready
dma_vecDest1.wait_for_dma_finish();
// Get positions of edge endpoints (idx starts at zero)
i = IDX2(s_IedgeList[IBUF],1,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,i,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 1, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,1,NVAR2D,2);
// Get positions of edge endpoints (idx starts at zero)
j = IDX2(s_IedgeList[IBUF],2,(int)threadIdx.x+1,2,compute_threads_per_cta);
#pragma unroll
for (int ivar=1; ivar<=NVAR2D; ++ivar)
IDX2_REVERSE(vecDest,ivar,j,NVAR2D,neq) =
IDX3(s_VecDest[DBUF], ivar, 2, (int)threadIdx.x+1,
NVAR2D, 2, compute_threads_per_cta) + IDX2(FluxAtEdge,ivar,2,NVAR2D,2);
#undef IBUF
#undef DBUF
#undef IOFF
// Start DMA transfer of indices
dma_ind2.start_async_dma();
// Wait for indices to be ready
dma_ind1.wait_for_dma_finish();
// Start DMA transfer of indirect data
dma_vecSrc1.start_async_dma();
dma_vecDest1.start_async_dma();
}
}
//--------------------------------------------------------------------------
// DMA transfer warps
//--------------------------------------------------------------------------
else if(dma_ind0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=3) {
dma_ind0.execute_dma(&IedgeList[ (((ipt+0)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[0]);
dma_ind1.execute_dma(&IedgeList[ (((ipt+1)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[1]);
dma_ind2.execute_dma(&IedgeList[ (((ipt+2)*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(EDGELIST_DEVICE == SOA ? 2 : 6)],
s_IedgeList[2]);
}
dma_ind0.finish_async_dma();
dma_ind1.finish_async_dma();
}
else if(dma_vecSrc0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[0], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
dma_vecSrc0.wait_for_dma_start();
dma_vecSrc0.execute_dma_no_sync(s_IedgeList[1], vecSrc-NVAR2D, s_VecSrc[0]);
dma_vecSrc0.finish_async_dma();
dma_vecSrc1.wait_for_dma_start();
dma_vecSrc1.execute_dma_no_sync(s_IedgeList[2], vecSrc-NVAR2D, s_VecSrc[1]);
dma_vecSrc1.finish_async_dma();
}
}
else if(dma_vecDest0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=6) {
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[0], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
dma_vecDest0.wait_for_dma_start();
dma_vecDest0.execute_dma_no_sync(s_IedgeList[1], vecDest-NVAR2D, s_VecDest[0]);
dma_vecDest0.finish_async_dma();
dma_vecDest1.wait_for_dma_start();
dma_vecDest1.execute_dma_no_sync(s_IedgeList[2], vecDest-NVAR2D, s_VecDest[1]);
dma_vecDest1.finish_async_dma();
}
}
else if(dma_coeff0.owns_this_thread()) {
for (int ipt=0; ipt<nedge_per_thread; ipt+=2) {
dma_coeff0.execute_dma(&CoeffsAtEdge[ ((ipt*gridDim.x+blockIdx.x)*
compute_threads_per_cta+nedge_offset)*
(COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
s_CoeffsAtEdge[0]);
// dma_coeff1.execute_dma(&CoeffsAtEdge[ (((ipt+1)*gridDim.x+blockIdx.x)*
// compute_threads_per_cta+nedge_offset)*
// (COEFFSATEDGE_DEVICE == SOA ? 1 : 2*ncoeff)],
// s_CoeffsAtEdge[1]);
}
}
};
#undef TOTAL_THREADS_PER_CTA
#endif
/*****************************************************************************
* Internal C++ functions which invoke the CUDA kernels
****************************************************************************/
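// hydro_calcFlux2d_cuda dispatches to the cudaDMA and/or baseline kernels
// (selected via the CUDADMA_KERNEL and BASELINE_KERNEL preprocessor
// symbols) and chooses the scalar (nblocks == 1) or block storage format
// of the global vectors.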
template <typename Tc,
typename TdSrc,
typename TdDest,
typename Ti,
int idissipationtype>
inline
int hydro_calcFlux2d_cuda(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
TdDest scale,
Ti nblocks,
Ti neq,
Ti nedge,
Ti ncoeff,
Ti nedgeset,
Ti iedgeset,
cudaStream_t stream=0)
{
const cudaDeviceProp *devProp = coproc_getCurrentDeviceProp();
// Strategy: run the largest possible number of blocks with a
// predefined number of compute/dma threads per block and let each
// compute thread process the minimal number of edges
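// The first nedge_cudaDMA edges of the set are processed by the cudaDMA
// kernel; the remaining nedgeset-nedge_cudaDMA edges are handled by the
// baseline kernel (see the two prepare_* calls below).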
const int compute_threads_per_cta = CUDADMA_COMPUTE_THREADS_PER_CTA;
const int dma_threads_per_ld = CUDADMA_THREADS_PER_LD;
const int dma_lds = CUDADMA_DMA_LDS;
int nedge_per_thread_cudaDMA = CUDADMA_NEDGE_PER_THREAD;
const int threads_per_cta_baseline = BASELINE_THREADS_PER_CTA;
int nedge_per_thread_baseline = BASELINE_NEDGE_PER_THREAD;
int blocks, threads, nedge_cudaDMA, nedge_baseline;
prepare_cudaDMA(devProp, nedgeset,
&nedge_per_thread_cudaDMA,
compute_threads_per_cta, dma_threads_per_ld,
dma_lds, &blocks, &threads, &nedge_cudaDMA);
dim3 grid_cudaDMA(blocks, 1, 1);
dim3 block_cudaDMA(threads, 1, 1);
prepare_baseline(devProp, nedgeset-nedge_cudaDMA,
&nedge_per_thread_baseline, threads_per_cta_baseline,
&blocks, &threads, &nedge_baseline);
dim3 grid_baseline(blocks, 1, 1);
dim3 block_baseline(threads, 1, 1);
TdSrc *vecSrc = (TdSrc*)(*d_vecSrc);
TdDest *vecDest = (TdDest*)(*d_vecDest);
Tc *CoeffsAtEdge = (Tc*)(*d_CoeffsAtEdge);
Ti *IedgeList = (Ti*)(*d_IedgeList);
if (nblocks == 1) {
#ifdef CUDADMA_KERNEL
if (grid_cudaDMA.x>0) {
// CudaDMA implementation
CUDADMA_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_SCALAR,idissipationtype,
MAX(32,compute_threads_per_cta),MAX(32,dma_threads_per_ld)>
<<<grid_cudaDMA, block_cudaDMA, 0, stream>>>(CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedge_cudaDMA+iedgeset-1,
nedge_per_thread_cudaDMA,
iedgeset-1);
}
#endif
#ifdef BASELINE_KERNEL
if (grid_baseline.x>0) {
// Baseline implementation
BASELINE_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_SCALAR,idissipationtype,
threads_per_cta_baseline>
<<<grid_baseline, block_baseline, 0, stream>>>(CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedgeset+iedgeset-1,
nedge_per_thread_baseline,
nedge_cudaDMA+iedgeset-1);
}
#endif
} else {
#ifdef CUDADMA_KERNEL
if (grid_cudaDMA.x>0) {
// CudaDMA implementation
CUDADMA_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_BLOCK,idissipationtype,
MAX(32,compute_threads_per_cta),MAX(32,dma_threads_per_ld)>
<<<grid_cudaDMA, block_cudaDMA, 0, stream>>>(CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedge_cudaDMA+iedgeset-1,
nedge_per_thread_cudaDMA,
iedgeset-1);
}
#endif
#ifdef BASELINE_KERNEL
if (grid_baseline.x>0) {
// Baseline implementation
BASELINE_KERNEL
<Tc,TdSrc,TdDest,Ti,SYSTEM_BLOCK,idissipationtype,
threads_per_cta_baseline>
<<<grid_baseline, block_baseline, 0, stream>>>(CoeffsAtEdge,
IedgeList,
vecSrc, vecDest, scale,
neq, nedge, ncoeff,
nedgeset+iedgeset-1,
nedge_per_thread_baseline,
nedge_cudaDMA+iedgeset-1);
}
#endif
}
coproc_checkError("hydro_calcFlux2d_cuda");
return 0;
};
/*****************************************************************************
* External C functions which can be called from the Fortran code
****************************************************************************/
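// Hypothetical Fortran call site, for illustration only (the host-side
// handle names and the exact symbol produced by the FNAME macro are
// assumptions and depend on the build configuration):
//
// ierr = hydro_calcfluxgalerkin2d_cuda(h_CoeffsAtEdge, h_IedgeList, &
// h_vecSrc, h_vecDest, dscale, nblocks, neq, nedge, ncoeff, &
// nedges, iedgeset, istream)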
extern "C"
{
__INT FNAME(hydro_calcfluxgalerkin2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ZERO>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxscdiss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_SCALAR>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxscdissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_SCALAR_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)(*stream));
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxroediss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ROE>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)(*stream));
}
/***************************************************************************/
__INT FNAME(hydro_calcfluxroedissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_ROE_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)*stream);
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxrusdiss2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT)hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_RUSANOV>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)*stream);
}
/**************************************************************************/
__INT FNAME(hydro_calcfluxrusdissdisp2d_cuda)(__SIZET *d_CoeffsAtEdge,
__SIZET *d_IedgeList,
__SIZET *d_vecSrc,
__SIZET *d_vecDest,
__DP *scale,
__INT *nblocks,
__INT *neq,
__INT *nedge,
__INT *ncoeff,
__INT *nedges,
__INT *iedgeset,
__I64 *stream)
{
return (__INT) hydro_calcFlux2d_cuda
<__DP,__DP,__DP,__INT,DISSIPATION_RUSANOV_DSPLIT>
(d_CoeffsAtEdge, d_IedgeList, d_vecSrc, d_vecDest,
*scale, *nblocks, *neq, *nedge,
*ncoeff, *nedges, *iedgeset,
(cudaStream_t)*stream);
}
};
}
|
4cb248a8d79a95d514d65202ebc7103337c33991.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathMagma.hip"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, hipMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
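// Returns `self` holding a column-major copy of `src` (MAGMA expects
// column-major storage); if `self` is `src` and is already column-major,
// the tensor is simply retained and returned unchanged.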
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
}
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
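// The two kernels below mirror one triangle of an n-by-n column-major
// matrix into the other, symmetrising the output of MAGMA's potri, which
// only fills the requested triangle.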
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, bool upper)
{
char uplo = upper ? 'U' : 'L';
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
}
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int len = n*n;
dim3 blocks(::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo == 'U') {
hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
} else {
hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
}
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
| 4cb248a8d79a95d514d65202ebc7103337c33991.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
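// Returns `self` holding a column-major copy of `src` (MAGMA expects
// column-major storage); if `self` is `src` and is already column-major,
// the tensor is simply retained and returned unchanged.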
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
}
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
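// The two kernels below mirror one triangle of an n-by-n column-major
// matrix into the other, symmetrising the output of MAGMA's potri, which
// only fills the requested triangle.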
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, bool upper)
{
char uplo = upper ? 'U' : 'L';
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
}
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
}
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
|
10759e0dc04512e0b44e2a05b4a8b7cdf1559745.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// #define DEBUG_VALUE
#define HANDLE_ERROR(err) if(err != hipSuccess) { printf("Error\n"); exit(1); }
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef struct vector3 {
float x;
float y;
float z;
} Vector3_t;
__global__ void add(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].x + b[tid].x;
c[index + tid].y = a[tid].y + b[tid].y;
c[index + tid].z = a[tid].z + b[tid].z;
}
}
__global__ void sub(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].x - b[tid].x;
c[index + tid].y = a[tid].y - b[tid].y;
c[index + tid].z = a[tid].z - b[tid].z;
}
}
__global__ void prod(Vector3_t *a, Vector3_t *b, float *c, int index, int N){
int tid = blockIdx.x;
if(tid < N)
c[index + tid] = a[tid].x * b[tid].x + a[tid].y * b[tid].y + a[tid].z * b[tid].z;
}
__global__ void cross(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].y * b[tid].z - a[tid].z * b[tid].y;
c[index + tid].y = a[tid].z * b[tid].x - a[tid].x * b[tid].z;
c[index + tid].z = a[tid].x * b[tid].y - a[tid].y * b[tid].x;
}
}
void line(void){
printf("=====================================\n");
}
int main(int argc, char *argv[]){
int N = 2; // number of parallel computations
int num_gpu = 0;
Vector3_t *a, *b, *result_vector;
float *result_scalar;
Vector3_t *dev_a, *dev_b, *dev_vector;
float *dev_scalar;
int max_value = 100;
int num_vector_operation = 3; // addition / subtraction / cross product
int num_scalar_operation = 1; // dot product
int num_until_last = 10; // number of results to display
int now_output_vector_operation = 0; // vector operation currently being displayed
int now_output_scalar_operation = 0; // scalar operation currently being displayed
struct timespec start_time; // start time
struct timespec end_time; // end time
long process_time_sec, process_time_nsec; // processing time
if(argc >= 2) N = atoi(argv[1]);
if(num_until_last > N) num_until_last = N;
if(argc >= 3) num_gpu = atoi(argv[2]);
printf("Initialize host memory...");
a = (Vector3_t *)malloc(N * sizeof(Vector3_t));
b = (Vector3_t *)malloc(N * sizeof(Vector3_t));
result_vector = (Vector3_t *)malloc(num_vector_operation * N * sizeof(Vector3_t));
result_scalar = (float *)malloc(num_scalar_operation * N * sizeof(float));
for (int i = 0; i< N; i++){
#ifdef DEBUG_VALUE
a[i].x = (float)i;
a[i].y = (float)i;
a[i].z = (float)i;
b[i].x = (float)i;
b[i].y = (float)i;
b[i].z = (float)i;
#else
a[i].x = (float)i / (float)N * max_value;
a[i].y = (float)i / (float)N * max_value;
a[i].z = (float)i / (float)N * max_value;
b[i].x = max_value - (float)i / (float)N * max_value;
b[i].y = max_value - (float)i / (float)N * max_value;
b[i].z = max_value - (float)i / (float)N * max_value;
#endif
}
printf("Initialized.\n");
printf("Set GPU to number %d.\n", num_gpu);
hipSetDevice(num_gpu);
printf("Initialize VRAM...");
HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(Vector3_t)));
HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(Vector3_t)));
HANDLE_ERROR(hipMalloc((void **)&dev_vector, num_vector_operation * N * sizeof(Vector3_t)));
HANDLE_ERROR(hipMalloc((void **)&dev_scalar, num_scalar_operation * N * sizeof(float)));
HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(Vector3_t), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(Vector3_t), hipMemcpyHostToDevice));
printf("Initialized.\n");
clock_gettime(CLOCK_REALTIME, &start_time);
hipLaunchKernelGGL(( add) , dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_vector, (int)(0 * N), N);
hipLaunchKernelGGL(( sub) , dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_vector, (int)(1 * N), N);
hipLaunchKernelGGL(( prod) , dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_scalar, (int)(0 * N), N);
hipLaunchKernelGGL(( cross) , dim3(N), dim3(1) , 0, 0, dev_a, dev_b, dev_vector, (int)(2 * N), N);
clock_gettime(CLOCK_REALTIME, &end_time);
HANDLE_ERROR(hipMemcpy(result_vector, dev_vector, num_vector_operation * N * sizeof(Vector3_t), hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(result_scalar, dev_scalar, num_scalar_operation * N * sizeof(float), hipMemcpyDeviceToHost));
line();
printf("Addition\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) + (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
now_output_vector_operation += 1;
printf("Subtraction\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) - (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
printf("Dot product\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) * (%f, %f, %f) = %f \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_scalar[(1 + now_output_scalar_operation) * N - num_until_last + i]
);
line();
now_output_vector_operation += 1;
printf("Cross product\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) x (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
process_time_sec = end_time.tv_sec - start_time.tv_sec;
process_time_nsec = end_time.tv_nsec - start_time.tv_nsec;
printf("Processing time: %lf[ms]\n", (double)process_time_sec * 1000. + (double)process_time_nsec / (1000.0 * 1000.0));
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_vector));
HANDLE_ERROR(hipFree(dev_scalar));
free(a);
free(b);
free(result_vector);
free(result_scalar);
return 0;
}
| 10759e0dc04512e0b44e2a05b4a8b7cdf1559745.cu | // #define DEBUG_VALUE
#define HANDLE_ERROR(err) if(err != cudaSuccess) { printf("Error\n"); exit(1); }
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
typedef struct vector3 {
float x;
float y;
float z;
} Vector3_t;
__global__ void add(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].x + b[tid].x;
c[index + tid].y = a[tid].y + b[tid].y;
c[index + tid].z = a[tid].z + b[tid].z;
}
}
__global__ void sub(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].x - b[tid].x;
c[index + tid].y = a[tid].y - b[tid].y;
c[index + tid].z = a[tid].z - b[tid].z;
}
}
__global__ void prod(Vector3_t *a, Vector3_t *b, float *c, int index, int N){
int tid = blockIdx.x;
if(tid < N)
c[index + tid] = a[tid].x * b[tid].x + a[tid].y * b[tid].y + a[tid].z * b[tid].z;
}
__global__ void cross(Vector3_t *a, Vector3_t *b, Vector3_t *c, int index, int N){
int tid = blockIdx.x;
if(tid < N) {
c[index + tid].x = a[tid].y * b[tid].z - a[tid].z * b[tid].y;
c[index + tid].y = a[tid].z * b[tid].x - a[tid].x * b[tid].z;
c[index + tid].z = a[tid].x * b[tid].y - a[tid].y * b[tid].x;
}
}
void line(void){
printf("=====================================\n");
}
int main(int argc, char *argv[]){
int N = 2; // number of parallel computations
int num_gpu = 0;
Vector3_t *a, *b, *result_vector;
float *result_scalar;
Vector3_t *dev_a, *dev_b, *dev_vector;
float *dev_scalar;
int max_value = 100;
int num_vector_operation = 3; // addition / subtraction / cross product
int num_scalar_operation = 1; // dot product
int num_until_last = 10; // number of results to display
int now_output_vector_operation = 0; // vector operation currently being displayed
int now_output_scalar_operation = 0; // scalar operation currently being displayed
struct timespec start_time; // start time
struct timespec end_time; // end time
long process_time_sec, process_time_nsec; // processing time
if(argc >= 2) N = atoi(argv[1]);
if(num_until_last > N) num_until_last = N;
if(argc >= 3) num_gpu = atoi(argv[2]);
printf("Initialize host memory...");
a = (Vector3_t *)malloc(N * sizeof(Vector3_t));
b = (Vector3_t *)malloc(N * sizeof(Vector3_t));
result_vector = (Vector3_t *)malloc(num_vector_operation * N * sizeof(Vector3_t));
result_scalar = (float *)malloc(num_scalar_operation * N * sizeof(float));
for (int i = 0; i< N; i++){
#ifdef DEBUG_VALUE
a[i].x = (float)i;
a[i].y = (float)i;
a[i].z = (float)i;
b[i].x = (float)i;
b[i].y = (float)i;
b[i].z = (float)i;
#else
a[i].x = (float)i / (float)N * max_value;
a[i].y = (float)i / (float)N * max_value;
a[i].z = (float)i / (float)N * max_value;
b[i].x = max_value - (float)i / (float)N * max_value;
b[i].y = max_value - (float)i / (float)N * max_value;
b[i].z = max_value - (float)i / (float)N * max_value;
#endif
}
printf("Initialized.\n");
printf("Set GPU to number %d.\n", num_gpu);
cudaSetDevice(num_gpu);
printf("Initialize VRAM...");
HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(Vector3_t)));
HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(Vector3_t)));
HANDLE_ERROR(cudaMalloc((void **)&dev_vector, num_vector_operation * N * sizeof(Vector3_t)));
HANDLE_ERROR(cudaMalloc((void **)&dev_scalar, num_scalar_operation * N * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(Vector3_t), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(Vector3_t), cudaMemcpyHostToDevice));
printf("Initialized.\n");
clock_gettime(CLOCK_REALTIME, &start_time);
add <<< N, 1 >>> (dev_a, dev_b, dev_vector, (int)(0 * N), N);
sub <<< N, 1 >>> (dev_a, dev_b, dev_vector, (int)(1 * N), N);
prod <<< N, 1 >>> (dev_a, dev_b, dev_scalar, (int)(0 * N), N);
cross <<< N, 1 >>> (dev_a, dev_b, dev_vector, (int)(2 * N), N);
clock_gettime(CLOCK_REALTIME, &end_time);
HANDLE_ERROR(cudaMemcpy(result_vector, dev_vector, num_vector_operation * N * sizeof(Vector3_t), cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(result_scalar, dev_scalar, num_scalar_operation * N * sizeof(float), cudaMemcpyDeviceToHost));
line();
printf("Addition\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) + (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
now_output_vector_operation += 1;
printf("Subtraction\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) - (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
printf("Dot product\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) * (%f, %f, %f) = %f \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_scalar[(1 + now_output_scalar_operation) * N - num_until_last + i]
);
line();
now_output_vector_operation += 1;
printf("Cross product\n");
for(int i = 0; i < num_until_last; i++)
printf("(%f, %f, %f) x (%f, %f, %f) = (%f, %f, %f) \n",
a[N - num_until_last + i].x,
a[N - num_until_last + i].y,
a[N - num_until_last + i].z,
b[N - num_until_last + i].x,
b[N - num_until_last + i].y,
b[N - num_until_last + i].z,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].x,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].y,
result_vector[(1 + now_output_vector_operation) * N - num_until_last + i].z
);
line();
process_time_sec = end_time.tv_sec - start_time.tv_sec;
process_time_nsec = end_time.tv_nsec - start_time.tv_nsec;
printf("処理時間: %lf[ms]\n", (double)process_time_sec * 1000. + (double)process_time_nsec / (1000.0 * 1000.0));
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_vector));
HANDLE_ERROR(cudaFree(dev_scalar));
free(a);
free(b);
free(result_vector);
free(result_scalar);
return 0;
}
|
08c0d9729b0c6d6760dbb9f8d01090c81b1315bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
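// Editor's note: this is the classic rand()-based Fisher-Yates shuffle; together with
// the fixed srand(1) seed used in init_cpu_data below, the generated pointer-chasing
// order is reproducible from run to run.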
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(0){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=7;
}
if(0){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(1){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
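// Editor's note (illustrative example with made-up numbers): the "random" branch above
// links the visited slots into a single cycle. With stride = 4, iterations = 3 and a
// shuffled order {2, 0, 1} it would produce
//   A[8] = 0, A[0] = 4, A[4] = 8
// so repeated j = A[j] walks every chosen slot once before wrapping around; the second
// loop builds the same cycle again at index offset +7 so a second chase can start there.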
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ unsigned s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//unsigned total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
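// Editor's note: D[] / s_tvalue hold one latency per access in raw GPU clock cycles.
// clock_rate is queried in kHz, i.e. cycles per millisecond, so the host-side print
// converts with cycles / clock_rate; for example, a 400-cycle access on a device
// clocked at 1,328,500 kHz is 400 / 1328500 ~= 0.0003 ms (about 300 ns).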
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 2 * 256 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;///when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? (cache)
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: hipMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: hipMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: hipMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
| 08c0d9729b0c6d6760dbb9f8d01090c81b1315bf.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(0){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=7;
}
if(0){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(1){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ unsigned s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//unsigned total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (long long int it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
for(unsigned data_stride = 1 * 1 * 256; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 2 * 256 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;///when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? (cache)
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: cudaMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: cudaMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);////removed: cudaMallocManaged below overwrites this pointer, so the host malloc only leaked memory
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 512){
reduced_iter = 512;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%lld##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
52a7106698dc3f2f812a611fde5b91e531fd3870.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_erfcxf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_erfcxf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vec_erfcxf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vec_erfcxf, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
hipDeviceSynchronize(); // drain the queued launches so the elapsed time below reflects execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 52a7106698dc3f2f812a611fde5b91e531fd3870.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_erfcxf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_erfcxf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_erfcxf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_erfcxf<<<gridBlock,threadBlock>>>(n,result,x);
}
cudaDeviceSynchronize(); // drain the queued launches so the elapsed time below reflects execution, not just launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f489a45811a57955e4e4b515ee4461ae2469e11d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
int npivots;
int ipiv[MAX_PIVOTS];
} zlaswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswp_kernel(
int n,
magmaDoubleComplex *dAT, int ldda,
zlaswp_params_t params )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dAT += tid;
magmaDoubleComplex *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaDoubleComplex *A2 = dAT + i2*ldda;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldda; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,M)
The M-by-N matrix, stored transposed as N-by-M matrix embedded in
LDDA-by-M array. M is not given; it is implicit.
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldda INTEGER
The leading dimension of the array A. ldda >= n.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
// used in zgessm, zgetrf_incpiv.
extern "C" void
magmablas_zlaswp(
magma_int_t n,
magmaDoubleComplex_ptr dAT, magma_int_t ldda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dAT(i_, j_) (dAT + (i_)*ldda + (j_))
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( n > ldda )
info = -3;
else if ( k1 < 1 )
info = -4;
else if ( k2 < 1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
zlaswp_params_t params;
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
params.npivots = npivots;
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
hipLaunchKernelGGL(( zlaswp_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dAT(k,0), ldda, params );
}
#undef dAT
}
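/* Editor's note: minimal usage sketch (hedged -- the identifiers and sizes below are
   illustrative, not taken from this file). Applying the first nb pivots of a host-side
   ipiv to a row-wise stored panel dAT would look like:
       magmablas_zlaswp( n, dAT, ldda, 1, nb, ipiv, 1, queue );
   i.e. k1 = 1, k2 = nb (Fortran one-based indices) and inci = 1. */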
/******************************************************************************/
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswpx_kernel(
int n,
magmaDoubleComplex *dA, int ldx, int ldy,
zlaswp_params_t params )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dA += tid*ldy;
magmaDoubleComplex *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaDoubleComplex *A2 = dA + i2*ldx;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA COMPLEX*16 array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldx INTEGER
Stride between elements in same column.
@param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=ldda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=ldda.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
extern "C" void
magmablas_zlaswpx(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy)
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
zlaswp_params_t params;
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
params.npivots = npivots;
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
hipLaunchKernelGGL(( zlaswpx_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dA(k,0), ldx, ldy, params );
}
#undef dA
}
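/* Editor's note: hedged sketch of the two storage layouts documented above
   (identifiers are illustrative):
       magmablas_zlaswpx( n, dA,  1,    ldda, k1, k2, ipiv, 1, queue );  // column-wise dA
       magmablas_zlaswpx( n, dAT, ldda, 1,    k1, k2, ipiv, 1, queue );  // row-wise dAT
*/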
/******************************************************************************/
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_zlaswp
// (including copying pivots to the GPU).
__global__ void zlaswp2_kernel(
int n,
magmaDoubleComplex *dAT, int ldda,
int npivots,
const magma_int_t *d_ipiv, int inci )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dAT += tid;
magmaDoubleComplex *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index
magmaDoubleComplex *A2 = dAT + i2*ldda;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldda; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, IPIV > 0.
TODO: If IPIV is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
extern "C" void
magmablas_zlaswp2(
magma_int_t n,
magmaDoubleComplex_ptr dAT, magma_int_t ldda,
magma_int_t k1, magma_int_t k2,
magmaInt_const_ptr d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dAT(i_, j_) (dAT + (i_)*ldda + (j_))
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t nb = k2-(k1-1);
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
hipLaunchKernelGGL(( zlaswp2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
n, dAT(k1-1,0), ldda, nb, d_ipiv, inci );
}
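/* Editor's note: hedged sketch -- zlaswp2 expects the pivot vector already resident on
   the GPU, so a caller holding a host-side ipiv would stage it first. The helper names
   below (magma_imalloc, magma_setvector, magma_free) are the usual MAGMA utilities and
   are an assumption here, not something this file defines:
       magma_int_t *d_ipiv;
       magma_imalloc( &d_ipiv, nb );
       magma_setvector( nb, sizeof(magma_int_t), ipiv, 1, d_ipiv, 1, queue );
       magmablas_zlaswp2( n, dAT, ldda, 1, nb, d_ipiv, 1, queue );
       magma_free( d_ipiv );
*/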
| f489a45811a57955e4e4b515ee4461ae2469e11d.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> s d c
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
int npivots;
int ipiv[MAX_PIVOTS];
} zlaswp_params_t;
// Matrix A is stored row-wise in dAT.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswp_kernel(
int n,
magmaDoubleComplex *dAT, int ldda,
zlaswp_params_t params )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dAT += tid;
magmaDoubleComplex *A1 = dAT;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaDoubleComplex *A2 = dAT + i2*ldda;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldda; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWP performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,M)
The M-by-N matrix, stored transposed as N-by-M matrix embedded in
LDDA-by-M array. M is not given; it is implicit.
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldda INTEGER
The leading dimension of the array A. ldda >= n.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k1.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (Fortran one-based index: 1 <= k2.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
// used in zgessm, zgetrf_incpiv.
extern "C" void
magmablas_zlaswp(
magma_int_t n,
magmaDoubleComplex_ptr dAT, magma_int_t ldda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dAT(i_, j_) (dAT + (i_)*ldda + (j_))
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( n > ldda )
info = -3;
else if ( k1 < 1 )
info = -4;
else if ( k2 < 1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
zlaswp_params_t params;
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
params.npivots = npivots;
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
zlaswp_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( n, dAT(k,0), ldda, params );
}
#undef dAT
}
/******************************************************************************/
// Extended version has stride in both directions (ldx, ldy)
// to handle both row-wise and column-wise storage.
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void zlaswpx_kernel(
int n,
magmaDoubleComplex *dA, int ldx, int ldy,
zlaswp_params_t params )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dA += tid*ldy;
magmaDoubleComplex *A1 = dA;
for( int i1 = 0; i1 < params.npivots; ++i1 ) {
int i2 = params.ipiv[i1];
magmaDoubleComplex *A2 = dA + i2*ldx;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldx; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWPX performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored either row-wise or column-wise,
depending on ldx and ldy. **
Otherwise, this is identical to LAPACK's interface.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA COMPLEX*16 array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldx INTEGER
Stride between elements in same column.
@param[in]
ldy INTEGER
Stride between elements in same row.
For A stored row-wise, set ldx=ldda and ldy=1.
For A stored column-wise, set ldx=1 and ldy=ldda.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
extern "C" void
magmablas_zlaswpx(
magma_int_t n,
magmaDoubleComplex_ptr dA, magma_int_t ldx, magma_int_t ldy,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy)
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
zlaswp_params_t params;
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
params.npivots = npivots;
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - k - 1;
}
zlaswpx_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( n, dA(k,0), ldx, ldy, params );
}
#undef dA
}
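/*
    Usage sketch (assumed variable names): the ldx/ldy strides select the storage layout,
    so the same pivots can be applied to either layout:

        magmablas_zlaswpx( n, dAT, ldda, 1,    1, nb, ipiv, 1, queue );  // row-wise:    ldx=ldda, ldy=1
        magmablas_zlaswpx( n, dA,  1,    ldda, 1, nb, ipiv, 1, queue );  // column-wise: ldx=1,    ldy=ldda
*/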
/******************************************************************************/
// This version takes d_ipiv on the GPU. Thus it does not pass pivots
// as an argument using a structure, avoiding all the argument size
// limitations of CUDA and OpenCL. It also needs just one kernel launch
// with all the pivots, instead of multiple kernel launches with small
// batches of pivots. On Fermi, it is faster than magmablas_zlaswp
// (including copying pivots to the GPU).
__global__ void zlaswp2_kernel(
int n,
magmaDoubleComplex *dAT, int ldda,
int npivots,
const magma_int_t *d_ipiv, int inci )
{
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < n ) {
dAT += tid;
magmaDoubleComplex *A1 = dAT;
for( int i1 = 0; i1 < npivots; ++i1 ) {
int i2 = d_ipiv[i1*inci] - 1; // Fortran index
magmaDoubleComplex *A2 = dAT + i2*ldda;
magmaDoubleComplex temp = *A1;
*A1 = *A2;
*A2 = temp;
A1 += ldda; // A1 = dA + i1*ldx
}
}
}
/***************************************************************************//**
Purpose:
=============
ZLASWP2 performs a series of row interchanges on the matrix A.
One row interchange is initiated for each of rows K1 through K2 of A.
** Unlike LAPACK, here A is stored row-wise (hence dAT). **
Otherwise, this is identical to LAPACK's interface.
Here, d_ipiv is passed in GPU memory.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dAT COMPLEX*16 array on GPU, stored row-wise, dimension (LDDA,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
ldda INTEGER
The leading dimension of the array A.
(I.e., stride between elements in a column.)
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp
*******************************************************************************/
extern "C" void
magmablas_zlaswp2(
magma_int_t n,
magmaDoubleComplex_ptr dAT, magma_int_t ldda,
magma_int_t k1, magma_int_t k2,
magmaInt_const_ptr d_ipiv, magma_int_t inci,
magma_queue_t queue )
{
#define dAT(i_, j_) (dAT + (i_)*ldda + (j_))
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t nb = k2-(k1-1);
dim3 threads( NTHREADS );
dim3 grid( magma_ceildiv( n, NTHREADS ) );
zlaswp2_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( n, dAT(k1-1,0), ldda, nb, d_ipiv + (k1-1)*inci, inci );  // start at position K1 of IPIV, as documented
#undef dAT
}
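/*
    Note / usage sketch: d_ipiv lives in GPU memory, so the pivots must be copied to the
    device first. Assuming MAGMA's integer set-vector helper, something like

        magma_isetvector( k2, ipiv, 1, d_ipiv, 1, queue );
        magmablas_zlaswp2( n, dAT, ldda, k1, k2, d_ipiv, 1, queue );

    performs the same swaps as magmablas_zlaswp with host pivots, but in a single
    kernel launch.
*/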
|
fb9c4f6800d8f9dbd6d1b3dac480f02ac07b09a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_matrix/runtime.h"
#include <glog/logging.h>
namespace cuda {
template <>
Runtime<FLOATING_POINT_TYPE>* Runtime<FLOATING_POINT_TYPE>::INSTANCE_ = new Runtime;
template <typename FloatT>
Runtime<FloatT>::Runtime() : ZERO(nullptr), ONE(nullptr) {
int device_count;
checkCudaErrors(hipGetDeviceCount(&device_count));
LOG_IF(FATAL, (device_count == 0)) << "Unable to find any CUDA-enabled device.";
const int32 device_id = 0;
CHECK_LT(device_id, device_count)
<< "Invalid CUDA device identifier "
<< "(" << device_count << " devices available).";
CCE(hipSetDevice(device_id));
// Fresh start.
CCE(hipDeviceReset());
// Hard-coded to run on device #0.
CCE(hipGetDeviceProperties(&props_, device_id));
CCE(hipblasCreate(&handle_));
CCE(hipblasSetPointerMode(handle_, HIPBLAS_POINTER_MODE_DEVICE));
LOG(INFO) << "Using device #" << device_id << ".";
memset(&device_, 0, sizeof(device_));
device_.device = device_id;
device_.size = (size_t) (0.85 * props_.totalGlobalMem);
CHECK_EQ(CNMEM_STATUS_SUCCESS, cnmemInit(1, &device_, CNMEM_FLAGS_DEFAULT));
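// The BLAS handle above uses device pointer mode, so scalar alpha/beta arguments
// must live in device memory; keep reusable 0 and 1 constants there.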
CCE(hipMalloc(const_cast<FloatT**>(&ZERO), sizeof(FloatT)));
const FloatT zero = 0.0;
CCE(hipMemcpy(const_cast<FloatT*>(ZERO), &zero,
sizeof(FloatT),
hipMemcpyHostToDevice));
CCE(hipMalloc(const_cast<FloatT**>(&ONE), sizeof(FloatT)));
const FloatT one = 1.0;
CCE(hipMemcpy(const_cast<FloatT*>(ONE),
&one, sizeof(FloatT),
hipMemcpyHostToDevice));
}
const decltype(&hipblasSgemm) CuBLAS<float32>::gemm = &hipblasSgemm;
const decltype(&hipblasSgemv) CuBLAS<float32>::gemv = &hipblasSgemv;
const decltype(&hipblasSger) CuBLAS<float32>::ger = &hipblasSger;
const decltype(&hipblasDgemm) CuBLAS<float64>::gemm = &hipblasDgemm;
const decltype(&hipblasDgemv) CuBLAS<float64>::gemv = &hipblasDgemv;
const decltype(&hipblasDger) CuBLAS<float64>::ger = &hipblasDger;
// Explicit instantiation.
template class Runtime<FLOATING_POINT_TYPE>;
} // namespace cuda
| fb9c4f6800d8f9dbd6d1b3dac480f02ac07b09a1.cu | #include "device_matrix/runtime.h"
#include <glog/logging.h>
namespace cuda {
template <>
Runtime<FLOATING_POINT_TYPE>* Runtime<FLOATING_POINT_TYPE>::INSTANCE_ = new Runtime;
template <typename FloatT>
Runtime<FloatT>::Runtime() : ZERO(nullptr), ONE(nullptr) {
int device_count;
checkCudaErrors(cudaGetDeviceCount(&device_count));
LOG_IF(FATAL, (device_count == 0)) << "Unable to find any CUDA-enabled device.";
const int32 device_id = 0;
CHECK_LT(device_id, device_count)
<< "Invalid CUDA device identifier "
<< "(" << device_count << " devices available).";
CCE(cudaSetDevice(device_id));
// Fresh start.
CCE(cudaDeviceReset());
// Hard-coded to run on device #0.
CCE(cudaGetDeviceProperties(&props_, device_id));
CCE(cublasCreate(&handle_));
CCE(cublasSetPointerMode(handle_, CUBLAS_POINTER_MODE_DEVICE));
LOG(INFO) << "Using device #" << device_id << ".";
memset(&device_, 0, sizeof(device_));
device_.device = device_id;
device_.size = (size_t) (0.85 * props_.totalGlobalMem);
CHECK_EQ(CNMEM_STATUS_SUCCESS, cnmemInit(1, &device_, CNMEM_FLAGS_DEFAULT));
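// The cuBLAS handle above uses device pointer mode, so scalar alpha/beta arguments
// must live in device memory; keep reusable 0 and 1 constants there.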
CCE(cudaMalloc(const_cast<FloatT**>(&ZERO), sizeof(FloatT)));
const FloatT zero = 0.0;
CCE(cudaMemcpy(const_cast<FloatT*>(ZERO), &zero,
sizeof(FloatT),
cudaMemcpyHostToDevice));
CCE(cudaMalloc(const_cast<FloatT**>(&ONE), sizeof(FloatT)));
const FloatT one = 1.0;
CCE(cudaMemcpy(const_cast<FloatT*>(ONE),
&one, sizeof(FloatT),
cudaMemcpyHostToDevice));
}
const decltype(&cublasSgemm) CuBLAS<float32>::gemm = &cublasSgemm;
const decltype(&cublasSgemv) CuBLAS<float32>::gemv = &cublasSgemv;
const decltype(&cublasSger) CuBLAS<float32>::ger = &cublasSger;
const decltype(&cublasDgemm) CuBLAS<float64>::gemm = &cublasDgemm;
const decltype(&cublasDgemv) CuBLAS<float64>::gemv = &cublasDgemv;
const decltype(&cublasDger) CuBLAS<float64>::ger = &cublasDger;
// Explicit instantiation.
template class Runtime<FLOATING_POINT_TYPE>;
} // namespace cuda
|
c317dd43ce4af0e6be5e0b3a04914c89c999f6e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "hip/hip_runtime.h"
#include "caffe/layer.hpp"
#include "caffe/layers/spatial_transformer_layer.hpp"
namespace caffe {
///////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void forward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
//int div = channels_ * output_H_ * output_W_;
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 6 * n;
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
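// Bilinear sampling: accumulate the (up to) four input neighbors weighted by
// (1 - |x - ww|) * (1 - |y - hh|).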
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out,
const float theta_1_1, const float theta_2_2//, const Dtype* fill_value_
) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 2 * n;
Dtype x = theta_1_1 * x_target + theta[offset];
Dtype y = theta_2_2 * y_target + theta[offset + 1];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
Dtype x = x_target * theta[offset] + theta[offset + 1];
Dtype y = y_target * theta[offset + 2] + theta[offset + 3];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 8 * n;
Dtype z = 1 / (x_target * theta[offset + 6] + y_target * theta[offset + 7] + 1);
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
/*offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = (x *= z);
source_data[offset + 1] = (y *= z);
source_data[offset + 2] = z;*/
x = (x * z + (Dtype) 1.) * (width_ - 1) / 2;
y = (y * z + (Dtype) 1.) * (height_ - 1) / 2;
offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = x;
source_data[offset + 1] = y;
source_data[offset + 2] = z;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* out) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta[offset];
Dtype y = theta[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
offset = (n * channels_ + c) * height_ * width_;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}
}
}
template <typename Dtype>
__global__ void forward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
// 0: alpha
// 1: scaling
// 2: tx
// 3: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype x = theta[offset + 1] * (x_target * ct - y_target * st) + theta[offset + 2];
Dtype y = theta[offset + 1] * (x_target * st + y_target * ct) + theta[offset + 3];
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
__global__ void forward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype)w / (output_W_ - 1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype)h / (output_H_ - 1) * 2 - (Dtype)1.;
int offset = 5 * n;
// 0: alpha
// 1: scaling_x
// 2: scaling_y
// 3: tx
// 4: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype sx = theta[offset + 1], sy = theta[offset + 2];
Dtype x = sx * x_target * ct - sy * y_target * st + theta[offset + 3];
Dtype y = sx * x_target * st + sy * y_target * ct + theta[offset + 4];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const int count = num_ * map_size_;
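// Transformation type codes (t_type_): 0 affine, 1 translation, 2 translation + scaling,
// 3 projective, 4 per-pixel sampling grid, 5 similarity, 6 similarity with per-axis scaling.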
if (t_type_ == 4) {
forward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_data);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* source_data = source_.mutable_gpu_data();
switch (t_type_) {
case 0:
// affine
forward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 1:
// translation
forward_translation<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data,
this->layer_param_.st_param().theta_1_1(), this->layer_param_.st_param().theta_2_2());// , fill_value_.gpu_data());
break;
case 2:
// translation + scaling
forward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 3:
// projective
forward_projective<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 5:
// similarity
forward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 6:
// similarity+
forward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
}
CUDA_POST_KERNEL_CHECK;
}
///////////////////////////////////////////////////////////////////
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
template <typename Dtype>
__global__ void backward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
if (data_diff) {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype buffer = top_diff[(n * channels_ + c) * map_size + h * output_W_ + w];
// offset in the input image U
offset = ((n * channels_ + c) * height_ + hh) * width_ + ww;
atomic_add(data_diff + offset, buffer * dx * dy);
buffer *= data[offset];
dv_dx += buffer * dy * sign_x;
dv_dy += buffer * dx * sign_y;
}
}
}
}else {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
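// Chain rule: x_src = th11*x_t + th12*y_t + th13 (and similarly y_src), so the per-pixel
// gradient w.r.t. each theta entry is dv_dx or dv_dy times {x_target, y_target, 1}.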
n = n * 6 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
n = n * 2 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx;
theta_diff_cache[n + map_size] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 4 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx;
theta_diff_cache[n + map_size*2] = dv_dy * y_target;
theta_diff_cache[n + map_size*3] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
//const Dtype width_const = (Dtype)2 / (Dtype)(width_ - 1);
//const Dtype height_const = (Dtype)2 / (Dtype)(height_ - 1);
const Dtype width_const = (Dtype)(width_ - 1) / 2;
const Dtype height_const = (Dtype)(height_ - 1) / 2;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 3;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
Dtype z = source_data[offset + 2];
//Dtype x = (x0 + (Dtype) 1.) * (width_ - 1) / 2;
//Dtype y = (y0 + (Dtype) 1.) * (height_ - 1) / 2;
Dtype x0 = x - width_const, y0 = y - height_const;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
Dtype tmp_source_z = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
Dtype dv_dx_i = u * dy * sign_x;
Dtype dv_dy_i = u * dx * sign_y;
dv_dx += dv_dx_i;
dv_dy += dv_dy_i;
tmp_source_z -= dv_dx_i * x0 + dv_dy_i * y0;
}
}
}
dv_dx *= width_const * z;
dv_dy *= height_const * z;
tmp_source_z *= z;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 8 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
theta_diff_cache[n + map_size*6] = tmp_source_z * x_target;
theta_diff_cache[n + map_size*7] = tmp_source_z * y_target;
}
}
template <typename Dtype>
__global__ void backward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* theta_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta_data[offset];
Dtype y = theta_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
theta_diff[offset] = dv_dx;
theta_diff[offset + 1] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x0 = source_data[offset];
Dtype y0 = source_data[offset + 1];
Dtype x = (x0 + (Dtype) 1.) / 2 * (width_ - 1);
Dtype y = (y0 + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
//Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
//Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 4 * n;
n = offset * map_size + h * output_W_ + w;
Dtype s = 1 / theta_data[offset + 1];
x0 -= theta_data[offset + 2];
y0 -= theta_data[offset + 3];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y0) + dv_dy * (x0); // alpha
theta_diff_cache[n + map_size] = s * (dv_dx * (x0) + dv_dy * (y0)); // scaling
theta_diff_cache[n + map_size * 2] = dv_dx; // tx
theta_diff_cache[n + map_size * 3] = dv_dy; // ty
}
}
template <typename Dtype>
__global__ void backward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 5 * n;
n = offset * map_size + h * output_W_ + w;
Dtype ct = cos(theta_data[offset]), st = sin(theta_data[offset]);
Dtype sx = 1 / theta_data[offset + 1], sy = 1 / theta_data[offset + 2];
x -= theta_data[offset + 3];
y -= theta_data[offset + 4];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y) + dv_dy * (x); // alpha
theta_diff_cache[n + map_size] = (dv_dx * ct - dv_dy * st) * x_target; // scaling x
theta_diff_cache[n + map_size * 2] = (-dv_dx * st + dv_dy * ct) * y_target; // scaling y
theta_diff_cache[n + map_size * 3] = dv_dx; // tx
theta_diff_cache[n + map_size * 4] = dv_dy; // ty
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = 0;
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
int count = num_ * map_size_;
if (t_type_ == 4) {
const Dtype* theta_data = bottom[1]->gpu_data();
backward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_diff, // input
data_diff, theta_diff // output
);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* theta_diff_cache = theta_diff_cache_.mutable_gpu_data();
const Dtype* source_data = source_.gpu_data();
if (propagate_down[0]) {
data_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
}
//caffe_gpu_set<Dtype>(bottom[1]->count(), 0, theta_diff); // UNNECESSARY
switch (t_type_) {
case 0:
// affine
// compute gradient with respect to theta
backward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, // input
data_diff, theta_diff_cache // output
);
// aggregate gradient for theta
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 6, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 1:
// translation
backward_translation<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 2, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 2:
// translation + scaling
backward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 3:
// projective
backward_projective<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 8, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 5:
// similarity
backward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 6:
// similarity+
backward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 5, 1, map_size_,  // 5 similarity+ params: alpha, sx, sy, tx, ty
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
| c317dd43ce4af0e6be5e0b3a04914c89c999f6e0.cu | #include <algorithm>
#include <vector>
#include "cuda.h"
#include "caffe/layer.hpp"
#include "caffe/layers/spatial_transformer_layer.hpp"
namespace caffe {
///////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void forward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
//int div = channels_ * output_H_ * output_W_;
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 6 * n;
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
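// Bilinear sampling: accumulate the (up to) four input neighbors weighted by
// (1 - |x - ww|) * (1 - |y - hh|).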
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out,
const float theta_1_1, const float theta_2_2//, const Dtype* fill_value_
) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 2 * n;
Dtype x = theta_1_1 * x_target + theta[offset];
Dtype y = theta_2_2 * y_target + theta[offset + 1];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
Dtype x = x_target * theta[offset] + theta[offset + 1];
Dtype y = y_target * theta[offset + 2] + theta[offset + 3];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
source_data[offset + 1] = y;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 8 * n;
Dtype z = 1 / (x_target * theta[offset + 6] + y_target * theta[offset + 7] + 1);
Dtype x = x_target * theta[offset] + y_target * theta[offset + 1] + theta[offset + 2];
Dtype y = x_target * theta[offset + 3] + y_target * theta[offset + 4] + theta[offset + 5];
/*offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = (x *= z);
source_data[offset + 1] = (y *= z);
source_data[offset + 2] = z;*/
x = (x * z + (Dtype) 1.) * (width_ - 1) / 2;
y = (y * z + (Dtype) 1.) * (height_ - 1) / 2;
offset = (n * map_size + h * output_W_ + w) * 3;
source_data[offset] = x;
source_data[offset + 1] = y;
source_data[offset + 2] = z;
x = x > 0 ? x : 0; x = x < (width_ - 1) ? x : width_ - 1;
y = y > 0 ? y : 0; y = y < (height_ - 1) ? y : height_ - 1;
int w_min = (int)floor(x);
int w_max = (int)ceil(x);
int h_min = (int)floor(y);
int h_max = (int)ceil(y);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
/*int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype tmp;
if (h_max < h_min || w_max < w_min) {
tmp = fill_value_[c];
}
else {
tmp = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}*/
}
}
template <typename Dtype>
__global__ void forward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* out) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta[offset];
Dtype y = theta[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
offset = (n * channels_ + c) * height_ * width_;
Dtype tmp = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
tmp += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = tmp;
}
}
}
template <typename Dtype>
__global__ void forward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
int offset = 4 * n;
// 0: alpha
// 1: scaling
// 2: tx
// 3: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype x = theta[offset + 1] * (x_target * ct - y_target * st) + theta[offset + 2];
Dtype y = theta[offset + 1] * (x_target * st + y_target * ct) + theta[offset + 3];
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
__global__ void forward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* in, const Dtype* theta, Dtype* source_data, Dtype* out) {//, const Dtype* fill_value_) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
Dtype x_target = (Dtype)w / (output_W_ - 1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype)h / (output_H_ - 1) * 2 - (Dtype)1.;
int offset = 5 * n;
// 0: alpha
// 1: scaling_x
// 2: scaling_y
// 3: tx
// 4: ty
Dtype ct = cos(theta[offset]), st = sin(theta[offset]);
Dtype sx = theta[offset + 1], sy = theta[offset + 2];
Dtype x = sx * x_target * ct - sy * y_target * st + theta[offset + 3];
Dtype y = sx * x_target * st + sy * y_target * ct + theta[offset + 4];
x = (x + (Dtype) 1.) / 2 * (width_ - 1);
y = (y + (Dtype) 1.) / 2 * (height_ - 1);
//offset = n * map_size * 2 + h * output_W_ + w;
offset = (n * map_size + h * output_W_ + w) * 2;
source_data[offset] = x;
//source_data[offset + map_size] = y;
source_data[offset + 1] = y;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
for (int c = 0; c < channels_; ++c) {
Dtype r = 0;
offset = (n * channels_ + c) * height_ * width_;
for (int hh = h_min; hh <= h_max; ++hh) {
const Dtype dy = (1 - fabs(y - hh));
for (int ww = w_min; ww <= w_max; ++ww) {
r += in[offset + hh * width_ + ww] * (1 - fabs(x - ww)) * dy;
}
}
out[(n * channels_ + c) * map_size + h * output_W_ + w] = r;
}
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* theta_data = bottom[1]->gpu_data();
const int count = num_ * map_size_;
if (t_type_ == 4) {
forward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_data);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* source_data = source_.mutable_gpu_data();
switch (t_type_) {
case 0:
// affine
forward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 1:
// translation
forward_translation<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data,
this->layer_param_.st_param().theta_1_1(), this->layer_param_.st_param().theta_2_2());// , fill_value_.gpu_data());
break;
case 2:
// translation + scaling
forward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 3:
// projective
forward_projective<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);// , fill_value_.gpu_data());
break;
case 5:
// similarity
forward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
case 6:
// similarity+
forward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_, height_, width_, output_H_, output_W_,
bottom_data, theta_data, source_data, top_data);//, fill_value_.gpu_data());
break;
}
CUDA_POST_KERNEL_CHECK;
}
///////////////////////////////////////////////////////////////////
__device__ inline void atomic_add(float * address, float val) {
atomicAdd(address, val);
}
__device__ inline void atomic_add(double * address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
} while (assumed != old);
}
// compute (1) d{V_i} / d{x_i}, then (2) d{V_i} / d{theta}
// compute sum_{i} d{V_i} / d{U_nm}
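// For bilinear sampling V = sum_{hh,ww} U[hh][ww] * max(0, 1 - |x - ww|) * max(0, 1 - |y - hh|),
// the spatial gradients accumulated below are
//   dV/dx = sum_{hh,ww} U[hh][ww] * max(0, 1 - |y - hh|) * sign(ww - x)
//   dV/dy = sum_{hh,ww} U[hh][ww] * max(0, 1 - |x - ww|) * sign(hh - y)
// and for the affine case dx/dtheta = [x_t, y_t, 1, 0, 0, 0], dy/dtheta = [0, 0, 0, x_t, y_t, 1],
// which is why theta_diff_cache stores the six products {dv_dx*x_t, dv_dx*y_t, dv_dx, dv_dy*x_t,
// dv_dy*y_t, dv_dy} per output pixel before they are summed over the map in Backward_gpu.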
template <typename Dtype>
__global__ void backward_affine(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
if (data_diff) {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype buffer = top_diff[(n * channels_ + c) * map_size + h * output_W_ + w];
// offset in the input image U
offset = ((n * channels_ + c) * height_ + hh) * width_ + ww;
atomic_add(data_diff + offset, buffer * dx * dy);
buffer *= data[offset];
dv_dx += buffer * dy * sign_x;
dv_dy += buffer * dx * sign_y;
}
}
}
}else {
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 6 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
n = n * 2 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx;
theta_diff_cache[n + map_size] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_translation_scaling(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 4 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx;
theta_diff_cache[n + map_size*2] = dv_dy * y_target;
theta_diff_cache[n + map_size*3] = dv_dy;
}
}
template <typename Dtype>
__global__ void backward_projective(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
//const Dtype width_const = (Dtype)2 / (Dtype)(width_ - 1);
//const Dtype height_const = (Dtype)2 / (Dtype)(height_ - 1);
const Dtype width_const = (Dtype)(width_ - 1) / 2;
const Dtype height_const = (Dtype)(height_ - 1) / 2;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 3;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
Dtype z = source_data[offset + 2];
//Dtype x = (x0 + (Dtype) 1.) * (width_ - 1) / 2;
//Dtype y = (y0 + (Dtype) 1.) * (height_ - 1) / 2;
Dtype x0 = x - width_const, y0 = y - height_const;
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
Dtype tmp_source_z = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
Dtype dv_dx_i = u * dy * sign_x;
Dtype dv_dy_i = u * dx * sign_y;
dv_dx += dv_dx_i;
dv_dy += dv_dy_i;
tmp_source_z -= dv_dx_i * x0 + dv_dy_i * y0;
}
}
}
dv_dx *= width_const * z;
dv_dy *= height_const * z;
tmp_source_z *= z;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
n = n * 8 * map_size + h * output_W_ + w;
theta_diff_cache[n] = dv_dx * x_target;
theta_diff_cache[n + map_size] = dv_dx * y_target;
theta_diff_cache[n + map_size*2] = dv_dx;
theta_diff_cache[n + map_size*3] = dv_dy * x_target;
theta_diff_cache[n + map_size*4] = dv_dy * y_target;
theta_diff_cache[n + map_size*5] = dv_dy;
theta_diff_cache[n + map_size*6] = tmp_source_z * x_target;
theta_diff_cache[n + map_size*7] = tmp_source_z * y_target;
}
}
template <typename Dtype>
__global__ void backward_grid(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* theta_data, const Dtype* top_diff,
Dtype* data_diff, Dtype* theta_diff) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = theta_data[offset];
Dtype y = theta_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
theta_diff[offset] = dv_dx;
theta_diff[offset + 1] = dv_dy;
}
}
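// For the dense-grid case (t_type_ == 4) theta itself holds one (x, y) sampling location per output
// pixel, so the gradient w.r.t. theta is simply (dv_dx, dv_dy) written in place -- no per-sample
// reduction over the output map is needed, unlike the parametric transforms.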
template <typename Dtype>
__global__ void backward_similarity(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x0 = source_data[offset];
Dtype y0 = source_data[offset + 1];
Dtype x = (x0 + (Dtype) 1.) / 2 * (width_ - 1);
Dtype y = (y0 + (Dtype) 1.) / 2 * (height_ - 1);
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
//Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
//Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 4 * n;
n = offset * map_size + h * output_W_ + w;
Dtype s = 1 / theta_data[offset + 1];
x0 -= theta_data[offset + 2];
y0 -= theta_data[offset + 3];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y0) + dv_dy * (x0); // alpha
theta_diff_cache[n + map_size] = s * (dv_dx * (x0) + dv_dy * (y0)); // scaling
theta_diff_cache[n + map_size * 2] = dv_dx; // tx
theta_diff_cache[n + map_size * 3] = dv_dy; // ty
}
}
template <typename Dtype>
__global__ void backward_similarity_plus(const int count, const int channels_,
const int height_, const int width_, const int output_H_, const int output_W_,
const Dtype* data, const Dtype* source_data, const Dtype* top_diff, const Dtype* theta_data,
Dtype* data_diff, Dtype* theta_diff_cache) {
const int map_size = output_H_ * output_W_;
CUDA_KERNEL_LOOP(index, count) {
int n = index / map_size;
int n_rem = index % map_size;
int h = n_rem / output_W_;
int w = n_rem % output_W_;
int offset = (n * map_size + h * output_W_ + w) * 2;
Dtype x = source_data[offset];
Dtype y = source_data[offset + 1];
int w_min = (floor(x) > 0) ? floor(x) : 0;
int w_max = (ceil(x) < width_ - 1) ? ceil(x) : (width_ - 1);
int h_min = (floor(y) > 0) ? floor(y) : 0;
int h_max = (ceil(y) < height_ - 1) ? ceil(y) : (height_ - 1);
Dtype dv_dx = 0;
Dtype dv_dy = 0;
for (int hh = h_min; hh <= h_max; ++hh) {
int sign_y = (Dtype(0) <= Dtype(hh - y)) - (Dtype(hh - y) < Dtype(0));
Dtype dy = 1 - fabs(y - hh);
for (int ww = w_min; ww <= w_max; ++ww) {
int sign_x = (Dtype(0) <= Dtype(ww - x)) - (Dtype(ww - x) < Dtype(0));
Dtype dx = 1 - fabs(x - ww);
for (int c = 0; c < channels_; ++c) {
Dtype u =
top_diff[(n * channels_ + c) * map_size + h * output_W_ + w] *
data[((n * channels_ + c) * height_ + hh) * width_ + ww];
dv_dx += u * dy * sign_x;
dv_dy += u * dx * sign_y;
}
}
}
dv_dx *= (Dtype)(width_ - 1) / 2;
dv_dy *= (Dtype)(height_ - 1) / 2;
Dtype x_target = (Dtype) w / (output_W_-1) * 2 - (Dtype)1.;
Dtype y_target = (Dtype) h / (output_H_-1) * 2 - (Dtype)1.;
offset = 5 * n;
n = offset * map_size + h * output_W_ + w;
Dtype ct = cos(theta_data[offset]), st = sin(theta_data[offset]);
Dtype sx = 1 / theta_data[offset + 1], sy = 1 / theta_data[offset + 2];
x -= theta_data[offset + 3];
y -= theta_data[offset + 4];
//theta_diff_cache[n] = dv_dx * (ty - y) + dv_dy * (x - tx); // alpha
//theta_diff_cache[n + map_size] = dv_dx * 1/s * (tx - x) + dv_dy * 1/s * (y - ty); // scaling
theta_diff_cache[n] = dv_dx * (-y) + dv_dy * (x); // alpha
theta_diff_cache[n + map_size] = (dv_dx * ct - dv_dy * st) * x_target; // scaling x
theta_diff_cache[n + map_size * 2] = (-dv_dx * st + dv_dy * ct) * y_target; // scaling y
theta_diff_cache[n + map_size * 3] = dv_dx; // tx
theta_diff_cache[n + map_size * 4] = dv_dy; // ty
}
}
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* data_diff = 0;
Dtype* theta_diff = bottom[1]->mutable_gpu_diff();
int count = num_ * map_size_;
if (t_type_ == 4) {
const Dtype* theta_data = bottom[1]->gpu_data();
backward_grid<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, theta_data, top_diff, // input
data_diff, theta_diff // output
);
CUDA_POST_KERNEL_CHECK;
return;
}
Dtype* theta_diff_cache = theta_diff_cache_.mutable_gpu_data();
const Dtype* source_data = source_.gpu_data();
if (propagate_down[0]) {
data_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set<Dtype>(bottom[0]->count(), 0, data_diff);
}
  //caffe_gpu_set<Dtype>(bottom[1]->count(), 0, theta_diff); // UNNECESSARY
switch (t_type_) {
case 0:
// affine
// compute gradient with respect to theta
backward_affine<Dtype> << <CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS >> >(count, channels_,
height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, // input
data_diff, theta_diff_cache // output
);
// aggregate gradient for theta
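    // (theta_diff_op_ is assumed to be a map_size_-long vector of ones, so this GEMM reduces the
    //  per-pixel cache into per-sample gradients: theta_diff[n*6 + k] = sum over the output map of
    //  theta_diff_cache[(n*6 + k) * map_size_ + ...].)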
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 6, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 1:
// translation
backward_translation<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 2, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 2:
// translation + scaling
backward_translation_scaling<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 3:
// projective
backward_projective<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 8, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 5:
// similarity
backward_similarity<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 4, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
case 6:
// similarity+
backward_similarity_plus<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
count, channels_, height_, width_, output_H_, output_W_,
bottom_data, source_data, top_diff, bottom[1]->gpu_data(),
data_diff, theta_diff_cache);
    // note: similarity+ has 5 parameters per sample (alpha, sx, sy, tx, ty)
    caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * 5, 1, map_size_,
Dtype(1), theta_diff_cache, theta_diff_op_.gpu_data(), Dtype(0), theta_diff);
break;
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer);
} // namespace caffe
|
945df841691c0d47c503f547e0e1d84f16de936b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdio.h>
#include "LIS.cu"
#include "LDS.cu"
#include "EnumaratorSequence.cu"
#include <time.h>
//#define NUM_THREADS 1024
#define THREAD_PER_BLOCK 128
#define N 20
#define NUM_DEVICES 2
__device__
void printVector(char* array, int length){
for(int k = 0; k < length; k++){
printf("%d - ",array[k]);
}
printf("\n");
}
__device__
void inversion(char* vet, int length){
char temp;
for(int i = 0; i < length/2; i++){
temp = vet[length-i-1];
vet[length-i-1] = vet[i];
vet[i] = temp;
}
}
__device__
void rotation(char *array, int length){
char temp;
int i;
temp = array[0];
for (i = 0; i < length-1; i++)
array[i] = array[i+1];
array[i] = temp;
}
unsigned long long fatorialHost(unsigned long long n){
int i;
unsigned long long result = 1;
for(i = n; i > 1; i--){
result *= i;
}
return result;
}
//Computes the LIS over the whole set R, starting from the main pivot of the lexicographic order.
//If it finds a value smaller than the local maximum of S, it returns early and skips the remaining computations.
__global__
void decideLS(char* d_lMax_S, int length, unsigned long long maxSeq, int numThreads, int initThread){
extern __shared__ char s_vet[];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
    int s_index = length*threadIdx.x; //Index into shared memory
    unsigned long long int indexSeq = tid+initThread;
    //These two vectors are used by both LIS and LDS; they are declared out here
    //to use less memory and avoid having to malloc.
    char MP[N*(N+1)/2]; //"Most promising" vector
    char last[N]; //"last" vector for MP
    //Results found by LIS and LDS
char lLIS, lLDS;
char lMin_R;
bool flagFinalLoop;
while(indexSeq < maxSeq){
getSequence(s_vet + s_index, length, indexSeq);
        lMin_R = 20; //Minimum found so far within the set R
        flagFinalLoop = true;
        for(int i = 0; i < length; i++){ //Rotation
lLIS = LIS(s_vet + s_index, last, MP, length);
            //If it is smaller than the minimum of the set R, update the value
if(lLIS < lMin_R){
lMin_R = lLIS;
}
            //The whole set can be discarded, since it will not replace lMax_S in the final result
if(lMin_R <= d_lMax_S[tid]){
flagFinalLoop = false;
break;
}
lLDS = LDS(s_vet + s_index, last, MP, length);
            //If it is smaller than the minimum of the set R, update the value
if(lLDS < lMin_R){
lMin_R = lLDS;
}
            //The whole set can be discarded, since it will not replace lMax_S in the final result
if(lMin_R <= d_lMax_S[tid]){
flagFinalLoop = false;
break;
}
rotation(s_vet + s_index, length);
}
        //If the loop over R ran to completion, the minimum found for R is larger than the
        //local maximum stored so far, so it becomes the new local maximum.
if(flagFinalLoop){
d_lMax_S[tid] = lMin_R;
if (d_lMax_S[tid] == 6) {
printVector(s_vet + s_index, length);
}
//printf("tid = %d, maxS= %d, index %d\n", tid, d_lMax_S[tid], indexSeq);
}
indexSeq += numThreads;
}
__syncthreads();
if (tid == 0) {
char lMaxS = d_lMax_S[0];
for(int i = 0; i < 10240; i++){
//printf("DLMaxS %d . %d\n",i, d_lMax_S[i]);
if(lMaxS < d_lMax_S[i]){
lMaxS = d_lMax_S[i];
}
}
//printf("LmaxS %d\n", lMaxS);
d_lMax_S[0] = lMaxS;
}
}
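//Note: the tid == 0 scan above reads d_lMax_S entries produced by threads in other blocks, while
//__syncthreads() only synchronizes within a block; d_lMax_S[0] therefore only holds the true
//maximum if every block has already finished its while loop when block 0 performs the scan.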
//Given the local maxima of S, compute the global maximum.
void calcLMaxGlobalS(char* lMax_globalS, char* lMax_localS, int tamVec){
    //Number of sets
for(int i = 0; i < tamVec; i++){
//printf("%d\n", lMax_localS[i]);
if(*lMax_globalS < lMax_localS[i]){
*lMax_globalS = lMax_localS[i];
}
}
}
//Let S be the set of all sequences of the first n natural numbers.
//Define R(s), with s \in S, as the set of all sequences that can be
//generated by rotating s.
//Define LIS(s) and LDS(s) as usual and let |LIS(s)| and |LDS(s)| be their
//cardinalities.
//Determine Max_{s \in S}(Min_{s' \in R(s)}(Min(|LIS(s)|, |LDS(s)|)))
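//A tiny worked example (illustration only): for n = 4 and s = (0, 2, 1, 3), |LIS(s)| = 3
//(e.g. 0,2,3) and |LDS(s)| = 2 (e.g. 2,1), so Min(|LIS(s)|, |LDS(s)|) = 2; the kernel then takes
//the minimum of that value over every rotation s' in R(s), and the answer is the maximum of this
//quantity over all pivot sequences s.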
int main(int argc, char *argv[]){
    //char* h_sequence; //Pivot sequence of the group
    //char* h_threadSequences; //Sequences generated for the threads
    char* d_lMax_localS0; //Local maxima of S; each thread keeps its own local maximum
char* d_lMax_localS1;
char* h_lMax_localS0;
char* h_lMax_localS1;
int length = atoi(argv[1]);
int NUM_THREADS = 10240;
//hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
//hipDeviceSetCacheConfig(hipFuncCachePreferL1);
clock_t start,end;
    //Allocate the vectors
//h_sequence = (char*) malloc(length);
//h_threadSequences = (char*) malloc(length*NUM_THREADS);
h_lMax_localS0 = (char*) malloc(NUM_THREADS);
h_lMax_localS1 = (char*) malloc(NUM_THREADS);
//hipMalloc(&d_threadSequences, length*NUM_THREADS);
hipSetDevice(0);
hipMalloc(&d_lMax_localS0, NUM_THREADS);
hipMemset(d_lMax_localS0, 0, NUM_THREADS);
hipSetDevice(1);
hipMalloc(&d_lMax_localS1, NUM_THREADS);
hipMemset(d_lMax_localS1, 0, NUM_THREADS);
start = clock();
unsigned long long numSeq = fatorialHost(length-1)/2;
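    //Only (length-1)!/2 pivot sequences are enumerated; the assumption is that fixing the first
    //element selects one representative per rotation class and reversal symmetry halves the count
    //again -- the exact mapping is defined by getSequence() in EnumaratorSequence.cu.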
int blockSize = 128; // The launch configurator returned block size
int gridSize; // The actual grid size needed, based on input size
// Round up according to array size
gridSize = ceil(NUM_THREADS / blockSize);
//dim3 num_blocks(ceil((float) NUM_THREADS/(float) (THREAD_PER_BLOCK)));
//int tam_shared = length*THREAD_PER_BLOCK;
int tam_shared = length*blockSize;
    printf("Started\n");
    //Each thread computes Min_{s' \in R(s)}(Min(|LIS(s)|, |LDS(s)|)); if the result is larger than the local maximum,
    //it is stored in that variable
hipSetDevice(0);
//decideLS<<<num_blocks, THREAD_PER_BLOCK, tam_shared>>>
hipLaunchKernelGGL(( decideLS), dim3(gridSize), dim3(blockSize), tam_shared, 0,
d_lMax_localS0, length, numSeq, NUM_DEVICES*NUM_THREADS, 0);
hipSetDevice(1);
//decideLS<<<num_blocks, THREAD_PER_BLOCK, tam_shared>>>
hipLaunchKernelGGL(( decideLS), dim3(gridSize), dim3(blockSize), tam_shared, 0,
d_lMax_localS1, length, numSeq, NUM_DEVICES*NUM_THREADS, NUM_THREADS);
//hipSetDevice(0);
//hipMemcpyAsync(h_lMax_localS0, d_lMax_localS0, NUM_THREADS, hipMemcpyDeviceToHost);
//hipSetDevice(1);
//hipMemcpyAsync(h_lMax_localS1, d_lMax_localS1, NUM_THREADS, hipMemcpyDeviceToHost);
hipSetDevice(0);
hipMemcpyAsync(h_lMax_localS0, d_lMax_localS0, 1, hipMemcpyDeviceToHost);
hipSetDevice(1);
hipMemcpyAsync(h_lMax_localS1, d_lMax_localS1, 1, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
    char lMax_globalS = 0; //Global maximum of S
if (h_lMax_localS0[0] < h_lMax_localS1[0]) {
lMax_globalS = h_lMax_localS1[0];
} else {
lMax_globalS = h_lMax_localS0[0];
}
//hipSetDevice(0);
//hipDeviceSynchronize();
    //char lMax_globalS = 0; //Global maximum of S
//calcLMaxGlobalS(&lMax_globalS, h_lMax_localS0, NUM_THREADS);
//hipSetDevice(1);
//hipDeviceSynchronize();
//calcLMaxGlobalS(&lMax_globalS, h_lMax_localS1, NUM_THREADS);
/*for(int i = 0; i < NUM_THREADS; i++){
printf("%d - %d\n",i, h_lMax_localS0[i]);
}*/
end = clock();
    printf("100%% - Time: %f s\n", (float)(end-start)/CLOCKS_PER_SEC);
printf("Lmax R = %d\n",lMax_globalS);
free(h_lMax_localS0);
free(h_lMax_localS1);
//hipFree(d_threadSequences);
hipFree(d_lMax_localS0);
hipFree(d_lMax_localS1);
}
| 945df841691c0d47c503f547e0e1d84f16de936b.cu | #include <stddef.h>
#include <stdio.h>
#include "LIS.cu"
#include "LDS.cu"
#include "EnumaratorSequence.cu"
#include <time.h>
//#define NUM_THREADS 1024
#define THREAD_PER_BLOCK 128
#define N 20
#define NUM_DEVICES 2
__device__
void printVector(char* array, int length){
for(int k = 0; k < length; k++){
printf("%d - ",array[k]);
}
printf("\n");
}
__device__
void inversion(char* vet, int length){
char temp;
for(int i = 0; i < length/2; i++){
temp = vet[length-i-1];
vet[length-i-1] = vet[i];
vet[i] = temp;
}
}
__device__
void rotation(char *array, int length){
char temp;
int i;
temp = array[0];
for (i = 0; i < length-1; i++)
array[i] = array[i+1];
array[i] = temp;
}
unsigned long long fatorialHost(unsigned long long n){
int i;
unsigned long long result = 1;
for(i = n; i > 1; i--){
result *= i;
}
return result;
}
//Computes the LIS over the whole set R, starting from the main pivot of the lexicographic order.
//If it finds a value smaller than the local maximum of S, it returns early and skips the remaining computations.
__global__
void decideLS(char* d_lMax_S, int length, unsigned long long maxSeq, int numThreads, int initThread){
extern __shared__ char s_vet[];
int tid = threadIdx.x + blockIdx.x*blockDim.x;
    int s_index = length*threadIdx.x; //Index into shared memory
    unsigned long long int indexSeq = tid+initThread;
    //These two vectors are used by both LIS and LDS; they are declared out here
    //to use less memory and avoid having to malloc.
    char MP[N*(N+1)/2]; //"Most promising" vector
    char last[N]; //"last" vector for MP
    //Results found by LIS and LDS
char lLIS, lLDS;
char lMin_R;
bool flagFinalLoop;
while(indexSeq < maxSeq){
getSequence(s_vet + s_index, length, indexSeq);
        lMin_R = 20; //Minimum found so far within the set R
        flagFinalLoop = true;
        for(int i = 0; i < length; i++){ //Rotation
lLIS = LIS(s_vet + s_index, last, MP, length);
            //If it is smaller than the minimum of the set R, update the value
if(lLIS < lMin_R){
lMin_R = lLIS;
}
            //The whole set can be discarded, since it will not replace lMax_S in the final result
if(lMin_R <= d_lMax_S[tid]){
flagFinalLoop = false;
break;
}
lLDS = LDS(s_vet + s_index, last, MP, length);
            //If it is smaller than the minimum of the set R, update the value
if(lLDS < lMin_R){
lMin_R = lLDS;
}
            //The whole set can be discarded, since it will not replace lMax_S in the final result
if(lMin_R <= d_lMax_S[tid]){
flagFinalLoop = false;
break;
}
rotation(s_vet + s_index, length);
}
        //If the loop over R ran to completion, the minimum found for R is larger than the
        //local maximum stored so far, so it becomes the new local maximum.
if(flagFinalLoop){
d_lMax_S[tid] = lMin_R;
if (d_lMax_S[tid] == 6) {
printVector(s_vet + s_index, length);
}
//printf("tid = %d, maxS= %d, index %d\n", tid, d_lMax_S[tid], indexSeq);
}
indexSeq += numThreads;
}
__syncthreads();
if (tid == 0) {
char lMaxS = d_lMax_S[0];
for(int i = 0; i < 10240; i++){
//printf("DLMaxS %d . %d\n",i, d_lMax_S[i]);
if(lMaxS < d_lMax_S[i]){
lMaxS = d_lMax_S[i];
}
}
//printf("LmaxS %d\n", lMaxS);
d_lMax_S[0] = lMaxS;
}
}
//Given the local maxima of S, compute the global maximum.
void calcLMaxGlobalS(char* lMax_globalS, char* lMax_localS, int tamVec){
    //Number of sets
for(int i = 0; i < tamVec; i++){
//printf("%d\n", lMax_localS[i]);
if(*lMax_globalS < lMax_localS[i]){
*lMax_globalS = lMax_localS[i];
}
}
}
//Let S be the set of all sequences of the first n natural numbers.
//Define R(s), with s \in S, as the set of all sequences that can be
//generated by rotating s.
//Define LIS(s) and LDS(s) as usual and let |LIS(s)| and |LDS(s)| be their
//cardinalidades (cardinalities).
//Determine Max_{s \in S}(Min_{s' \in R(s)}(Min(|LIS(s)|, |LDS(s)|)))
int main(int argc, char *argv[]){
    //char* h_sequence; //Pivot sequence of the group
    //char* h_threadSequences; //Sequences generated for the threads
    char* d_lMax_localS0; //Local maxima of S; each thread keeps its own local maximum
char* d_lMax_localS1;
char* h_lMax_localS0;
char* h_lMax_localS1;
int length = atoi(argv[1]);
int NUM_THREADS = 10240;
//cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
clock_t start,end;
    //Allocate the vectors
//h_sequence = (char*) malloc(length);
//h_threadSequences = (char*) malloc(length*NUM_THREADS);
h_lMax_localS0 = (char*) malloc(NUM_THREADS);
h_lMax_localS1 = (char*) malloc(NUM_THREADS);
//cudaMalloc(&d_threadSequences, length*NUM_THREADS);
cudaSetDevice(0);
cudaMalloc(&d_lMax_localS0, NUM_THREADS);
cudaMemset(d_lMax_localS0, 0, NUM_THREADS);
cudaSetDevice(1);
cudaMalloc(&d_lMax_localS1, NUM_THREADS);
cudaMemset(d_lMax_localS1, 0, NUM_THREADS);
start = clock();
unsigned long long numSeq = fatorialHost(length-1)/2;
int blockSize = 128; // The launch configurator returned block size
int gridSize; // The actual grid size needed, based on input size
// Round up according to array size
gridSize = ceil(NUM_THREADS / blockSize);
//dim3 num_blocks(ceil((float) NUM_THREADS/(float) (THREAD_PER_BLOCK)));
//int tam_shared = length*THREAD_PER_BLOCK;
int tam_shared = length*blockSize;
    printf("Started\n");
    //Each thread computes Min_{s' \in R(s)}(Min(|LIS(s)|, |LDS(s)|)); if the result is larger than the local maximum,
    //it is stored in that variable
cudaSetDevice(0);
//decideLS<<<num_blocks, THREAD_PER_BLOCK, tam_shared>>>
decideLS<<<gridSize, blockSize, tam_shared>>>
(d_lMax_localS0, length, numSeq, NUM_DEVICES*NUM_THREADS, 0);
cudaSetDevice(1);
//decideLS<<<num_blocks, THREAD_PER_BLOCK, tam_shared>>>
decideLS<<<gridSize, blockSize, tam_shared>>>
(d_lMax_localS1, length, numSeq, NUM_DEVICES*NUM_THREADS, NUM_THREADS);
//cudaSetDevice(0);
//cudaMemcpyAsync(h_lMax_localS0, d_lMax_localS0, NUM_THREADS, cudaMemcpyDeviceToHost);
//cudaSetDevice(1);
//cudaMemcpyAsync(h_lMax_localS1, d_lMax_localS1, NUM_THREADS, cudaMemcpyDeviceToHost);
cudaSetDevice(0);
cudaMemcpyAsync(h_lMax_localS0, d_lMax_localS0, 1, cudaMemcpyDeviceToHost);
cudaSetDevice(1);
cudaMemcpyAsync(h_lMax_localS1, d_lMax_localS1, 1, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
    char lMax_globalS = 0; //Global maximum of S
if (h_lMax_localS0[0] < h_lMax_localS1[0]) {
lMax_globalS = h_lMax_localS1[0];
} else {
lMax_globalS = h_lMax_localS0[0];
}
//cudaSetDevice(0);
//cudaThreadSynchronize();
//char lMax_globalS = 0; //Variável com o máximo global de S
//calcLMaxGlobalS(&lMax_globalS, h_lMax_localS0, NUM_THREADS);
//cudaSetDevice(1);
//cudaThreadSynchronize();
//calcLMaxGlobalS(&lMax_globalS, h_lMax_localS1, NUM_THREADS);
/*for(int i = 0; i < NUM_THREADS; i++){
printf("%d - %d\n",i, h_lMax_localS0[i]);
}*/
end = clock();
    printf("100%% - Time: %f s\n", (float)(end-start)/CLOCKS_PER_SEC);
printf("Lmax R = %d\n",lMax_globalS);
free(h_lMax_localS0);
free(h_lMax_localS1);
//cudaFree(d_threadSequences);
cudaFree(d_lMax_localS0);
cudaFree(d_lMax_localS1);
}
|
cf6a44fb6e98b1935c7c355b1dfd3675eac3e9b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <vector>
#include "caffe/layers/operator/parallel_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
#define BN_EPS 1e-5
namespace caffe {
static __global__ void kernel_test_forward(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias, const float* mean, const float* var,
const float eps, const float* bottom_data, float* top_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
top_data[index] = ((bottom_data[index] - mean[c]) / sqrt(var[c] + eps))
* scale[c] + bias[c];
}
}
static __global__ void kernel_test_backward(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias, const float* mean, const float* var,
const float eps, const float* top_diff, float* bottom_diff) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
bottom_diff[index] = top_diff[index] / sqrt(var[c] + eps) * scale[c];
}
}
static __global__ void kernel_local_stats(int num, int channels, int spatial_dim,
const float norm_factor,
const float* bottom_data, float* mean, float* var) {
// store local E[x] to mean, E[x^2] to var temporarily
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
const int index = i / spatial_dim * channels * spatial_dim
+ c * spatial_dim + i % spatial_dim;
buffer1[tid] += bottom_data[index];
buffer2[tid] += bottom_data[index] * bottom_data[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0) {
mean[c] = buffer1[0] / norm_factor;
var[c] = buffer2[0] / norm_factor;
}
}
static __global__ void kernel_backward_scale_bias(
const int num, const int channels, const int spatial_dim,
const float* mean, const float* var, const float eps,
const float* top_diff, const float* bottom_data,
float* scale_diff, float* bias_diff) {
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
const int index = i / spatial_dim * channels * spatial_dim
+ c * spatial_dim + i % spatial_dim;
buffer1[tid] += top_diff[index] * (bottom_data[index] - mean[c]) / sqrt(var[c] + eps);
buffer2[tid] += top_diff[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0) {
scale_diff[c] = buffer1[0];
bias_diff[c] = buffer2[0];
}
}
static __global__ void kernel_backward_bottom(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias,
const float* mean, const float* var, const float eps,
const float norm_factor,
const float* top_diff, const float* scale_diff, const float* bias_diff,
const float* bottom_data, float* bottom_diff) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
const float inv_std = float(1) / sqrt(var[c] + eps);
const float x_norm = (bottom_data[index] - mean[c]) * inv_std;
bottom_diff[index] = scale[c] * inv_std *
(top_diff[index] - (x_norm * scale_diff[c] + bias_diff[c]) / norm_factor);
}
}
void ParallelBatchNormLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
if (Caffe::number_collect_sample != -1)
{
CHECK_EQ(this->parallel_blobs_.size(),4*NGPUS);
if (Caffe::number_collect_sample == 0)
{
caffe_gpu_set(this->blobs_[2]->count(),float(0),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_set(this->blobs_[3]->count(),float(0),this->blobs_[3]->mutable_gpu_data());
}
for (int i = 0; i < NGPUS; i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),NULL);
}
for (int i = 0; i < NGPUS; i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),NULL);
}
}
#if 0
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[0*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[1*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
#endif
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
const int m = num * height * width * NGPUS;
//----------------------------------------------------
// compute local E[x] and E[x^2]
if (Caffe::bn_state() == "learned")
{
if (Caffe::number_collect_sample == 0)
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
caffe_gpu_set(this->parallel_blobs_[2*NGPUS+i]->count(),float(0),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
caffe_gpu_set(this->parallel_blobs_[3*NGPUS+i]->count(),float(0),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
}
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
hipLaunchKernelGGL(( kernel_local_stats), dim3(channels), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, channels, height * width,
float(m),
bottom[i]->gpu_data(),
parallel_mean_buffer_[i]->mutable_gpu_data(),
parallel_var_buffer_[i]->mutable_gpu_data());
}
// sync E[x] and E[x^2]
REDUCE_DATA(parallel_mean_buffer_);
REDUCE_DATA(parallel_var_buffer_);
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
caffe_gpu_mul(channels, parallel_mean_buffer_[i]->gpu_data(), parallel_mean_buffer_[i]->gpu_data(),
top[i]->mutable_gpu_data()); // reuse the top buffer
caffe_gpu_sub(channels, parallel_var_buffer_[i]->gpu_data(), top[i]->gpu_data(),
parallel_var_buffer_[i]->mutable_gpu_data());
}
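    // At this point parallel_var_buffer_ holds the (biased) batch variance via the identity
    // Var[x] = E[x^2] - (E[x])^2; the top buffer was only borrowed to hold (E[x])^2.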
float factor;
if (Caffe::number_collect_sample == -1)
factor = 0.01;
else
factor = float(1)/float(Caffe::number_collect_sample+1);
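    // With factor = 1/(number_collect_sample + 1) the axpby below is a cumulative average over the
    // batches seen so far (statistics-collection mode); with factor = 0.01 (the
    // number_collect_sample == -1 path) it behaves as an exponential moving average.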
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
caffe_gpu_axpby(parallel_mean_buffer_[i]->count(),
factor, parallel_mean_buffer_[i]->gpu_data(),
1-factor, this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
caffe_gpu_axpby(parallel_var_buffer_[i]->count(),
factor, parallel_var_buffer_[i]->gpu_data(),
1-factor, this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
hipLaunchKernelGGL(( kernel_test_forward), dim3(CAFFE_GET_BLOCKS(bottom[i]->count())),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
bottom[i]->gpu_data(),
top[i]->mutable_gpu_data());
}
}
else
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
hipLaunchKernelGGL(( kernel_test_forward), dim3(CAFFE_GET_BLOCKS(bottom[i]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
this->parallel_blobs_[2*NGPUS+i]->gpu_data(),
this->parallel_blobs_[3*NGPUS+i]->gpu_data(),
float(BN_EPS),
bottom[i]->gpu_data(),
top[i]->mutable_gpu_data());
}
}
//----------------------------------------------------
if (Caffe::number_collect_sample != -1)
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclReduce( this->parallel_blobs_[2*NGPUS+i]->gpu_data(),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),
this->parallel_blobs_[2*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
ncclReduce( this->parallel_blobs_[3*NGPUS+i]->gpu_data(),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),
this->parallel_blobs_[3*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
}
CUDA_CHECK(hipSetDevice(Caffe::GPUs[0]));
caffe_gpu_scal(this->blobs_[2]->count(),float(1)/float(NGPUS),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_scal(this->blobs_[3]->count(),float(1)/float(NGPUS),this->blobs_[3]->mutable_gpu_data());
}
}
void ParallelBatchNormLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
// compute local scale and bias diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
hipLaunchKernelGGL(( kernel_backward_scale_bias), dim3(channels), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, channels, height * width,
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
top[i]->gpu_diff(),
bottom[i]->gpu_data(),
parallel_mean_buffer_[i]->mutable_gpu_diff(), // temp use for local scale diff
parallel_var_buffer_[i]->mutable_gpu_diff() // temp use for local bias diff
);
}
// sync scale and bias diff
REDUCE_DIFF(parallel_mean_buffer_)
REDUCE_DIFF(parallel_var_buffer_);
// add to param blobs diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
caffe_gpu_axpy(channels, float(1) / float(NGPUS),
parallel_mean_buffer_[i]->gpu_diff(),
this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_diff());
caffe_gpu_axpy(channels, float(1) / float(NGPUS),
parallel_var_buffer_[i]->gpu_diff(),
this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_diff());
}
// compute bottom diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(hipSetDevice(Caffe::GPUs[i]));
hipLaunchKernelGGL(( kernel_backward_bottom), dim3(CAFFE_GET_BLOCKS(bottom[i]->count())),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0,
num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
float(num * height * width * NGPUS),
top[i]->gpu_diff(),
parallel_mean_buffer_[i]->gpu_diff(),
parallel_var_buffer_[i]->gpu_diff(),
bottom[i]->gpu_data(),
bottom[i]->mutable_gpu_diff());
}
}
void ParallelBatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
| cf6a44fb6e98b1935c7c355b1dfd3675eac3e9b6.cu | #include <vector>
#include <vector>
#include "caffe/layers/operator/parallel_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
#define BN_EPS 1e-5
namespace caffe {
static __global__ void kernel_test_forward(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias, const float* mean, const float* var,
const float eps, const float* bottom_data, float* top_data) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
top_data[index] = ((bottom_data[index] - mean[c]) / sqrt(var[c] + eps))
* scale[c] + bias[c];
}
}
static __global__ void kernel_test_backward(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias, const float* mean, const float* var,
const float eps, const float* top_diff, float* bottom_diff) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
bottom_diff[index] = top_diff[index] / sqrt(var[c] + eps) * scale[c];
}
}
static __global__ void kernel_local_stats(int num, int channels, int spatial_dim,
const float norm_factor,
const float* bottom_data, float* mean, float* var) {
// store local E[x] to mean, E[x^2] to var temporarily
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
const int index = i / spatial_dim * channels * spatial_dim
+ c * spatial_dim + i % spatial_dim;
buffer1[tid] += bottom_data[index];
buffer2[tid] += bottom_data[index] * bottom_data[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0) {
mean[c] = buffer1[0] / norm_factor;
var[c] = buffer2[0] / norm_factor;
}
}
static __global__ void kernel_backward_scale_bias(
const int num, const int channels, const int spatial_dim,
const float* mean, const float* var, const float eps,
const float* top_diff, const float* bottom_data,
float* scale_diff, float* bias_diff) {
__shared__ float buffer1[CAFFE_CUDA_NUM_THREADS];
__shared__ float buffer2[CAFFE_CUDA_NUM_THREADS];
const int tid = threadIdx.x;
const int c = blockIdx.x;
// load and accumulate data on each thread
buffer1[tid] = buffer2[tid] = 0;
for (int i = tid; i < num * spatial_dim; i += blockDim.x) {
const int index = i / spatial_dim * channels * spatial_dim
+ c * spatial_dim + i % spatial_dim;
buffer1[tid] += top_diff[index] * (bottom_data[index] - mean[c]) / sqrt(var[c] + eps);
buffer2[tid] += top_diff[index];
}
__syncthreads();
// do tree reduction
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
buffer1[tid] += buffer1[tid + s];
buffer2[tid] += buffer2[tid + s];
}
__syncthreads();
}
// save the result back
if (tid == 0) {
scale_diff[c] = buffer1[0];
bias_diff[c] = buffer2[0];
}
}
static __global__ void kernel_backward_bottom(
const int num, const int channels, const int spatial_dim,
const float* scale, const float* bias,
const float* mean, const float* var, const float eps,
const float norm_factor,
const float* top_diff, const float* scale_diff, const float* bias_diff,
const float* bottom_data, float* bottom_diff) {
CUDA_KERNEL_LOOP(index, num * channels * spatial_dim) {
int c = (index / spatial_dim) % channels;
const float inv_std = float(1) / sqrt(var[c] + eps);
const float x_norm = (bottom_data[index] - mean[c]) * inv_std;
bottom_diff[index] = scale[c] * inv_std *
(top_diff[index] - (x_norm * scale_diff[c] + bias_diff[c]) / norm_factor);
}
}
void ParallelBatchNormLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
if (Caffe::number_collect_sample != -1)
{
CHECK_EQ(this->parallel_blobs_.size(),4*NGPUS);
if (Caffe::number_collect_sample == 0)
{
caffe_gpu_set(this->blobs_[2]->count(),float(0),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_set(this->blobs_[3]->count(),float(0),this->blobs_[3]->mutable_gpu_data());
}
for (int i = 0; i < NGPUS; i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),NULL);
}
for (int i = 0; i < NGPUS; i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),NULL);
}
}
#if 0
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[0*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[1*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[2*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
for (int i = 0; i < bottom.size(); ++i)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclBcast((void *)this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),this->parallel_blobs_[3*NGPUS+i]->count(),
ncclFloat,0,Caffe::comms(i),Caffe::parallel_stream(i));
}
#endif
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
const int m = num * height * width * NGPUS;
//----------------------------------------------------
// compute local E[x] and E[x^2]
if (Caffe::bn_state() == "learned")
{
if (Caffe::number_collect_sample == 0)
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
caffe_gpu_set(this->parallel_blobs_[2*NGPUS+i]->count(),float(0),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
caffe_gpu_set(this->parallel_blobs_[3*NGPUS+i]->count(),float(0),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
}
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
kernel_local_stats<<<channels, CAFFE_CUDA_NUM_THREADS>>>(
num, channels, height * width,
float(m),
bottom[i]->gpu_data(),
parallel_mean_buffer_[i]->mutable_gpu_data(),
parallel_var_buffer_[i]->mutable_gpu_data());
}
// sync E[x] and E[x^2]
REDUCE_DATA(parallel_mean_buffer_);
REDUCE_DATA(parallel_var_buffer_);
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
caffe_gpu_mul(channels, parallel_mean_buffer_[i]->gpu_data(), parallel_mean_buffer_[i]->gpu_data(),
top[i]->mutable_gpu_data()); // reuse the top buffer
caffe_gpu_sub(channels, parallel_var_buffer_[i]->gpu_data(), top[i]->gpu_data(),
parallel_var_buffer_[i]->mutable_gpu_data());
}
float factor;
if (Caffe::number_collect_sample == -1)
factor = 0.01;
else
factor = float(1)/float(Caffe::number_collect_sample+1);
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
caffe_gpu_axpby(parallel_mean_buffer_[i]->count(),
factor, parallel_mean_buffer_[i]->gpu_data(),
1-factor, this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data());
caffe_gpu_axpby(parallel_var_buffer_[i]->count(),
factor, parallel_var_buffer_[i]->gpu_data(),
1-factor, this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data());
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
kernel_test_forward<<<CAFFE_GET_BLOCKS(bottom[i]->count()),CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
bottom[i]->gpu_data(),
top[i]->mutable_gpu_data());
}
}
else
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
kernel_test_forward<<<CAFFE_GET_BLOCKS(bottom[i]->count()), CAFFE_CUDA_NUM_THREADS>>>
( num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
this->parallel_blobs_[2*NGPUS+i]->gpu_data(),
this->parallel_blobs_[3*NGPUS+i]->gpu_data(),
float(BN_EPS),
bottom[i]->gpu_data(),
top[i]->mutable_gpu_data());
}
}
//----------------------------------------------------
if (Caffe::number_collect_sample != -1)
{
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclReduce( this->parallel_blobs_[2*NGPUS+i]->gpu_data(),this->parallel_blobs_[2*NGPUS+i]->mutable_gpu_data(),
this->parallel_blobs_[2*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
}
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
ncclReduce( this->parallel_blobs_[3*NGPUS+i]->gpu_data(),this->parallel_blobs_[3*NGPUS+i]->mutable_gpu_data(),
this->parallel_blobs_[3*NGPUS+i]->count(), ncclFloat,ncclSum,0,Caffe::comms(i),NULL);
}
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[0]));
caffe_gpu_scal(this->blobs_[2]->count(),float(1)/float(NGPUS),this->blobs_[2]->mutable_gpu_data());
caffe_gpu_scal(this->blobs_[3]->count(),float(1)/float(NGPUS),this->blobs_[3]->mutable_gpu_data());
}
}
void ParallelBatchNormLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom)
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
// compute local scale and bias diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
kernel_backward_scale_bias<<<channels, CAFFE_CUDA_NUM_THREADS>>>(
num, channels, height * width,
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
top[i]->gpu_diff(),
bottom[i]->gpu_data(),
parallel_mean_buffer_[i]->mutable_gpu_diff(), // temp use for local scale diff
parallel_var_buffer_[i]->mutable_gpu_diff() // temp use for local bias diff
);
}
// sync scale and bias diff
REDUCE_DIFF(parallel_mean_buffer_)
REDUCE_DIFF(parallel_var_buffer_);
// add to param blobs diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
caffe_gpu_axpy(channels, float(1) / float(NGPUS),
parallel_mean_buffer_[i]->gpu_diff(),
this->parallel_blobs_[0*NGPUS+i]->mutable_gpu_diff());
caffe_gpu_axpy(channels, float(1) / float(NGPUS),
parallel_var_buffer_[i]->gpu_diff(),
this->parallel_blobs_[1*NGPUS+i]->mutable_gpu_diff());
}
// compute bottom diff
for(int i=0;i<NGPUS;i++)
{
CUDA_CHECK(cudaSetDevice(Caffe::GPUs[i]));
kernel_backward_bottom<<<CAFFE_GET_BLOCKS(bottom[i]->count()),
CAFFE_CUDA_NUM_THREADS>>>(
num, channels, height * width,
this->parallel_blobs_[0*NGPUS+i]->gpu_data(),
this->parallel_blobs_[1*NGPUS+i]->gpu_data(),
parallel_mean_buffer_[i]->gpu_data(),
parallel_var_buffer_[i]->gpu_data(),
float(BN_EPS),
float(num * height * width * NGPUS),
top[i]->gpu_diff(),
parallel_mean_buffer_[i]->gpu_diff(),
parallel_var_buffer_[i]->gpu_diff(),
bottom[i]->gpu_data(),
bottom[i]->mutable_gpu_diff());
}
}
void ParallelBatchNormLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top)
{
}
} // namespace caffe
|
0faba23ec99d708f3cf359ecfc7ccc8e909dfe6b.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdio.h>
#include <math.h>
#include <cuda/std/chrono>
using namespace cuda::std::chrono;
// includes CUDA Runtime
#include <hip/hip_runtime.h>
// includes, project
#include "common/inc/helper_cuda.h"
#include "common/inc/helper_functions.h" // helper utility functions
using clock_value_t = long long;
void horner_on_cpu(float x, float* coeff, int n, float* ret);
__global__ void horner_kernel(float x, float* coeff, int n, float* ret, bool test_seal);
hipError_t horner_on_cuda(float x, float* coeff, int n, float* ret, bool test_seal);
__global__ void nth_order_horner_child(float x_k, float* coeff, int k, int lastIdx, float* b, bool test_seal);
__global__ void nth_order_horner(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal);
hipError_t nth_order_horner_on_cuda(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal);
__global__ void estrin_child(float x, float* coeff, float* coeff_cpy, bool odd, int threads, bool test_seal);
__global__ void estrin(float x, float* coeff, float* coeff_cpy, int threads, bool odd, float* ret, bool test_seal);
hipError_t estrin_on_cuda(float x, float* coeff, int lastIdx, float* ret, bool test_seal);
__global__ void timer_kernel();
hipError_t average_timings();
__device__ void mult_wait() {
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
// full spec = 381664932
// half spec = 190832466
// quarter spec = 95416233
// eigth spec = 47708116
// 64th spec = 5963514
// 1/100,000 spec = 3817
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < 3817);
return;
}
__device__ void add_wait() {
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
// full spec = 2472424
// half spec = 1236212
// quarter spec = 618106
// eigth spec = 309053
// 64th spec = 38631
// 1/100,000 spec = 25
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < 25);
return;
}
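// The busy-wait helpers above stretch each arithmetic operation to a fixed number
// of GPU cycles; judging by the cycle-count tables and the "Seal test" flag below,
// this emulates the relative latency of homomorphic (SEAL-style) multiplications
// and additions when comparing evaluation schemes.
// horner_kernel evaluates p(x) = c_n x^n + ... + c_1 x + c_0 in the nested form
// ((c_n x + c_{n-1}) x + ...) x + c_0: n multiplications and n additions, all
// strictly dependent, which is why it is launched with a single thread.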
__global__ void horner_kernel(float x, float* coeff, int n, float* ret, bool test_seal)
{
int i = n-1;
float tmp = coeff[n];
while (i >= 0) {
tmp = tmp * x + coeff[i];
// Seal test
if (test_seal) {
// mults (1 mult only at each timestep)
mult_wait();
// adds (1 add only at each timestep)
add_wait();
}
i -= 1;
}
*ret = tmp;
}
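// k-th order Horner: the coefficients are split into k interleaved subpolynomials
// in y = x^k (thread t owns c_t, c_{t+k}, c_{t+2k}, ...). nth_order_horner_child
// evaluates each subpolynomial with ordinary Horner in parallel, and
// nth_order_horner combines the k partial results with one final Horner pass in x,
// using p(x) = sum_t x^t * b_t(x^k).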
__global__ void nth_order_horner_child(float x_k, float* coeff, int k, int lastIdx, float* b, bool test_seal) {
// Calculate number of terms in shortened array
int terms = 0;
while (true) {
if (terms * k + threadIdx.x > lastIdx) {
break;
}
terms = terms + 1;
}
// Calculate subproblem results using Horner
int i = terms - 2;
float tmp = coeff[(terms-1)*k + threadIdx.x];
while (i >= 0) {
tmp = tmp * x_k + coeff[i*k + threadIdx.x];
if (test_seal) {
add_wait();
mult_wait();
}
i -= 1;
}
b[threadIdx.x] = tmp;
}
__global__ void nth_order_horner(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal) {
// Calculate x to the k
float x_k = powf(x, k);
// Run horner's on k threads for k subcalculations
float* b = (float*)malloc(k * sizeof(float));
hipLaunchKernelGGL(( nth_order_horner_child), dim3(1), dim3(k) , 0, 0, x_k, coeff, k, lastIdx, b, test_seal);
hipDeviceSynchronize();
// Bring them together using horners
int i = k - 2;
float tmp = b[k-1];
while (i >= 0) {
tmp = tmp * x + b[i];
if (test_seal) {
add_wait();
mult_wait();
}
i -= 1;
}
*ret = tmp;
free(b);
}
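// Estrin's scheme: estrin_child performs one reduction level, replacing each pair
// of adjacent coefficients with c[2i] + x * c[2i+1] (a trailing odd coefficient is
// carried over unchanged). estrin squares x between levels, so after a logarithmic
// number of levels only two values remain and the final line combines them; the
// dependent-operation depth is logarithmic in the degree, at the cost of one
// thread per pair on each level.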
__global__ void estrin_child(float x, float* coeff, float* coeff_cpy, bool odd, int threads, bool test_seal) {
int startIdx = threadIdx.x * 2;
if (odd) {
if (threadIdx.x == threads - 1) {
coeff_cpy[threadIdx.x] = coeff_cpy[startIdx];
//printf("Thread: %d, val: %.2f\n", threadIdx.x, coeff_cpy[threadIdx.x]);
return;
}
}
coeff_cpy[threadIdx.x] = coeff_cpy[startIdx] + x * coeff_cpy[startIdx + 1];
if (test_seal) {
add_wait();
mult_wait();
}
//printf("Thread: %d, val: %.2f\n", threadIdx.x, coeff_cpy[threadIdx.x]);
return;
}
__global__ void estrin(float x, float* coeff, float* coeff_cpy, int threads, bool odd, float* ret, bool test_seal)
{
float x_cpy = x;
float tmp;
while(threads > 1) {
//printf("x: %.2f\n", x_cpy);
hipLaunchKernelGGL(( estrin_child), dim3(1), dim3(threads), 0, 0, x_cpy, coeff, coeff_cpy, odd, threads, test_seal);
tmp = x_cpy * x_cpy;
if (test_seal) {
mult_wait();
}
hipDeviceSynchronize();
odd = (threads % 2 == 1) ? true : false;
threads = (int)ceilf((float)threads / 2);
x_cpy = tmp;
}
//printf("x: %.2f\n", x_cpy);
*ret = coeff_cpy[0] + x_cpy * coeff_cpy[1];
if (test_seal) {
add_wait();
mult_wait();
}
//printf("Thread: %d, val: %.2f\n", threadIdx.x, *ret);
}
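// Worked example (not part of the original source): p(x) = 1 + 2x + 3x^2 + 4x^3
// at x = 2. Level 1 pairs give 1 + 2*2 = 5 and 3 + 4*2 = 11, x is squared to 4,
// and the final combine gives 5 + 4*11 = 49, matching 1 + 4 + 12 + 32 = 49 from
// direct evaluation.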
__global__ void timer_kernel() {
// test average time for an add and mult
int i;
long count = 1000;
int add_durations = 0;
int mult_durations = 0;
int test_int = 10;
for (i = 0; i < count; i++) {
// reset
test_int = 10;
// test add
auto start_add = high_resolution_clock::now();
        test_int = test_int + test_int;
auto stop_add = high_resolution_clock::now();
// test mult
auto start_mult = high_resolution_clock::now();
        test_int = test_int * test_int;
auto stop_mult = high_resolution_clock::now();
// tally durations
auto duration_add = duration_cast<microseconds>(stop_add - start_add);
auto duration_mult = duration_cast<microseconds>(stop_mult - start_mult);
add_durations += duration_add.count();
mult_durations += duration_mult.count();
}
// print results
printf("Average add = %.4f microseconds.\n", (float)add_durations/(float)count);
printf("Average mult = %.4f microseconds.\n", (float)mult_durations / (float)count);
// find optimal do nothing code for extending time of mults and adds
int j;
    long idle_count = 381664932; // for FHE mult (avg 37793 mult + 328987 relin = 366780 microsec)
auto start = high_resolution_clock::now();
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < idle_count);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
printf("Idle time = %.2f microseconds for idle count %d.\n", (float)duration.count(), idle_count);
//printf("%.2f\n", testInt);
}
void horner_on_cpu(float x, float* coeff, int n, float* ret) {
int i = n - 1;
float tmp = coeff[n];
while (i >= 0) {
tmp = tmp * x + coeff[i];
i -= 1;
}
*ret = tmp;
}
hipError_t horner_on_cuda(float x, float* coeff, int n, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_ret;
std::ofstream horner_data;
hipError_t cudaStatus;
int coeff_len = n + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = hipMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// create cuda event handles
hipEvent_t start, stop;
cudaStatus = hipEventCreate(&start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
cudaStatus = hipEventCreate(&stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( horner_kernel) , dim3(1), dim3(1), 0, 0 , x, dev_coeff, n, dev_ret, test_seal);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(ret, dev_ret, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
horner_data.open("data.csv", std::ios_base::app);
horner_data << gpu_time << ",";
horner_data.close();
Error:
hipFree(dev_coeff);
hipFree(dev_ret);
hipFree(start);
hipFree(stop);
return cudaStatus;
}
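// nth_order_horner_on_cuda and estrin_on_cuda below repeat the same host-side
// pattern as horner_on_cuda: allocate and copy the coefficients, bracket a
// one-block kernel launch with hipEvent records, spin on hipEventQuery while
// counting CPU iterations, read back the result, and append the measured GPU
// time to data.csv.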
hipError_t nth_order_horner_on_cuda(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_ret;
std::ofstream nth_horner_data;
hipError_t cudaStatus;
int coeff_len = lastIdx + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = hipMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// create cuda event handles
hipEvent_t start, stop;
cudaStatus = hipEventCreate(&start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
cudaStatus = hipEventCreate(&stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
nth_order_horner << <1, 1, 0, 0 >> > (x, dev_coeff, k, lastIdx, dev_ret, test_seal);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(ret, dev_ret, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
nth_horner_data.open("data.csv", std::ios_base::app);
nth_horner_data << gpu_time << ",";
nth_horner_data.close();
Error:
hipFree(dev_coeff);
hipFree(dev_ret);
hipFree(start);
hipFree(stop);
return cudaStatus;
}
hipError_t estrin_on_cuda(float x, float* coeff, int lastIdx, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_coeff_cpy;
float* dev_ret;
std::ofstream estrin_data;
hipError_t cudaStatus;
int coeff_len = lastIdx + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = hipMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_coeff_cpy, coeff_len * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_coeff_cpy, coeff, coeff_len * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// create cuda event handles
hipEvent_t start, stop;
cudaStatus = hipEventCreate(&start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
cudaStatus = hipEventCreate(&stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
int threads = (int)ceilf((float)coeff_len/2);
bool odd = (coeff_len % 2 == 1) ? true : false;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( estrin), dim3(1), dim3(1), 0, 0, x, dev_coeff, dev_coeff_cpy, threads, odd, dev_ret, test_seal);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// Restore copy of coeff
cudaStatus = hipMemcpy(dev_coeff_cpy, coeff, coeff_len * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(ret, dev_ret, sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
estrin_data.open("data.csv", std::ios_base::app);
estrin_data << gpu_time << "\n";
estrin_data.close();
Error:
hipFree(dev_coeff);
hipFree(dev_coeff_cpy);
hipFree(dev_ret);
hipFree(start);
hipFree(stop);
return cudaStatus;
}
hipError_t average_timings()
{
hipError_t cudaStatus;
// create cuda event handles
hipEvent_t start, stop;
cudaStatus = hipEventCreate(&start);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
cudaStatus = hipEventCreate(&stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( timer_kernel), dim3(1), dim3(1), 0, 0, );
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
Error:
hipFree(start);
hipFree(stop);
return cudaStatus;
}
int main(int argc, char* argv[])
{
int devID;
hipDeviceProp_t deviceProps;
hipError_t cudaStatus;
std::ofstream data;
bool test_seal = true;
// ready output file
data.open("data.csv");
data << "Horner,nth Horner,Estrin\n" << "\n";
data.close();
printf("[%s] - Starting...\n\n", argv[0]);
printf("---------------------------------------- Basic Timings ----------------------------------------\n");
cudaStatus = average_timings();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "timings failed!");
return 1;
}
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char**)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
printf("---------------------------------------- Main tests ----------------------------------------\n");
srand(0);
int degree;
int maxDegree = 101;
for (degree = 5; degree < maxDegree; degree++) {
if (degree % 100 == 0) {
printf("%.2f percent completed.\n", 100*(float)degree/(float)maxDegree);
}
// Init test values
//const int degree = 5;
//float x = 2.0;
//float coeff[degree + 1] = {1, 1, 1, 1, 1, 1 };
float x = 0.5;
float* coeff = (float*)malloc((degree + 1) * sizeof(float));
int i;
for (i = 0; i < degree + 1; i++) {
coeff[i] = rand() / 100;
}
// k for nth horners
//const int k = 1;
const int k = (int)floor(degree / 2);
float horner_output = 0;
float nth_horner_output = 0;
float estrin_output = 0;
float cpu_output = 0;
printf("---------------------------------------- Horner on CPU ----------------------------------------\n");
horner_on_cpu(x, coeff, degree, &cpu_output);
printf("Horner's Method Eval (CPU) = {%.4f}\n", cpu_output);
printf("\n");
printf("---------------------------------------- Horner on GPU ----------------------------------------\n");
cudaStatus = horner_on_cuda(x, coeff, degree, &horner_output, test_seal);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "horner_on_cuda failed!");
return 1;
}
printf("Horner's Method Eval (GPU) = {%.4f}\n", horner_output);
if (horner_output != cpu_output)
printf("!!!!!!!!!!!!! Horner WRONG !!!!!!!!!!!!\n");
printf("\n");
printf("-------------------------------------- Nth Horner on GPU --------------------------------------\n");
cudaStatus = nth_order_horner_on_cuda(x, coeff, k, degree, &nth_horner_output, test_seal);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "nth_order_horner_on_cuda failed!");
return 1;
}
printf("Nth-Order Horner's Method Eval (GPU) = {%.4f}\n", nth_horner_output);
if (nth_horner_output != cpu_output)
printf("!!!!!!!!!!!!! nth Horner WRONG !!!!!!!!!!!!\n");
printf("\n");
printf("-------------------------------------- Estrin on GPU --------------------------------------\n");
cudaStatus = estrin_on_cuda(x, coeff, degree, &estrin_output, test_seal);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "estrin_on_cuda failed!");
return 1;
}
printf("Estrin's Method Eval (GPU) = {%.4f}\n", estrin_output);
if (estrin_output != cpu_output)
printf("!!!!!!!!!!!!! Estrin WRONG !!!!!!!!!!!!\n");
printf("\n");
free(coeff);
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
printf("Done!\n");
system("pause");
return 0;
}
| 0faba23ec99d708f3cf359ecfc7ccc8e909dfe6b.cu | // includes, system
#include <stdio.h>
#include <math.h>
#include <cuda/std/chrono>
using namespace cuda::std::chrono;
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include "common/inc/helper_cuda.h"
#include "common/inc/helper_functions.h" // helper utility functions
using clock_value_t = long long;
void horner_on_cpu(float x, float* coeff, int n, float* ret);
__global__ void horner_kernel(float x, float* coeff, int n, float* ret, bool test_seal);
cudaError_t horner_on_cuda(float x, float* coeff, int n, float* ret, bool test_seal);
__global__ void nth_order_horner_child(float x_k, float* coeff, int k, int lastIdx, float* b, bool test_seal);
__global__ void nth_order_horner(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal);
cudaError_t nth_order_horner_on_cuda(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal);
__global__ void estrin_child(float x, float* coeff, float* coeff_cpy, bool odd, int threads, bool test_seal);
__global__ void estrin(float x, float* coeff, float* coeff_cpy, int threads, bool odd, float* ret, bool test_seal);
cudaError_t estrin_on_cuda(float x, float* coeff, int lastIdx, float* ret, bool test_seal);
__global__ void timer_kernel();
cudaError_t average_timings();
__device__ void mult_wait() {
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
// full spec = 381664932
// half spec = 190832466
// quarter spec = 95416233
// eigth spec = 47708116
// 64th spec = 5963514
// 1/100,000 spec = 3817
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < 3817);
return;
}
__device__ void add_wait() {
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
// full spec = 2472424
// half spec = 1236212
// quarter spec = 618106
// eigth spec = 309053
// 64th spec = 38631
// 1/100,000 spec = 25
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < 25);
return;
}
__global__ void horner_kernel(float x, float* coeff, int n, float* ret, bool test_seal)
{
int i = n-1;
float tmp = coeff[n];
while (i >= 0) {
tmp = tmp * x + coeff[i];
// Seal test
if (test_seal) {
// mults (1 mult only at each timestep)
mult_wait();
// adds (1 add only at each timestep)
add_wait();
}
i -= 1;
}
*ret = tmp;
}
__global__ void nth_order_horner_child(float x_k, float* coeff, int k, int lastIdx, float* b, bool test_seal) {
// Calculate number of terms in shortened array
int terms = 0;
while (true) {
if (terms * k + threadIdx.x > lastIdx) {
break;
}
terms = terms + 1;
}
// Calculate subproblem results using Horner
int i = terms - 2;
float tmp = coeff[(terms-1)*k + threadIdx.x];
while (i >= 0) {
tmp = tmp * x_k + coeff[i*k + threadIdx.x];
if (test_seal) {
add_wait();
mult_wait();
}
i -= 1;
}
b[threadIdx.x] = tmp;
}
__global__ void nth_order_horner(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal) {
// Calculate x to the k
float x_k = powf(x, k);
// Run horner's on k threads for k subcalculations
float* b = (float*)malloc(k * sizeof(float));
nth_order_horner_child<<< 1, k >>>(x_k, coeff, k, lastIdx, b, test_seal);
cudaDeviceSynchronize();
// Bring them together using horners
int i = k - 2;
float tmp = b[k-1];
while (i >= 0) {
tmp = tmp * x + b[i];
if (test_seal) {
add_wait();
mult_wait();
}
i -= 1;
}
*ret = tmp;
free(b);
}
__global__ void estrin_child(float x, float* coeff, float* coeff_cpy, bool odd, int threads, bool test_seal) {
int startIdx = threadIdx.x * 2;
if (odd) {
if (threadIdx.x == threads - 1) {
coeff_cpy[threadIdx.x] = coeff_cpy[startIdx];
//printf("Thread: %d, val: %.2f\n", threadIdx.x, coeff_cpy[threadIdx.x]);
return;
}
}
coeff_cpy[threadIdx.x] = coeff_cpy[startIdx] + x * coeff_cpy[startIdx + 1];
if (test_seal) {
add_wait();
mult_wait();
}
//printf("Thread: %d, val: %.2f\n", threadIdx.x, coeff_cpy[threadIdx.x]);
return;
}
__global__ void estrin(float x, float* coeff, float* coeff_cpy, int threads, bool odd, float* ret, bool test_seal)
{
float x_cpy = x;
float tmp;
while(threads > 1) {
//printf("x: %.2f\n", x_cpy);
estrin_child<<<1, threads>>>(x_cpy, coeff, coeff_cpy, odd, threads, test_seal);
tmp = x_cpy * x_cpy;
if (test_seal) {
mult_wait();
}
cudaDeviceSynchronize();
odd = (threads % 2 == 1) ? true : false;
threads = (int)ceilf((float)threads / 2);
x_cpy = tmp;
}
//printf("x: %.2f\n", x_cpy);
*ret = coeff_cpy[0] + x_cpy * coeff_cpy[1];
if (test_seal) {
add_wait();
mult_wait();
}
//printf("Thread: %d, val: %.2f\n", threadIdx.x, *ret);
}
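// Rough cost model for the three schemes above: plain Horner performs n dependent
// multiply-add steps; k-th order Horner performs about n/k dependent steps per
// thread plus a k-step combine; Estrin needs only a logarithmic number of
// dependent levels. The data.csv columns ("Horner,nth Horner,Estrin") record the
// measured GPU time of each variant, one row per polynomial degree.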
__global__ void timer_kernel() {
// test average time for an add and mult
int i;
long count = 1000;
int add_durations = 0;
int mult_durations = 0;
int test_int = 10;
for (i = 0; i < count; i++) {
// reset
test_int = 10;
// test add
auto start_add = high_resolution_clock::now();
        test_int = test_int + test_int;
auto stop_add = high_resolution_clock::now();
// test mult
auto start_mult = high_resolution_clock::now();
        test_int = test_int * test_int;
auto stop_mult = high_resolution_clock::now();
// tally durations
auto duration_add = duration_cast<microseconds>(stop_add - start_add);
auto duration_mult = duration_cast<microseconds>(stop_mult - start_mult);
add_durations += duration_add.count();
mult_durations += duration_mult.count();
}
// print results
printf("Average add = %.4f microseconds.\n", (float)add_durations/(float)count);
printf("Average mult = %.4f microseconds.\n", (float)mult_durations / (float)count);
// find optimal do nothing code for extending time of mults and adds
int j;
    long idle_count = 381664932; // for FHE mult (avg 37793 mult + 328987 relin = 366780 microsec)
auto start = high_resolution_clock::now();
clock_value_t start_clk = clock64();
clock_value_t cycles_elapsed;
do { cycles_elapsed = clock64() - start_clk; } while (cycles_elapsed < idle_count);
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
printf("Idle time = %.2f microseconds for idle count %d.\n", (float)duration.count(), idle_count);
//printf("%.2f\n", testInt);
}
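// timer_kernel is a calibration helper: it times a bare add and multiply with
// cuda::std::chrono, then measures how long a clock64() busy-wait of idle_count
// cycles actually takes; the cycle thresholds hard-coded in add_wait() and
// mult_wait() were presumably derived from runs of this kernel.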
void horner_on_cpu(float x, float* coeff, int n, float* ret) {
int i = n - 1;
float tmp = coeff[n];
while (i >= 0) {
tmp = tmp * x + coeff[i];
i -= 1;
}
*ret = tmp;
}
cudaError_t horner_on_cuda(float x, float* coeff, int n, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_ret;
std::ofstream horner_data;
cudaError_t cudaStatus;
int coeff_len = n + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = cudaMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// create cuda event handles
cudaEvent_t start, stop;
cudaStatus = cudaEventCreate(&start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
cudaStatus = cudaEventCreate(&stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
horner_kernel <<<1, 1, 0, 0 >>> (x, dev_coeff, n, dev_ret, test_seal);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(ret, dev_ret, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
horner_data.open("data.csv", std::ios_base::app);
horner_data << gpu_time << ",";
horner_data.close();
Error:
cudaFree(dev_coeff);
cudaFree(dev_ret);
cudaFree(start);
cudaFree(stop);
return cudaStatus;
}
cudaError_t nth_order_horner_on_cuda(float x, float* coeff, int k, int lastIdx, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_ret;
std::ofstream nth_horner_data;
cudaError_t cudaStatus;
int coeff_len = lastIdx + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = cudaMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// create cuda event handles
cudaEvent_t start, stop;
cudaStatus = cudaEventCreate(&start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
cudaStatus = cudaEventCreate(&stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
nth_order_horner << <1, 1, 0, 0 >> > (x, dev_coeff, k, lastIdx, dev_ret, test_seal);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(ret, dev_ret, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
nth_horner_data.open("data.csv", std::ios_base::app);
nth_horner_data << gpu_time << ",";
nth_horner_data.close();
Error:
cudaFree(dev_coeff);
cudaFree(dev_ret);
cudaFree(start);
cudaFree(stop);
return cudaStatus;
}
cudaError_t estrin_on_cuda(float x, float* coeff, int lastIdx, float* ret, bool test_seal)
{
float* dev_coeff;
float* dev_coeff_cpy;
float* dev_ret;
std::ofstream estrin_data;
cudaError_t cudaStatus;
int coeff_len = lastIdx + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers on GPU .
cudaStatus = cudaMalloc((void**)&dev_coeff, coeff_len * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_coeff_cpy, coeff_len * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_ret, sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_coeff, coeff, coeff_len * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_coeff_cpy, coeff, coeff_len * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// create cuda event handles
cudaEvent_t start, stop;
cudaStatus = cudaEventCreate(&start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
cudaStatus = cudaEventCreate(&stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
int threads = (int)ceilf((float)coeff_len/2);
bool odd = (coeff_len % 2 == 1) ? true : false;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
estrin<<<1, 1, 0, 0>>>(x, dev_coeff, dev_coeff_cpy, threads, odd, dev_ret, test_seal);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// Restore copy of coeff
cudaStatus = cudaMemcpy(dev_coeff_cpy, coeff, coeff_len * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(ret, dev_ret, sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
estrin_data.open("data.csv", std::ios_base::app);
estrin_data << gpu_time << "\n";
estrin_data.close();
Error:
cudaFree(dev_coeff);
cudaFree(dev_coeff_cpy);
cudaFree(dev_ret);
cudaFree(start);
cudaFree(stop);
return cudaStatus;
}
cudaError_t average_timings()
{
cudaError_t cudaStatus;
// create cuda event handles
cudaEvent_t start, stop;
cudaStatus = cudaEventCreate(&start);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
cudaStatus = cudaEventCreate(&stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaEventCreate failed!");
goto Error;
}
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
timer_kernel<<<1, 1>>>();
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.8f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.8f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
Error:
cudaFree(start);
cudaFree(stop);
return cudaStatus;
}
int main(int argc, char* argv[])
{
int devID;
cudaDeviceProp deviceProps;
cudaError_t cudaStatus;
std::ofstream data;
bool test_seal = true;
// ready output file
data.open("data.csv");
data << "Horner,nth Horner,Estrin\n" << "\n";
data.close();
printf("[%s] - Starting...\n\n", argv[0]);
printf("---------------------------------------- Basic Timings ----------------------------------------\n");
cudaStatus = average_timings();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "timings failed!");
return 1;
}
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char**)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
printf("---------------------------------------- Main tests ----------------------------------------\n");
srand(0);
int degree;
int maxDegree = 101;
for (degree = 5; degree < maxDegree; degree++) {
if (degree % 100 == 0) {
printf("%.2f percent completed.\n", 100*(float)degree/(float)maxDegree);
}
// Init test values
//const int degree = 5;
//float x = 2.0;
//float coeff[degree + 1] = {1, 1, 1, 1, 1, 1 };
float x = 0.5;
float* coeff = (float*)malloc((degree + 1) * sizeof(float));
int i;
for (i = 0; i < degree + 1; i++) {
coeff[i] = rand() / 100;
}
// k for nth horners
//const int k = 1;
const int k = (int)floor(degree / 2);
float horner_output = 0;
float nth_horner_output = 0;
float estrin_output = 0;
float cpu_output = 0;
printf("---------------------------------------- Horner on CPU ----------------------------------------\n");
horner_on_cpu(x, coeff, degree, &cpu_output);
printf("Horner's Method Eval (CPU) = {%.4f}\n", cpu_output);
printf("\n");
printf("---------------------------------------- Horner on GPU ----------------------------------------\n");
cudaStatus = horner_on_cuda(x, coeff, degree, &horner_output, test_seal);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "horner_on_cuda failed!");
return 1;
}
printf("Horner's Method Eval (GPU) = {%.4f}\n", horner_output);
if (horner_output != cpu_output)
printf("!!!!!!!!!!!!! Horner WRONG !!!!!!!!!!!!\n");
printf("\n");
printf("-------------------------------------- Nth Horner on GPU --------------------------------------\n");
cudaStatus = nth_order_horner_on_cuda(x, coeff, k, degree, &nth_horner_output, test_seal);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "nth_order_horner_on_cuda failed!");
return 1;
}
printf("Nth-Order Horner's Method Eval (GPU) = {%.4f}\n", nth_horner_output);
if (nth_horner_output != cpu_output)
printf("!!!!!!!!!!!!! nth Horner WRONG !!!!!!!!!!!!\n");
printf("\n");
printf("-------------------------------------- Estrin on GPU --------------------------------------\n");
cudaStatus = estrin_on_cuda(x, coeff, degree, &estrin_output, test_seal);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "estrin_on_cuda failed!");
return 1;
}
printf("Estrin's Method Eval (GPU) = {%.4f}\n", estrin_output);
if (estrin_output != cpu_output)
printf("!!!!!!!!!!!!! Estrin WRONG !!!!!!!!!!!!\n");
printf("\n");
free(coeff);
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
printf("Done!\n");
system("pause");
return 0;
}
|
49f1b44fe98ad10c1191bce7da9d4805c474e685.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "renderer_hip.cuh"
#define MP 1.67e-27
#define KB 1.38e-23
#define CC 3.00e8
#define PLANCK 6.63e-34
#define PI 3.1415965
#define SQRTPI 1.772454
#define GRPH 2.3804910e-24
#define DCF 1e7 //density conversion factor
texture<float, 3, hipReadModeElementType> dtex; // 3D texture
texture<float, 3, hipReadModeElementType> eetex; // 3D texture
texture<float, 2, hipReadModeElementType> tgtex; // 2D texture
texture<float, 3, hipReadModeElementType> uatex; // velocity along axis of integration
texture<float, 2, hipReadModeElementType> katex; // 2D texture
texture<float, 1, hipReadModeElementType> aptex; // derivative of integration-axis
__constant__ float dmin;
__constant__ float drange;
__constant__ float emin;
__constant__ float erange;
__constant__ float nsteps;
__constant__ char axis;
__constant__ bool reverse; //go along axis in reverse direction
__constant__ int projectionXsize;
__constant__ int projectionYsize;
#define X_AXIS 0
#define Y_AXIS 1
#define Z_AXIS 2
__device__ float3 pointSpecificStuff(float x, float y, float z, bool iRenderOnly) {
float em = tex3D(emtex, x, y, z);
if (iRenderOnly) return make_float3(en * en * g * ds, 0, 0);
float d1 = __logf(tex3D(dtex, x, y, z)) + __logf(1.e-7);
float e1 = __logf(tex3D(eetex, x, y, z)) - d1 + __logf(1.e5);
float dd = (d1 - dmin) / drange; //density, energy lookup values
float ee = (e1 - emin) / erange;
float tt = tex2D(tgtex, ee, dd);
float uu = 1e4 * (tex3D(uatex, x, y, z) * (reverse ? -1 : 1));
return make_float3(en * en * g * ds, uu, sqrtf(tt));
}
__device__ float pointSpecificTau(float x, float y, float z) {
float d2 = tex3D(dtex, x, y, z);
float d1 = __logf(d2) + __logf(1.e-7);
float e1 = __logf(tex3D(eetex, x, y, z)) - d1 + __logf(1.e5);
float dd = (d1 - dmin) / drange; //density, energy lookup values
float ee = (e1 - emin) / erange;
float kk = tex2D(katex, ee, dd);
float ds = tex1D(aptex);
return (kk * d2 * ds) / GRPH;
}
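// pointSpecificTau computes the optical-depth increment for one integration step:
// the opacity looked up from the 2D (energy, density) table, times the local
// density, times the step length along the integration axis, divided by GRPH
// (grams per hydrogen nucleus). The lookup coordinates are the normalised
// logarithms of density and energy per unit mass, matching pointSpecificStuff.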
extern "C" {
__global__ void iRender(float *out, float *tau, bool opacity) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > projectionXsize * projectionYsize) return;
int ypixel = idx / projectionXsize;
int xpixel = idx % projectionXsize;
float3 cp;
float* ati; //axis to increment
float da = (reverse ? -1.0 : 1.0) / nsteps;
float target = reverse ? 0 : 1;
switch(axis) {
case X_AXIS: cp.y = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.x;
break;
case Y_AXIS: cp.x = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.y;
break;
case Z_AXIS: cp.x = xpixel / (float) projectionXsize;
cp.y = ypixel / (float) projectionYsize;
ati = &cp.z;
break;
default: return;
}
*ati = 1 - target; //start at either 0 or 1
float emiss = 0;
float tausum = opacity ? tau[idx] : 0;
do {
if (tausum <= 1e2) {
emiss += pointSpecificStuff(cp.x, cp.y, cp.z, true).x *
expf(-tausum);
}
if (opacity) {
tausum += pointSpecificTau(cp.x, cp.y, cp.z);
}
*ati += da;
} while (reverse ? (*ati > target) : (*ati < target));
if (opacity) tau[idx] = tausum;
out[idx] = emiss;
}
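// ilRender marches the same ray as iRender but distributes the emission over
// nlamb frequency bins: at every step the line-of-sight velocity Doppler-shifts
// the Gaussian profile, phi = exp(-s^2) / (sqrt(pi) * dopp_width) with
// s = (dnu - nu * u / CC) / dopp_width, and each bin accumulates phi times the
// tau-attenuated emission. The final loop rescales each bin by
// DCF^2 * PLANCK * (dnu + nu0) / (4 * PI).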
__global__ void ilRender(float *out, float *dnus, float *tau,
float nu0, float dopp_width0, int nlamb, bool opacity) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > projectionXsize * projectionYsize) return;
int ypixel = idx / projectionXsize;
int xpixel = idx % projectionXsize;
float3 cp;
float* ati; //axis to increment
float da = (reverse ? -1.0 : 1.0) / nsteps;
float target = reverse ? 0 : 1;
switch(axis) {
case X_AXIS: cp.x = 1 - target;
cp.y = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.x;
break;
case Y_AXIS: cp.y = 1 - target;
cp.x = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.y;
break;
case Z_AXIS: cp.z = 1 - target;
cp.x = xpixel / (float) projectionXsize;
cp.y = ypixel / (float) projectionYsize;
ati = &cp.z;
break;
default: return;
}
*ati = 1 - target;
float3 pointSpecificData;
float dnu;
float nu;
float tausum = opacity ? tau[idx] : 0;
int nfreq;
float dopp_width, shift, phi;
for (nfreq = 0; nfreq < nlamb; nfreq++) {
out[idx * nlamb + nfreq] = 0;
}
do {
if (tausum <= 1e2) {
pointSpecificData = pointSpecificStuff(
cp.x, cp.y, cp.z, false);
dopp_width = pointSpecificData.z * dopp_width0;
pointSpecificData.x *= expf(-tausum);
for (nfreq = 0; nfreq < nlamb; nfreq++) {
dnu = dnus[nfreq];
nu = dnu + nu0;
shift = (dnu - nu * pointSpecificData.y / CC) / dopp_width;
phi = __expf(-shift * shift) / (SQRTPI * dopp_width);
out[idx * nlamb + nfreq] += phi * pointSpecificData.x;
}
}
if (opacity) {
tausum += pointSpecificTau(cp.x, cp.y, cp.z);
}
*ati += da;
} while (reverse ? (*ati > target) : (*ati < target));
for (nfreq = 0; nfreq < nlamb; nfreq++) {
out[idx * nlamb + nfreq] *= DCF * DCF * PLANCK * (dnus[nfreq] + nu0) / (4 * PI);
}
if (opacity) tau[idx] = tausum;
}
}
| 49f1b44fe98ad10c1191bce7da9d4805c474e685.cu | #include "renderer.cuh"
#define MP 1.67e-27
#define KB 1.38e-23
#define CC 3.00e8
#define PLANCK 6.63e-34
#define PI 3.1415965
#define SQRTPI 1.772454
#define GRPH 2.3804910e-24
#define DCF 1e7 //density conversion factor
texture<float, 3, cudaReadModeElementType> dtex; // 3D texture
texture<float, 3, cudaReadModeElementType> eetex; // 3D texture
texture<float, 2, cudaReadModeElementType> tgtex; // 2D texture
texture<float, 3, cudaReadModeElementType> uatex; // velocity along axis of integration
texture<float, 2, cudaReadModeElementType> katex; // 2D texture
texture<float, 1, cudaReadModeElementType> aptex; // derivative of integration-axis
__constant__ float dmin;
__constant__ float drange;
__constant__ float emin;
__constant__ float erange;
__constant__ float nsteps;
__constant__ char axis;
__constant__ bool reverse; //go along axis in reverse direction
__constant__ int projectionXsize;
__constant__ int projectionYsize;
#define X_AXIS 0
#define Y_AXIS 1
#define Z_AXIS 2
__device__ float3 pointSpecificStuff(float x, float y, float z, bool iRenderOnly) {
float em = tex3D(emtex, x, y, z);
if (iRenderOnly) return make_float3(en * en * g * ds, 0, 0);
float d1 = __logf(tex3D(dtex, x, y, z)) + __logf(1.e-7);
float e1 = __logf(tex3D(eetex, x, y, z)) - d1 + __logf(1.e5);
float dd = (d1 - dmin) / drange; //density, energy lookup values
float ee = (e1 - emin) / erange;
float tt = tex2D(tgtex, ee, dd);
float uu = 1e4 * (tex3D(uatex, x, y, z) * (reverse ? -1 : 1));
return make_float3(en * en * g * ds, uu, sqrtf(tt));
}
__device__ float pointSpecificTau(float x, float y, float z) {
float d2 = tex3D(dtex, x, y, z);
float d1 = __logf(d2) + __logf(1.e-7);
float e1 = __logf(tex3D(eetex, x, y, z)) - d1 + __logf(1.e5);
float dd = (d1 - dmin) / drange; //density, energy lookup values
float ee = (e1 - emin) / erange;
float kk = tex2D(katex, ee, dd);
float ds = tex1D(aptex);
return (kk * d2 * ds) / GRPH;
}
extern "C" {
__global__ void iRender(float *out, float *tau, bool opacity) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > projectionXsize * projectionYsize) return;
int ypixel = idx / projectionXsize;
int xpixel = idx % projectionXsize;
float3 cp;
float* ati; //axis to increment
float da = (reverse ? -1.0 : 1.0) / nsteps;
float target = reverse ? 0 : 1;
switch(axis) {
case X_AXIS: cp.y = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.x;
break;
case Y_AXIS: cp.x = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.y;
break;
case Z_AXIS: cp.x = xpixel / (float) projectionXsize;
cp.y = ypixel / (float) projectionYsize;
ati = &cp.z;
break;
default: return;
}
*ati = 1 - target; //start at either 0 or 1
float emiss = 0;
float tausum = opacity ? tau[idx] : 0;
do {
if (tausum <= 1e2) {
emiss += pointSpecificStuff(cp.x, cp.y, cp.z, true).x *
expf(-tausum);
}
if (opacity) {
tausum += pointSpecificTau(cp.x, cp.y, cp.z);
}
*ati += da;
} while (reverse ? (*ati > target) : (*ati < target));
if (opacity) tau[idx] = tausum;
out[idx] = emiss;
}
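// Note on the ray march above: emission is accumulated only while the running
// optical depth stays below 1e2 (beyond that exp(-tau) is negligible), and when
// opacity is enabled the accumulated tau is read from and written back to the
// tau buffer, which presumably lets successive launches continue one integration
// across sub-volumes.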
__global__ void ilRender(float *out, float *dnus, float *tau,
float nu0, float dopp_width0, int nlamb, bool opacity) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx > projectionXsize * projectionYsize) return;
int ypixel = idx / projectionXsize;
int xpixel = idx % projectionXsize;
float3 cp;
float* ati; //axis to increment
float da = (reverse ? -1.0 : 1.0) / nsteps;
float target = reverse ? 0 : 1;
switch(axis) {
case X_AXIS: cp.x = 1 - target;
cp.y = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.x;
break;
case Y_AXIS: cp.y = 1 - target;
cp.x = xpixel / (float) projectionXsize;
cp.z = ypixel / (float) projectionYsize;
ati = &cp.y;
break;
case Z_AXIS: cp.z = 1 - target;
cp.x = xpixel / (float) projectionXsize;
cp.y = ypixel / (float) projectionYsize;
ati = &cp.z;
break;
default: return;
}
*ati = 1 - target;
float3 pointSpecificData;
float dnu;
float nu;
float tausum = opacity ? tau[idx] : 0;
int nfreq;
float dopp_width, shift, phi;
for (nfreq = 0; nfreq < nlamb; nfreq++) {
out[idx * nlamb + nfreq] = 0;
}
do {
if (tausum <= 1e2) {
pointSpecificData = pointSpecificStuff(
cp.x, cp.y, cp.z, false);
dopp_width = pointSpecificData.z * dopp_width0;
pointSpecificData.x *= expf(-tausum);
for (nfreq = 0; nfreq < nlamb; nfreq++) {
dnu = dnus[nfreq];
nu = dnu + nu0;
shift = (dnu - nu * pointSpecificData.y / CC) / dopp_width;
phi = __expf(-shift * shift) / (SQRTPI * dopp_width);
out[idx * nlamb + nfreq] += phi * pointSpecificData.x;
}
}
if (opacity) {
tausum += pointSpecificTau(cp.x, cp.y, cp.z);
}
*ati += da;
} while (reverse ? (*ati > target) : (*ati < target));
for (nfreq = 0; nfreq < nlamb; nfreq++) {
out[idx * nlamb + nfreq] *= DCF * DCF * PLANCK * (dnus[nfreq] + nu0) / (4 * PI);
}
if (opacity) tau[idx] = tausum;
}
}
|
8ed2798e693bc716dedb879f9a71ecb8751d986f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <seqan/index.h>
#include <seqan/index/index_fm_device.h>
#include "test_cuda_common.h"
using namespace seqan;
// TODO(esiragusa): move this into metaprogramming algebra
namespace seqan {
template <typename T1, typename T2>
struct Pair<T1, T2, Tag<void> > {};
// Manually specialize word size to be compatible with GPU
template <typename TValue>
struct RankDictionaryWordSize_<TValue, TwoLevels<void> > :
BitsPerValue<__uint32> {};
template <typename TValue>
struct RankDictionaryWordSize_<TValue, TwoLevels<Device<void> > > :
BitsPerValue<__uint32> {};
template <typename TValue, typename TSpec>
struct RankDictionaryWordSize_<TValue, TwoLevels<View<TSpec> > > :
BitsPerValue<__uint32> {};
}
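// The specialisations above pin the rank-dictionary word size to 32 bits for the
// host, device, and view variants alike; presumably this keeps the bit layout
// identical on both sides so that assign() can copy FM-index fibres between host
// and GPU without repacking.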
// ============================================================================
// Types
// ============================================================================
typedef TagList<FibreRawText,
TagList<FibreLF
> >
FMIndexFibres;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaIndexTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaIndexTest : public Test
{
public:
typedef TType TIndex;
typedef typename Host<TIndex>::Type TText;
typedef typename Device<TIndex>::Type TCudaIndex;
TText text;
TIndex index;
CudaIndexTest() :
text(),
index(text)
{
// TODO(esiragusa): init generic text.
appendValue(text, "ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT");
// TODO(esiragusa): reverse text on FM-index only.
reverse(text);
indexCreate(index);
reverse(text);
}
};
typedef TagList<DnaStringSetFMIndex> CudaIndexTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexTest, CudaIndexTestTypes);
// ----------------------------------------------------------------------------
// Class CudaIndexFibreTest
// ----------------------------------------------------------------------------
template <typename TTypes>
class CudaIndexFibreTest : public CudaIndexTest<typename Value<TTypes, 1>::Type> {};
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<DnaStringSetFMIndex, FMIndexFibres>::Type CudaIndexFibreTestTypes;
typedef TagList<Pair<DnaStringSetFMIndex, FibreRawText, Tag<void> >,
TagList<Pair<DnaStringSetFMIndex, FibreLF, Tag<void> >
> >
CudaIndexFibreTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexFibreTest, CudaIndexFibreTestTypes);
// ----------------------------------------------------------------------------
// Class CudaIndexCountTest
// ----------------------------------------------------------------------------
template <typename TTypes>
class CudaIndexCountTest : public CudaIndexTest<typename Value<TTypes, 1>::Type>
{
public:
typedef typename Value<TTypes, 1>::Type TIndex;
typedef typename Value<TTypes, 2>::Type TNeedles;
typedef CudaIndexTest<TIndex> TBase;
typedef typename Size<TIndex>::Type TSize;
typedef typename Device<TNeedles>::Type TCudaNeedles;
TNeedles needles;
TSize occurrences;
CudaIndexCountTest() :
TBase()
{
// TODO(esiragusa): append generic needles.
appendValue(needles, "ACGT");
appendValue(needles, "CGT");
appendValue(needles, "GTA");
occurrences = countOccurrences(this->index, needles);
}
};
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<DnaStringSetFMIndex, DnaStringSet>::Type CudaIndexCountTestTypes;
typedef TagList<Pair<DnaStringSetFMIndex, DnaStringSet, Tag<void> > > CudaIndexCountTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexCountTest, CudaIndexCountTestTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexTest, Assign)
{
typedef typename TestFixture::TIndex TIndex;
typedef typename TestFixture::TCudaIndex TCudaIndex;
hipDeviceReset();
TCudaIndex cudaIndex;
assign(cudaIndex, this->index);
SEQAN_ASSERT_EQ(length(cudaIndex), length(this->index));
// TIndex index;
// assign(index, cudaIndex);
// SEQAN_ASSERT(index == this->index);
}
// ----------------------------------------------------------------------------
// Test value() on Index Fibres
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexFibreTest, Values)
{
typedef FibreLF TTag;
typedef typename TestFixture::TIndex TIndex;
typedef typename TestFixture::TCudaIndex TCudaIndex;
typedef typename Fibre<TIndex, TTag>::Type TFibre;
typedef typename Fibre<TCudaIndex, TTag>::Type TCudaFibre;
typedef typename View<TCudaFibre>::Type TCudaFibreView;
typedef typename Size<TFibre>::Type TSize;
hipDeviceReset();
TCudaIndex cudaIndex;
assign(cudaIndex, this->index);
TFibre & fibre = getFibre(this->index, TTag());
TCudaFibre & cudaFibre = getFibre(cudaIndex, TTag());
SEQAN_ASSERT_EQ(length(fibre), length(cudaFibre));
TCudaFibreView cudaFibreView = view(cudaFibre);
for (TSize pos = 0; pos < length(fibre); pos++)
{
hipLaunchKernelGGL(( testGetValue), dim3(1),dim3(1), 0, 0, cudaFibreView, pos, fibre[pos]);
hipDeviceSynchronize();
SEQAN_ASSERT_EQ(hipGetLastError(), hipSuccess);
}
}
// ----------------------------------------------------------------------------
// Test countOccurrences()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexCountTest, Count)
{
typedef typename TestFixture::TCudaIndex TCudaIndex;
typedef typename TestFixture::TCudaNeedles TCudaNeedles;
hipDeviceReset();
TCudaIndex cudaIndex;
TCudaNeedles cudaNeedles;
assign(cudaIndex, this->index);
assign(cudaNeedles, this->needles);
SEQAN_ASSERT_EQ(countOccurrences(cudaIndex, cudaNeedles), this->occurrences);
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
| 8ed2798e693bc716dedb879f9a71ecb8751d986f.cu | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: Enrico Siragusa <[email protected]>
// ==========================================================================
#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <seqan/index.h>
#include <seqan/index/index_fm_device.h>
#include "test_cuda_common.h"
using namespace seqan;
// TODO(esiragusa): move this into metaprogramming algebra
namespace seqan {
template <typename T1, typename T2>
struct Pair<T1, T2, Tag<void> > {};
// Manually specialize word size to be compatible with GPU
template <typename TValue>
struct RankDictionaryWordSize_<TValue, TwoLevels<void> > :
BitsPerValue<__uint32> {};
template <typename TValue>
struct RankDictionaryWordSize_<TValue, TwoLevels<Device<void> > > :
BitsPerValue<__uint32> {};
template <typename TValue, typename TSpec>
struct RankDictionaryWordSize_<TValue, TwoLevels<View<TSpec> > > :
BitsPerValue<__uint32> {};
}
// ============================================================================
// Types
// ============================================================================
typedef TagList<FibreRawText,
TagList<FibreLF
> >
FMIndexFibres;
// ============================================================================
// Classes
// ============================================================================
// ----------------------------------------------------------------------------
// Class CudaIndexTest
// ----------------------------------------------------------------------------
template <typename TType>
class CudaIndexTest : public Test
{
public:
typedef TType TIndex;
typedef typename Host<TIndex>::Type TText;
typedef typename Device<TIndex>::Type TCudaIndex;
TText text;
TIndex index;
CudaIndexTest() :
text(),
index(text)
{
// TODO(esiragusa): init generic text.
appendValue(text, "ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT");
// TODO(esiragusa): reverse text on FM-index only.
reverse(text);
indexCreate(index);
reverse(text);
}
};
typedef TagList<DnaStringSetFMIndex> CudaIndexTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexTest, CudaIndexTestTypes);
// ----------------------------------------------------------------------------
// Class CudaIndexFibreTest
// ----------------------------------------------------------------------------
template <typename TTypes>
class CudaIndexFibreTest : public CudaIndexTest<typename Value<TTypes, 1>::Type> {};
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<DnaStringSetFMIndex, FMIndexFibres>::Type CudaIndexFibreTestTypes;
typedef TagList<Pair<DnaStringSetFMIndex, FibreRawText, Tag<void> >,
TagList<Pair<DnaStringSetFMIndex, FibreLF, Tag<void> >
> >
CudaIndexFibreTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexFibreTest, CudaIndexFibreTestTypes);
// ----------------------------------------------------------------------------
// Class CudaIndexCountTest
// ----------------------------------------------------------------------------
template <typename TTypes>
class CudaIndexCountTest : public CudaIndexTest<typename Value<TTypes, 1>::Type>
{
public:
typedef typename Value<TTypes, 1>::Type TIndex;
typedef typename Value<TTypes, 2>::Type TNeedles;
typedef CudaIndexTest<TIndex> TBase;
typedef typename Size<TIndex>::Type TSize;
typedef typename Device<TNeedles>::Type TCudaNeedles;
TNeedles needles;
TSize occurrences;
CudaIndexCountTest() :
TBase()
{
// TODO(esiragusa): append generic needles.
appendValue(needles, "ACGT");
appendValue(needles, "CGT");
appendValue(needles, "GTA");
occurrences = countOccurrences(this->index, needles);
}
};
// TODO(esiragusa): use metaprogramming algebra.
//typedef Product<DnaStringSetFMIndex, DnaStringSet>::Type CudaIndexCountTestTypes;
typedef TagList<Pair<DnaStringSetFMIndex, DnaStringSet, Tag<void> > > CudaIndexCountTestTypes;
SEQAN_TYPED_TEST_CASE(CudaIndexCountTest, CudaIndexCountTestTypes);
// ============================================================================
// Tests
// ============================================================================
// ----------------------------------------------------------------------------
// Test assign()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexTest, Assign)
{
typedef typename TestFixture::TIndex TIndex;
typedef typename TestFixture::TCudaIndex TCudaIndex;
cudaDeviceReset();
TCudaIndex cudaIndex;
assign(cudaIndex, this->index);
SEQAN_ASSERT_EQ(length(cudaIndex), length(this->index));
// TIndex index;
// assign(index, cudaIndex);
// SEQAN_ASSERT(index == this->index);
}
// ----------------------------------------------------------------------------
// Test value() on Index Fibres
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexFibreTest, Values)
{
typedef FibreLF TTag;
typedef typename TestFixture::TIndex TIndex;
typedef typename TestFixture::TCudaIndex TCudaIndex;
typedef typename Fibre<TIndex, TTag>::Type TFibre;
typedef typename Fibre<TCudaIndex, TTag>::Type TCudaFibre;
typedef typename View<TCudaFibre>::Type TCudaFibreView;
typedef typename Size<TFibre>::Type TSize;
cudaDeviceReset();
TCudaIndex cudaIndex;
assign(cudaIndex, this->index);
TFibre & fibre = getFibre(this->index, TTag());
TCudaFibre & cudaFibre = getFibre(cudaIndex, TTag());
SEQAN_ASSERT_EQ(length(fibre), length(cudaFibre));
TCudaFibreView cudaFibreView = view(cudaFibre);
for (TSize pos = 0; pos < length(fibre); pos++)
{
testGetValue<<<1,1>>>(cudaFibreView, pos, fibre[pos]);
cudaDeviceSynchronize();
SEQAN_ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
}
// ----------------------------------------------------------------------------
// Test countOccurrences()
// ----------------------------------------------------------------------------
SEQAN_TYPED_TEST(CudaIndexCountTest, Count)
{
typedef typename TestFixture::TCudaIndex TCudaIndex;
typedef typename TestFixture::TCudaNeedles TCudaNeedles;
cudaDeviceReset();
TCudaIndex cudaIndex;
TCudaNeedles cudaNeedles;
assign(cudaIndex, this->index);
assign(cudaNeedles, this->needles);
SEQAN_ASSERT_EQ(countOccurrences(cudaIndex, cudaNeedles), this->occurrences);
}
// ============================================================================
// Register Tests
// ============================================================================
int main(int argc, char const ** argv)
{
TestSystem::init(argc, argv);
return TestSystem::runAll();
}
|
730d2d7c808ada6d93ce78afe653fae6cb2c156b.hip | // !!! This is a file automatically generated by hipify!!!
#include "StreamWrapper.cuh"
#include "Stream.cuh"
void StreamWrapper::initialize_streams(
const uint n,
const std::vector<char>& velopix_geometry,
const PrUTMagnetTool* host_ut_magnet_tool,
const uint number_of_events,
const bool transmit_device_to_host,
const bool do_check,
const bool do_simplified_kalman_filter,
const bool print_memory_usage,
const bool run_on_x86,
const std::string& folder_name_MC,
const uint start_event_offset,
const size_t reserve_mb
) {
for (uint i=0; i<n; ++i) {
streams.push_back(new Stream());
}
for (int i=0; i<streams.size(); ++i) {
streams[i]->initialize(
velopix_geometry,
host_ut_magnet_tool,
number_of_events,
transmit_device_to_host,
do_check,
do_simplified_kalman_filter,
print_memory_usage,
run_on_x86,
folder_name_MC,
start_event_offset,
reserve_mb,
i
);
// Memory consumption
size_t free_byte;
size_t total_byte;
cudaCheck(hipMemGetInfo(&free_byte, &total_byte));
float free_percent = (float)free_byte / total_byte * 100;
float used_percent = (float)(total_byte - free_byte) / total_byte * 100;
verbose_cout << "GPU memory: " << free_percent << " percent free, "
<< used_percent << " percent used " << std::endl;
}
}
void StreamWrapper::run_stream(
const uint i,
char* host_velopix_events,
uint* host_velopix_event_offsets,
const size_t velopix_events_size,
const size_t velopix_event_offsets_size,
VeloUTTracking::HitsSoA *host_ut_hits_events,
const PrUTMagnetTool* host_ut_magnet_tool,
const uint number_of_events,
const uint number_of_repetitions
) {
auto& s = *(streams[i]);
s.run_sequence(
i,
host_velopix_events,
host_velopix_event_offsets,
velopix_events_size,
velopix_event_offsets_size,
host_ut_hits_events,
host_ut_magnet_tool,
number_of_events,
number_of_repetitions
);
}
StreamWrapper::~StreamWrapper() {
for (auto& stream : streams) {
delete stream;
}
}
| 730d2d7c808ada6d93ce78afe653fae6cb2c156b.cu | #include "StreamWrapper.cuh"
#include "Stream.cuh"
void StreamWrapper::initialize_streams(
const uint n,
const std::vector<char>& velopix_geometry,
const PrUTMagnetTool* host_ut_magnet_tool,
const uint number_of_events,
const bool transmit_device_to_host,
const bool do_check,
const bool do_simplified_kalman_filter,
const bool print_memory_usage,
const bool run_on_x86,
const std::string& folder_name_MC,
const uint start_event_offset,
const size_t reserve_mb
) {
for (uint i=0; i<n; ++i) {
streams.push_back(new Stream());
}
for (int i=0; i<streams.size(); ++i) {
streams[i]->initialize(
velopix_geometry,
host_ut_magnet_tool,
number_of_events,
transmit_device_to_host,
do_check,
do_simplified_kalman_filter,
print_memory_usage,
run_on_x86,
folder_name_MC,
start_event_offset,
reserve_mb,
i
);
// Memory consumption
size_t free_byte;
size_t total_byte;
cudaCheck(cudaMemGetInfo(&free_byte, &total_byte));
float free_percent = (float)free_byte / total_byte * 100;
float used_percent = (float)(total_byte - free_byte) / total_byte * 100;
verbose_cout << "GPU memory: " << free_percent << " percent free, "
<< used_percent << " percent used " << std::endl;
}
}
void StreamWrapper::run_stream(
const uint i,
char* host_velopix_events,
uint* host_velopix_event_offsets,
const size_t velopix_events_size,
const size_t velopix_event_offsets_size,
VeloUTTracking::HitsSoA *host_ut_hits_events,
const PrUTMagnetTool* host_ut_magnet_tool,
const uint number_of_events,
const uint number_of_repetitions
) {
auto& s = *(streams[i]);
s.run_sequence(
i,
host_velopix_events,
host_velopix_event_offsets,
velopix_events_size,
velopix_event_offsets_size,
host_ut_hits_events,
host_ut_magnet_tool,
number_of_events,
number_of_repetitions
);
}
StreamWrapper::~StreamWrapper() {
for (auto& stream : streams) {
delete stream;
}
}
|
242ce430f8aa2a1a442fbf31296055e149daf993.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <iostream>
//#include <time.h>
//#include <float.h>
//#include "vec3.h"
//#include "ray.h"
//#include "sphere.h"
//#include "hitable_list.h"
//
//// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
//#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
//
//void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
// if (result) {
// std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
// file << ":" << line << " '" << func << "' \n";
// // Make sure we call CUDA Device Reset before exiting
// hipDeviceReset();
// exit(99);
// }
//}
//
//__device__ vec3 color(const ray& r, hitable **world) {
// hit_record rec;
// if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
// return 0.5f*vec3(rec.normal.x() + 1.0f, rec.normal.y() + 1.0f, rec.normal.z() + 1.0f);
// }
// else {
// vec3 unit_direction = unit_vector(r.direction());
// float t = 0.5f*(unit_direction.y() + 1.0f);
// return (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
// }
//}
//
//__global__ void render(vec3 *fb, int max_x, int max_y,
// vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin,
// hitable **world) {
// int i = threadIdx.x + blockIdx.x * blockDim.x;
// int j = threadIdx.y + blockIdx.y * blockDim.y;
// if ((i >= max_x) || (j >= max_y)) return;
// int pixel_index = j*max_x + i;
// float u = float(i) / float(max_x);
// float v = float(j) / float(max_y);
// ray r(origin, lower_left_corner + u*horizontal + v*vertical);
// fb[pixel_index] = color(r, world);
//}
//
//__global__ void create_world(hitable **d_list, hitable **d_world) {
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// *(d_list) = new sphere(vec3(0, 0, -1), 0.5);
// *(d_list + 1) = new sphere(vec3(0, -100.5, -1), 100);
// *d_world = new hitable_list(d_list, 2);
// }
//}
//
//__global__ void free_world(hitable **d_list, hitable **d_world) {
// delete *(d_list);
// delete *(d_list + 1);
// delete *d_world;
//}
//
//int main() {
// int nx = 1200;
// int ny = 600;
// int tx = 8;
// int ty = 8;
//
// std::cerr << "Rendering a " << nx << "x" << ny << " image ";
// std::cerr << "in " << tx << "x" << ty << " blocks.\n";
//
// int num_pixels = nx*ny;
// size_t fb_size = num_pixels * sizeof(vec3);
//
// // allocate FB
// vec3 *fb;
// checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
//
// // make our world of hitables
// hitable **d_list;
// checkCudaErrors(hipMalloc((void **)&d_list, 2 * sizeof(hitable *)));
// hitable **d_world;
// checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
// create_world << <1, 1 >> >(d_list, d_world);
// checkCudaErrors(hipGetLastError());
// checkCudaErrors(hipDeviceSynchronize());
//
// clock_t start, stop;
// start = clock();
// // Render our buffer
// dim3 blocks(nx / tx + 1, ny / ty + 1);
// dim3 threads(tx, ty);
// render << <blocks, threads >> >(fb, nx, ny,
// vec3(-2.0, -1.0, -1.0),
// vec3(4.0, 0.0, 0.0),
// vec3(0.0, 2.0, 0.0),
// vec3(0.0, 0.0, 0.0),
// d_world);
// checkCudaErrors(hipGetLastError());
// checkCudaErrors(hipDeviceSynchronize());
// stop = clock();
// double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
// std::cerr << "took " << timer_seconds << " seconds.\n";
//
// // Output FB as Image
// std::cout << "P3\n" << nx << " " << ny << "\n255\n";
// for (int j = ny - 1; j >= 0; j--) {
// for (int i = 0; i < nx; i++) {
// size_t pixel_index = j*nx + i;
// int ir = int(255.99*fb[pixel_index].r());
// int ig = int(255.99*fb[pixel_index].g());
// int ib = int(255.99*fb[pixel_index].b());
// std::cout << ir << " " << ig << " " << ib << "\n";
// }
// }
//
// // clean up
// checkCudaErrors(hipDeviceSynchronize());
// free_world << <1, 1 >> >(d_list, d_world);
// checkCudaErrors(hipGetLastError());
// checkCudaErrors(hipFree(d_list));
// checkCudaErrors(hipFree(d_world));
// checkCudaErrors(hipFree(fb));
//
// // useful for cuda-memcheck --leak-check full
// hipDeviceReset();
//}
| 242ce430f8aa2a1a442fbf31296055e149daf993.cu | //#include <iostream>
//#include <time.h>
//#include <float.h>
//#include "vec3.h"
//#include "ray.h"
//#include "sphere.h"
//#include "hitable_list.h"
//
//// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
//#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
//
//void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
// if (result) {
// std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
// file << ":" << line << " '" << func << "' \n";
// // Make sure we call CUDA Device Reset before exiting
// cudaDeviceReset();
// exit(99);
// }
//}
//
//__device__ vec3 color(const ray& r, hitable **world) {
// hit_record rec;
// if ((*world)->hit(r, 0.0, FLT_MAX, rec)) {
// return 0.5f*vec3(rec.normal.x() + 1.0f, rec.normal.y() + 1.0f, rec.normal.z() + 1.0f);
// }
// else {
// vec3 unit_direction = unit_vector(r.direction());
// float t = 0.5f*(unit_direction.y() + 1.0f);
// return (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
// }
//}
//
//__global__ void render(vec3 *fb, int max_x, int max_y,
// vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin,
// hitable **world) {
// int i = threadIdx.x + blockIdx.x * blockDim.x;
// int j = threadIdx.y + blockIdx.y * blockDim.y;
// if ((i >= max_x) || (j >= max_y)) return;
// int pixel_index = j*max_x + i;
// float u = float(i) / float(max_x);
// float v = float(j) / float(max_y);
// ray r(origin, lower_left_corner + u*horizontal + v*vertical);
// fb[pixel_index] = color(r, world);
//}
//
//__global__ void create_world(hitable **d_list, hitable **d_world) {
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// *(d_list) = new sphere(vec3(0, 0, -1), 0.5);
// *(d_list + 1) = new sphere(vec3(0, -100.5, -1), 100);
// *d_world = new hitable_list(d_list, 2);
// }
//}
//
//__global__ void free_world(hitable **d_list, hitable **d_world) {
// delete *(d_list);
// delete *(d_list + 1);
// delete *d_world;
//}
//
//int main() {
// int nx = 1200;
// int ny = 600;
// int tx = 8;
// int ty = 8;
//
// std::cerr << "Rendering a " << nx << "x" << ny << " image ";
// std::cerr << "in " << tx << "x" << ty << " blocks.\n";
//
// int num_pixels = nx*ny;
// size_t fb_size = num_pixels * sizeof(vec3);
//
// // allocate FB
// vec3 *fb;
// checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
//
// // make our world of hitables
// hitable **d_list;
// checkCudaErrors(cudaMalloc((void **)&d_list, 2 * sizeof(hitable *)));
// hitable **d_world;
// checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
// create_world << <1, 1 >> >(d_list, d_world);
// checkCudaErrors(cudaGetLastError());
// checkCudaErrors(cudaDeviceSynchronize());
//
// clock_t start, stop;
// start = clock();
// // Render our buffer
// dim3 blocks(nx / tx + 1, ny / ty + 1);
// dim3 threads(tx, ty);
// render << <blocks, threads >> >(fb, nx, ny,
// vec3(-2.0, -1.0, -1.0),
// vec3(4.0, 0.0, 0.0),
// vec3(0.0, 2.0, 0.0),
// vec3(0.0, 0.0, 0.0),
// d_world);
// checkCudaErrors(cudaGetLastError());
// checkCudaErrors(cudaDeviceSynchronize());
// stop = clock();
// double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
// std::cerr << "took " << timer_seconds << " seconds.\n";
//
// // Output FB as Image
// std::cout << "P3\n" << nx << " " << ny << "\n255\n";
// for (int j = ny - 1; j >= 0; j--) {
// for (int i = 0; i < nx; i++) {
// size_t pixel_index = j*nx + i;
// int ir = int(255.99*fb[pixel_index].r());
// int ig = int(255.99*fb[pixel_index].g());
// int ib = int(255.99*fb[pixel_index].b());
// std::cout << ir << " " << ig << " " << ib << "\n";
// }
// }
//
// // clean up
// checkCudaErrors(cudaDeviceSynchronize());
// free_world << <1, 1 >> >(d_list, d_world);
// checkCudaErrors(cudaGetLastError());
// checkCudaErrors(cudaFree(d_list));
// checkCudaErrors(cudaFree(d_world));
// checkCudaErrors(cudaFree(fb));
//
// // useful for cuda-memcheck --leak-check full
// cudaDeviceReset();
//}
|
324c9d06a0e0f12563703b3445406a47efd74e9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <math.h>
#define EPS 0.0000001192f
float UPPER_BOUND = 1000.0f;
int SIZE = 10;
__host__ int split_32_to_16(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N)
{
float scale1 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(X[i]);
if (norm > scale1) scale1 = norm;
}
// Restrict scale range
if (scale1 < EPS){
scale1 = EPS;
}
if (scale1 > 1.0f/EPS){
scale1 = 1.0f/EPS;
}
float Xtemp[N];
// Get the normalized Xhi
for (int i = 0; i < N; i++) {
Xtemp[i] = X[i] / scale1;
Xhi[i] = (half)(Xtemp[i]);
// Using Xtemp to store the residual
Xtemp[i] = X[i] - scale1 * (float)Xhi[i];
}
// Normalize Xlo
float scale2 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float)fabs(Xtemp[i]);
if (norm > scale2) scale2 = norm;
}
if (scale2 < EPS){
scale2 = EPS;
}
if (scale2 > 1.0f/EPS){
scale2 = 1.0f/EPS;
}
for (int i = 0; i < N; i++){
Xtemp[i] = Xtemp[i] / scale2;
Xlo[i] = (half) (Xtemp[i]);
}
*s1 = scale1;
*s2 = scale2;
return 0;
}
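/* A sketch of the reconstruction this split enables (illustrative only, not
 * called anywhere): once split_32_to_16 has produced Xhi, Xlo, s1 and s2,
 * each input value can be approximated from the two half-precision parts as
 *
 *   X[i] ~= s1 * (float)Xhi[i] + s2 * (float)Xlo[i]
 *
 * where the second term corrects most of the fp16 rounding error left by the
 * first, and the EPS clamping above keeps both scales finite and non-zero.
 */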
int main(int argc, char **argv)
{
srand(time(NULL));
float X[SIZE];
printf("The input is: \n");
for (int i = 0; i < SIZE; i++){
X[i] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
printf("X[%d] = %.10f\n", i, X[i]);
}
half Xhi[SIZE], Xlo[SIZE];
float scale1, scale2;
split_32_to_16(X, Xhi, Xlo, &scale1, &scale2, SIZE);
printf("Result: \n S1=%.10f, S2=%.10f, \n", scale1, scale2);
for (int i = 0; i < SIZE; i++){
printf("Xhi[%d] = %.10f, Xlo[%d] = %.10f\n", i, (float)Xhi[i], i, (float)Xlo[i]);
}
}
| 324c9d06a0e0f12563703b3445406a47efd74e9a.cu | #include <stdio.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <math.h>
#define EPS 0.0000001192f
float UPPER_BOUND = 1000.0f;
int SIZE = 10;
__host__ int split_32_to_16(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N)
{
float scale1 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float) fabs(X[i]);
if (norm > scale1) scale1 = norm;
}
// Restrict scale range
if (scale1 < EPS){
scale1 = EPS;
}
if (scale1 > 1.0f/EPS){
scale1 = 1.0f/EPS;
}
float Xtemp[N];
// Get the normalized Xhi
for (int i = 0; i < N; i++) {
Xtemp[i] = X[i] / scale1;
Xhi[i] = (half)(Xtemp[i]);
// Using Xtemp to store the residual
Xtemp[i] = X[i] - scale1 * (float)Xhi[i];
}
// Normalize Xlo
float scale2 = 0.0f;
for (int i = 0; i < N; i++){
float norm = (float)fabs(Xtemp[i]);
if (norm > scale2) scale2 = norm;
}
if (scale2 < EPS){
scale2 = EPS;
}
if (scale2 > 1.0f/EPS){
scale2 = 1.0f/EPS;
}
for (int i = 0; i < N; i++){
Xtemp[i] = Xtemp[i] / scale2;
Xlo[i] = (half) (Xtemp[i]);
}
*s1 = scale1;
*s2 = scale2;
return 0;
}
int main(int argc, char **argv)
{
srand(time(NULL));
float X[SIZE];
printf("The input is: \n");
for (int i = 0; i < SIZE; i++){
X[i] = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND;
printf("X[%d] = %.10f\n", i, X[i]);
}
half Xhi[SIZE], Xlo[SIZE];
float scale1, scale2;
split_32_to_16(X, Xhi, Xlo, &scale1, &scale2, SIZE);
printf("Result: \n S1=%.10f, S2=%.10f, \n", scale1, scale2);
for (int i = 0; i < SIZE; i++){
printf("Xhi[%d] = %.10f, Xlo[%d] = %.10f\n", i, (float)Xhi[i], i, (float)Xlo[i]);
}
}
|
c84e69acb849a75e8d1611cd3f09f1e7a4c3773c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| c84e69acb849a75e8d1611cd3f09f1e7a4c3773c.cu | #include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
|
629df859769e857f9ad8a0af21becf9bec8343aa.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2015 Jakub Szuppe <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reverse.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
d_vec = h_vec;
// device vector for reversed data
thrust::device_vector<int> d_reversed_vec(PERF_N);
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
t.start();
thrust::reverse_copy(d_vec.begin(), d_vec.end(), d_reversed_vec.begin());
hipDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
return 0;
}
| 629df859769e857f9ad8a0af21becf9bec8343aa.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2015 Jakub Szuppe <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reverse.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
d_vec = h_vec;
// device vector for reversed data
thrust::device_vector<int> d_reversed_vec(PERF_N);
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
t.start();
thrust::reverse_copy(d_vec.begin(), d_vec.end(), d_reversed_vec.begin());
cudaDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
return 0;
}
|
267494d02e9ad7cb19de66063a9face0bb949ec9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <errno.h>
#include <stdint.h>
#include <hip/hip_runtime_api.h>
#define DIM 64
#define SHARED_MEMORY_DIM ((1<<15)+(1<<14)) // 48KB
#define N_THREAD_GPU (1<<10) // limit is 1024
#define MAX_STEPS (1<<20) /* run for no more than 1Mi steps */
#define TARGET_FITNESS (FLT_EPSILON) /* or until the fitness is less than this much */
#define STEP_CHECK_FREQ 1 /* after how many steps to write the system and check the time */
/* needed by the parallel fitness-minimum reduction */
typedef struct fitness_pos
{
int pos;
float fitness;
} fitness_pos;
/* The whole particle system */
__device__ float *current_best_pos;
__device__ float *global_best_pos;
__constant__ int num_particles;
__device__ float global_fitness = HUGE_VALF;
__device__ float current_fitness;
/* Extents of the domain in each dimension */
#define coord_min -1
#define coord_max 1
#define coord_range 2
float target_pos[DIM]; /* The target position */
__constant__ float target_pos_shared[DIM];
float fitness_min;
/* Particle components*/
__device__ float* pos;
__device__ float* vel;
__device__ float* best_pos;
__device__ uint64_t* prng_state;
__device__ float* best_fit;
__device__ float* fitness_val;
/* Overall weight for the old velocity, best position distance and global
* best position distance in the computation of the new velocity
*/
#define vel_omega 0.9
#define vel_phi_best 2
#define vel_phi_global 2
/* The contribution of the velocity to the new position. Set to 1
* to use the standard PSO approach of adding the whole velocity
* to the position.
*/
#define step_factor 1
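/* A sketch of the per-dimension update these constants drive (r1 and r2 are
 * fresh uniform draws in [0,1] from the particle's own MWC64X stream; see
 * new_vel_pos below):
 *
 *   vel = vel_omega*vel
 *       + vel_phi_best  *r1*(best_pos        - pos)
 *       + vel_phi_global*r2*(global_best_pos - pos)   clamped to +/- coord_range
 *   pos = pos + step_factor*vel                        clamped to [coord_min, coord_max]
 */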
__device__ __host__ uint32_t MWC64X(uint64_t *state);
__device__ __host__ float range_rand(float min, float max, uint64_t *prng_state);
__device__ __host__ void init_rand(uint64_t *prng_state, int i);
__device__ float fitness(float *pos);
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM);
__device__ void warp_control_float(float* smpos, int particleIndexSHM);
__global__ void init_particle();
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks);
__global__ void new_vel_pos();
__global__ void calc_fitness();
int ceil_log2(unsigned long long x);
void check_error(hipError_t err, const char *msg);
void start_time_record(hipEvent_t *before, hipEvent_t *after);
void stop_time_record(hipEvent_t *before, hipEvent_t *after, float *runtime);
void parallel_fitness(const int n_particle, int n_thread);
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles);
void init_mem(int n_particle){
float *pos_d, *vel_d, *best_pos_d, *best_fit_d,*fitness_val_d, *current_best_pos_d, *global_best_pos_d;
uint64_t *prng_state_d;
check_error(hipMalloc(&pos_d,sizeof(float) * n_particle * DIM),"memory alloc n particle pos");
check_error(hipMalloc(&vel_d,sizeof(float) * n_particle * DIM),"memory alloc n particle vel");
check_error(hipMalloc(&best_pos_d,sizeof(float) * n_particle * DIM),"memory alloc n particle best_pos");
check_error(hipMalloc((uint64_t **)&prng_state_d,sizeof(uint64_t) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(&best_fit_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(&fitness_val_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(hipMalloc(¤t_best_pos_d,sizeof(float) * DIM),"memory alloc n particle best_pos");
check_error(hipMalloc(&global_best_pos_d,sizeof(float) * DIM),"memory alloc n particle best_pos");
check_error(hipMemcpyToSymbol(target_pos_shared, &target_pos, sizeof(float)*DIM),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(num_particles, &n_particle, sizeof(int)),"memory cpy to device num_particle");
check_error(hipMemcpyToSymbol(prng_state, &prng_state_d, sizeof(uint64_t)),"memory cpy to device num_particle");
check_error(hipMemcpyToSymbol(pos, &pos_d, sizeof(pos_d)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(vel, &vel_d, sizeof(vel)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(best_pos, &best_pos_d, sizeof(best_pos)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(best_fit, &best_fit_d, sizeof(best_fit)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(fitness_val, &fitness_val_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(current_best_pos, ¤t_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(hipMemcpyToSymbol(global_best_pos, &global_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
}
int main(int argc, char *argv[])
{
unsigned step = 0;
unsigned n_particle;
hipEvent_t before, after;
float calc_fitness_time = 0, new_vel_pos_time = 0, fitness_min_time = 0;
int j;
int n_blocks;
int n_thread = N_THREAD_GPU;
uint64_t prng_state_h;
dim3 init_parall(N_THREAD_GPU/DIM,DIM,1);
/* Get the number of particles, default 128 */
n_particle = argc > 1 ? atoi(argv[1]) : 128;
/* Define n blocks for GPU parallelization */
n_blocks = ceil((float)n_particle / N_THREAD_GPU) == 0 ? 1 : ceil((float)n_particle / N_THREAD_GPU);
/* Initialize the target position */
init_rand(&prng_state_h, time(NULL));
printf("target position: (");
for(j = 0; j < DIM; j++){
target_pos[j] = range_rand(coord_min, coord_max, &prng_state_h);
printf("%f,", target_pos[j]);
}
printf(")\n");
/* Initialize a system with the number of particles given
* on the command-line or from default value (128) */
init_mem(n_particle);
/* init particle system and calculate initial fitness */
hipLaunchKernelGGL(( init_particle), dim3(n_blocks), dim3(n_thread), 0, 0, );
parallel_fitness(n_particle, n_thread);
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
while (step < MAX_STEPS) {
++step;
int n_thread_pos = SHARED_MEMORY_DIM/(sizeof(float2)*DIM) < N_THREAD_GPU ?
SHARED_MEMORY_DIM/(sizeof(float2)*DIM) : N_THREAD_GPU;
int n_blocks_pos_calc_fit = ceil((float)n_particle / (n_thread_pos/DIM)) == 0 ? 1 : ceil((float)n_particle / (n_thread_pos/DIM));
int n_blocks_pos_vel = ceil((float)n_particle / (N_THREAD_GPU/DIM)) == 0 ? 1 : ceil((float)n_particle / (N_THREAD_GPU/DIM));
/* Compute the new velocity for each particle */
/* Update the position of each particle, and the global fitness */
dim3 n_t(DIM,N_THREAD_GPU/DIM);
start_time_record(&before,&after);
hipLaunchKernelGGL(( new_vel_pos), dim3(n_blocks_pos_vel), dim3(n_t), 0, 0, );
stop_time_record(&before,&after,&new_vel_pos_time);
/* Calculate new fitness for each particle*/
dim3 n_t_calc_fit(DIM,n_thread_pos/DIM);
start_time_record(&before,&after);
hipLaunchKernelGGL(( calc_fitness), dim3(n_blocks_pos_calc_fit), dim3(n_t_calc_fit), sizeof(float2)*n_thread_pos, 0, );
stop_time_record(&before,&after,&calc_fitness_time);
/* Calculate min fitness */
start_time_record(&before,&after);
parallel_fitness(n_particle, n_thread);
stop_time_record(&before,&after,&fitness_min_time);
if (fitness_min < TARGET_FITNESS)
break;
if (step % STEP_CHECK_FREQ == 0) {
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
}
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles)
{
float current_fitness_d;
float global_fitness_d;
float *current_best_pos_d_addr = (float*)malloc(sizeof(float));
float *global_best_pos_addr = (float*)malloc(sizeof(float));
float *current_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *global_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *current_fitness_d_addr = (float*)malloc(sizeof(float));
float *global_fitness_d_addr = (float*)malloc(sizeof(float));
int j;
hipGetSymbolAddress((void **)¤t_fitness_d_addr, current_fitness);
hipGetSymbolAddress((void **)&global_fitness_d_addr, global_fitness);
hipGetSymbolAddress((void **)¤t_best_pos_d_addr, current_best_pos);
hipGetSymbolAddress((void **)&global_best_pos_addr, global_best_pos);
check_error(hipMemcpy(¤t_fitness_d, current_fitness_d_addr, sizeof(float),hipMemcpyDeviceToHost),"refresh current_fitness_d host");
check_error(hipMemcpy(&global_fitness_d, global_fitness_d_addr, sizeof(float),hipMemcpyDeviceToHost),"refresh global_fitness_d host");
printf("step %u, best fitness: current %g, so far %g\n", step,
current_fitness_d, global_fitness_d);
if (step > 0) {
printf("time - calc_fitness_time: %fms new_vel_pos: %fms fitness_min: %f\n",calc_fitness_time,new_vel_pos,fitness_min);
}
printf("\ttarget ");
for(j = 0; j < DIM; j++){
printf("%g,", target_pos[j]);
}
printf("\n");
}
/* Target function to be minimized: this is the square
* Euclidean distance from target_pos, perturbed by the distance
* to the origin: this puts a local minimum at the origin,
* which is good to test if the method actually finds the global
* minimum or not */
__device__ float fitness(float *pos)
{
int i;
float fit1 = 0,fit2 = 0, dim_val;
for(i = 0; i < DIM; i++){
dim_val = pos[i];
fit1 += pow(dim_val - target_pos_shared[i],2);
fit2 += pow(dim_val,2);
}
return fit1*(100*fit2+1)/10;
}
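/* In closed form, with t = target_pos_shared:
 *
 *   fitness(x) = ||x - t||^2 * (100*||x||^2 + 1) / 10
 *
 * The global minimum is 0 at x = t; the extra ||x||^2 factor is what creates
 * the competing minimum near the origin described above.
 */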
/* A function that generates a random float in the given range */
float range_rand(float min, float max, uint64_t *prng_state)
{
uint32_t r = MWC64X(prng_state);
return min + r*((max - min)/UINT32_MAX);
}
/* Random number generation: we use the MWC64X PRNG from
* http://cas.ee.ic.ac.uk/people/dt10/research/rngs-gpu-mwc64x.html
* which is parallel-friendly (but needs us to keep track of the state)
*/
uint32_t MWC64X(uint64_t *state)
{
uint64_t x = *state;
uint32_t c = x >> 32; // the upper 32 bits
x &= UINT32_MAX; // keep only the lower bits
*state = x*4294883355U + c;
return ((uint32_t)x)^c;
}
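/* Minimal usage sketch (hypothetical, host side only):
 *
 *   uint64_t s;
 *   init_rand(&s, 42);                       // seed one independent stream
 *   float a = range_rand(0.0f, 1.0f, &s);    // each call advances *s
 *   float b = range_rand(-1.0f, 1.0f, &s);   // same seed => same sequence
 */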
/* A function to initialize the PRNG */
__device__ __host__ void init_rand(uint64_t *prng_state, int i)
{
*prng_state = i;
}
/* Function to initialize a single particle at index i. */
__global__ void init_particle()
{
const int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * DIM;
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l;
init_rand(&prng_state_l, particleIndex);
int j;
for (j = 0; j < DIM; j++){
best_pos[particleIndexDIM + j] = pos[particleIndexDIM + j] = range_rand(coord_min, coord_max, &prng_state_l);
}
for (j = 0; j < DIM; j++){
vel[particleIndexDIM + j] = range_rand(-coord_range, coord_range, &prng_state_l);
}
best_fit[particleIndex] = fitness_val[particleIndex] = fitness(pos + particleIndexDIM);
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new position and new velocity of a given particle */
__global__ void new_vel_pos()
{
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexDIM = particleIndex * DIM + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l = prng_state[particleIndex];
const float best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
const float global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
// float velLocal = __ldg(&vel[particleIndexDIM])*vel_omega;
// float posLocal = __ldg(&pos[particleIndexDIM]);
float velLocal = vel[particleIndexDIM]*vel_omega;
float posLocal = pos[particleIndexDIM];
float pbest = (best_pos[particleIndexDIM] - posLocal) * best_vec_rand_coeff*vel_phi_best;
float gbest = (global_best_pos[indexDIM] - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
pos[particleIndexDIM] = posLocal;
vel[particleIndexDIM] = velLocal;
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new fitness val of a given particle */
__global__ void calc_fitness()
{
extern __shared__ float2 smpos[];
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexSHM = threadIdx.y * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * DIM + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles)
return;
float posLocal = pos[particleIndexDIM];
smpos[particleIndexSHM].x = (posLocal - target_pos_shared[indexDIM])*(posLocal - target_pos_shared[indexDIM]);
smpos[particleIndexSHM].y = (posLocal*posLocal);
warp_control_float2(smpos,particleIndexSHM, indexDIM);
if (indexDIM==0){
float fitness = smpos[particleIndexSHM].x*(100*smpos[particleIndexSHM].y+1)/10;
fitness_val[particleIndex] = fitness;
if (fitness < best_fit[particleIndex]) {
best_fit[particleIndex] = fitness;
memcpy(best_pos + particleIndex*DIM,pos + particleIndex*DIM,sizeof(float)*DIM); // offset by whole DIM-float positions, not single floats
}
}
}
/* Host-side driver for the find_min_fitness_parallel kernel: it runs the reduction in passes so that shared memory never overflows */
void parallel_fitness(const int n_particle, int n_thread){
int shmdim;
fitness_pos *fitness_device_out,*fitness_device_in = NULL;
int last_n_block;
int offset;
int blocks = n_particle;
int max_parallel_particle_iteration = SHARED_MEMORY_DIM / sizeof(fitness_pos);
int iteration;
int max_blocks_per_iteration = max_parallel_particle_iteration / n_thread;
while(blocks != 1){
offset = 0;
last_n_block = blocks;
blocks = ceil((float)blocks / n_thread);
if(blocks == 1){
n_thread = ceil_log2(last_n_block);
}
hipMalloc(&fitness_device_out, sizeof(fitness_pos) * blocks);
shmdim = n_thread*sizeof(fitness_pos);
if(max_parallel_particle_iteration < last_n_block && 0){
iteration = 0;
while(iteration + max_parallel_particle_iteration < blocks*n_thread){
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(max_blocks_per_iteration), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
iteration += max_parallel_particle_iteration;
offset += (max_parallel_particle_iteration/n_thread);
}
int x = (blocks*n_thread) - (offset*n_thread);
x = ceil((float)x / n_thread);
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(x), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
}else{
hipLaunchKernelGGL(( find_min_fitness_parallel), dim3(blocks), dim3(n_thread),shmdim, 0,
fitness_device_in, fitness_device_out, offset, last_n_block,blocks);
}
if(fitness_device_in != NULL){
check_error(hipFree(fitness_device_in),"free fitness_device_in");
}
fitness_device_in = fitness_device_out;
}
fitness_device_out = (fitness_pos*)malloc(sizeof(fitness_pos));
check_error(hipMemcpy(fitness_device_out, fitness_device_in, sizeof(fitness_pos),hipMemcpyDeviceToHost),"copy fitness_min");
fitness_min = fitness_device_out->fitness;
check_error(hipFree(fitness_device_in),"free fitness_device_out");
free(fitness_device_out);
}
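/* Shape of the reduction above, as a worked example (assuming 100000 particles
 * and N_THREAD_GPU = 1024 threads per block):
 *
 *   pass 1: 100000 fitness values -> ceil(100000/1024) = 98 per-block minima
 *   pass 2: 98 values, n_thread rounded up to 128 by ceil_log2 -> 1 block
 *
 * On the final pass (blocks == 1) find_min_fitness_parallel also updates
 * current_fitness/global_fitness on the device before the single fitness_pos
 * is copied back to the host.
 */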
/* Kernel function to compute the new global min fitness */
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks){
extern __shared__ fitness_pos sm[];
const int tid=threadIdx.x;
const int i=(blockIdx.x*blockDim.x+threadIdx.x) + (offset*blockDim.x);
int stride;
sm[tid].fitness = HUGE_VALF;
if(i >= num_particles || i >= n_in)
return;
if(in != NULL){
sm[tid] = in[i];
}else{
sm[tid].fitness = fitness_val[i];
sm[tid].pos = i;
}
// tree-reduce the per-thread minima held in shared memory
for (stride = blockDim.x/2;stride>0;stride>>=1)
{
__syncthreads();
if (tid<stride && sm[tid].fitness > sm[tid+stride].fitness){
sm[tid] = sm[tid+stride];
}
}
if (tid==0){
out[blockIdx.x+offset]=sm[0];//copy back
if(blocks == 1){
current_fitness = sm[0].fitness;
memcpy(current_best_pos,pos+sm[0].pos*DIM,sizeof(float)*DIM);
if (sm[0].fitness < global_fitness) {
global_fitness = sm[0].fitness;
memcpy(global_best_pos,current_best_pos,sizeof(float)*DIM);
}
}
}
}
void start_time_record(hipEvent_t *before, hipEvent_t *after){
check_error(hipEventCreate(&(*before)),"create cudaEvent before");
check_error(hipEventCreate(&(*after)),"create cudaEvent after");
check_error(hipEventRecord(*before),"record cudaEvent before");
}
void stop_time_record(hipEvent_t *before, hipEvent_t *after, float *runtime){
check_error(hipEventRecord(*after),"record cudaEvent after");
check_error(hipEventSynchronize(*after),"synch cudaEvent before");
check_error(hipEventElapsedTime(runtime, *before, *after),"calc cudaEvent elapsed time");
}
void check_error(hipError_t err, const char *msg)
{
if (err != hipSuccess) {
		fprintf(stderr, "%s : error %d (%s)\n",
msg, err, hipGetErrorString(err));
exit(err);
}
}
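/* Per-particle reduction helper used by calc_fitness: it sums the
 * per-dimension partial terms held in shared memory so that, when it returns,
 * the thread with indexDIM == 0 holds the particle's squared distance to the
 * target in .x and its squared norm in .y. */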
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM)
{
__syncthreads();
#if DIM > 1
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM].x += smpos[particleIndexSHM+stride].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM+stride].y;
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 32
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 16].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 16].y;
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 8].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 8].y;
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 4].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 4].y;
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 2].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 2].y;
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 1].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 1].y;
__syncthreads();
#endif
}
#endif
}
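/* Plain-float variant of the reduction above; it appears unused in this file.
 * Note that the generic loop in the DIM > 1024 branch refers to an indexDIM
 * variable that is not a parameter here, but that branch is compiled out for
 * the current DIM. */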
__device__ void warp_control_float(float* smpos, int particleIndexSHM)
{
__syncthreads();
#if DIM > 1024
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM] += smpos[particleIndexSHM+stride];
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 512
smpos[particleIndexSHM] += smpos[particleIndexSHM + 256];
__syncthreads();
#endif
#if DIM >= 256
smpos[particleIndexSHM] += smpos[particleIndexSHM + 128];
__syncthreads();
#endif
#if DIM >= 128
smpos[particleIndexSHM] += smpos[particleIndexSHM + 64];
__syncthreads();
#endif
#if DIM >= 64
smpos[particleIndexSHM] += smpos[particleIndexSHM + 32];
__syncthreads();
#endif
#if DIM >= 32
smpos[particleIndexSHM] += smpos[particleIndexSHM + 16];
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM] += smpos[particleIndexSHM + 8];
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM] += smpos[particleIndexSHM + 4];
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM] += smpos[particleIndexSHM + 2];
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM] += smpos[particleIndexSHM + 1];
__syncthreads();
#endif
}
#endif
}
/* Returns the smallest power of two greater than or equal to x (1 << ceil(log2 x)); used to pick a power-of-two thread count for the final reduction pass */
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return 1<<y;
}
| 267494d02e9ad7cb19de66063a9face0bb949ec9.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <errno.h>
#include <stdint.h>
#include <cuda_runtime_api.h>
#define DIM 64
#define SHARED_MEMORY_DIM ((1<<15)+(1<<14)) // 48KB
#define N_THREAD_GPU (1<<10) // limit is 1024
#define MAX_STEPS (1<<20) /* run for no more than 1Mi steps */
#define TARGET_FITNESS (FLT_EPSILON) /* or until the fitness is less than this much */
#define STEP_CHECK_FREQ 1 /* after how many steps to write the system and check the time */
/* needed for find fitness min in parallel */
typedef struct fitness_pos
{
int pos;
float fitness;
} fitness_pos;
/* The whole particle system */
__device__ float *current_best_pos;
__device__ float *global_best_pos;
__constant__ int num_particles;
__device__ float global_fitness = HUGE_VALF;
__device__ float current_fitness;
/* Extents of the domain in each dimension */
#define coord_min -1
#define coord_max 1
#define coord_range 2
float target_pos[DIM]; /* The target position */
__constant__ float target_pos_shared[DIM];
float fitness_min;
/* Particle components*/
__device__ float* pos;
__device__ float* vel;
__device__ float* best_pos;
__device__ uint64_t* prng_state;
__device__ float* best_fit;
__device__ float* fitness_val;
/* Overall weight for the old velocity, best position distance and global
* best position distance in the computation of the new velocity
*/
#define vel_omega 0.9
#define vel_phi_best 2
#define vel_phi_global 2
/* The contribution of the velocity to the new position. Set to 1
* to use the standard PSO approach of adding the whole velocity
* to the position.
*/
#define step_factor 1
__device__ __host__ uint32_t MWC64X(uint64_t *state);
__device__ __host__ float range_rand(float min, float max, uint64_t *prng_state);
__device__ __host__ void init_rand(uint64_t *prng_state, int i);
__device__ float fitness(float *pos);
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM);
__device__ void warp_control_float(float* smpos, int particleIndexSHM);
__global__ void init_particle();
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks);
__global__ void new_vel_pos();
__global__ void calc_fitness();
int ceil_log2(unsigned long long x);
void check_error(cudaError_t err, const char *msg);
void start_time_record(cudaEvent_t *before, cudaEvent_t *after);
void stop_time_record(cudaEvent_t *before, cudaEvent_t *after, float *runtime);
void parallel_fitness(const int n_particle, int n_thread);
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles);
void init_mem(int n_particle){
float *pos_d, *vel_d, *best_pos_d, *best_fit_d,*fitness_val_d, *current_best_pos_d, *global_best_pos_d;
uint64_t *prng_state_d;
check_error(cudaMalloc(&pos_d,sizeof(float) * n_particle * DIM),"memory alloc n particle pos");
check_error(cudaMalloc(&vel_d,sizeof(float) * n_particle * DIM),"memory alloc n particle vel");
check_error(cudaMalloc(&best_pos_d,sizeof(float) * n_particle * DIM),"memory alloc n particle best_pos");
check_error(cudaMalloc((uint64_t **)&prng_state_d,sizeof(uint64_t) * n_particle),"memory alloc n particle best_pos");
check_error(cudaMalloc(&best_fit_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
check_error(cudaMalloc(&fitness_val_d,sizeof(float) * n_particle),"memory alloc n particle best_pos");
	check_error(cudaMalloc(&current_best_pos_d,sizeof(float) * DIM),"memory alloc n particle best_pos");
check_error(cudaMalloc(&global_best_pos_d,sizeof(float) * DIM),"memory alloc n particle best_pos");
check_error(cudaMemcpyToSymbol(target_pos_shared, &target_pos, sizeof(float)*DIM),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(num_particles, &n_particle, sizeof(int)),"memory cpy to device num_particle");
check_error(cudaMemcpyToSymbol(prng_state, &prng_state_d, sizeof(uint64_t)),"memory cpy to device num_particle");
check_error(cudaMemcpyToSymbol(pos, &pos_d, sizeof(pos_d)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(vel, &vel_d, sizeof(vel)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(best_pos, &best_pos_d, sizeof(best_pos)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(best_fit, &best_fit_d, sizeof(best_fit)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(fitness_val, &fitness_val_d, sizeof(fitness_val)),"memory cpy to device target_pos");
	check_error(cudaMemcpyToSymbol(current_best_pos, &current_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
check_error(cudaMemcpyToSymbol(global_best_pos, &global_best_pos_d, sizeof(fitness_val)),"memory cpy to device target_pos");
}
int main(int argc, char *argv[])
{
unsigned step = 0;
unsigned n_particle;
cudaEvent_t before, after;
float calc_fitness_time = 0, new_vel_pos_time = 0, fitness_min_time = 0;
int j;
int n_blocks;
int n_thread = N_THREAD_GPU;
uint64_t prng_state_h;
dim3 init_parall(N_THREAD_GPU/DIM,DIM,1);
/* Get particle's numbers, default 128 */
n_particle = argc > 1 ? atoi(argv[1]) : 128;
/* Define n blocks for GPU parallelization */
n_blocks = ceil((float)n_particle / N_THREAD_GPU) == 0 ? 1 : ceil((float)n_particle / N_THREAD_GPU);
/* Initialize the target position */
init_rand(&prng_state_h, time(NULL));
printf("target position: (");
for(j = 0; j < DIM; j++){
target_pos[j] = range_rand(coord_min, coord_max, &prng_state_h);
printf("%f,", target_pos[j]);
}
printf(")\n");
/* Initialize a system with the number of particles given
* on the command-line or from default value (128) */
init_mem(n_particle);
/* init particle system and calculate initial fitness */
init_particle<<<n_blocks, n_thread>>>();
parallel_fitness(n_particle, n_thread);
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
while (step < MAX_STEPS) {
++step;
int n_thread_pos = SHARED_MEMORY_DIM/(sizeof(float2)*DIM) < N_THREAD_GPU ?
SHARED_MEMORY_DIM/(sizeof(float2)*DIM) : N_THREAD_GPU;
int n_blocks_pos_calc_fit = ceil((float)n_particle / (n_thread_pos/DIM)) == 0 ? 1 : ceil((float)n_particle / (n_thread_pos/DIM));
int n_blocks_pos_vel = ceil((float)n_particle / (N_THREAD_GPU/DIM)) == 0 ? 1 : ceil((float)n_particle / (N_THREAD_GPU/DIM));
/* Compute the new velocity for each particle */
/* Update the position of each particle, and the global fitness */
dim3 n_t(DIM,N_THREAD_GPU/DIM);
start_time_record(&before,&after);
new_vel_pos<<<n_blocks_pos_vel, n_t>>>();
stop_time_record(&before,&after,&new_vel_pos_time);
/* Calculate new fitness for each particle*/
dim3 n_t_calc_fit(DIM,n_thread_pos/DIM);
start_time_record(&before,&after);
calc_fitness<<<n_blocks_pos_calc_fit, n_t_calc_fit, sizeof(float2)*n_thread_pos>>>();
stop_time_record(&before,&after,&calc_fitness_time);
/* Calculate min fitness */
start_time_record(&before,&after);
parallel_fitness(n_particle, n_thread);
stop_time_record(&before,&after,&fitness_min_time);
if (fitness_min < TARGET_FITNESS)
break;
if (step % STEP_CHECK_FREQ == 0) {
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
}
write_system(step, calc_fitness_time, new_vel_pos_time, fitness_min_time, n_particle);
}
void write_system(const int step, const float calc_fitness_time, const float new_vel_pos, const float fitness_min, const int n_particles)
{
float current_fitness_d;
float global_fitness_d;
float *current_best_pos_d_addr = (float*)malloc(sizeof(float));
float *global_best_pos_addr = (float*)malloc(sizeof(float));
float *current_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *global_best_pos_d = (float*)malloc(sizeof(float) * DIM);
float *current_fitness_d_addr = (float*)malloc(sizeof(float));
float *global_fitness_d_addr = (float*)malloc(sizeof(float));
int j;
	cudaGetSymbolAddress((void **)&current_fitness_d_addr, current_fitness);
	cudaGetSymbolAddress((void **)&global_fitness_d_addr, global_fitness);
	cudaGetSymbolAddress((void **)&current_best_pos_d_addr, current_best_pos);
	cudaGetSymbolAddress((void **)&global_best_pos_addr, global_best_pos);
	check_error(cudaMemcpy(&current_fitness_d, current_fitness_d_addr, sizeof(float),cudaMemcpyDeviceToHost),"refresh current_fitness_d host");
check_error(cudaMemcpy(&global_fitness_d, global_fitness_d_addr, sizeof(float),cudaMemcpyDeviceToHost),"refresh global_fitness_d host");
printf("step %u, best fitness: current %g, so far %g\n", step,
current_fitness_d, global_fitness_d);
if (step > 0) {
printf("time - calc_fitness_time: %fms new_vel_pos: %fms fitness_min: %f\n",calc_fitness_time,new_vel_pos,fitness_min);
}
printf("\ttarget ");
for(j = 0; j < DIM; j++){
printf("%g,", target_pos[j]);
}
printf("\n");
}
/* Target function to be minimized: this is the square
* Euclidean distance from target_pos, “perturbed” by the distance
* to the origin: this puts a local minimum at the origin,
* which is good to test if the method actually finds the global
* minimum or not */
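/* Concretely, for a position p this computes
 * fitness(p) = (sum_i (p_i - t_i)^2) * (100 * sum_i p_i^2 + 1) / 10,
 * where t is target_pos_shared. */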
__device__ float fitness(float *pos)
{
int i;
float fit1 = 0,fit2 = 0, dim_val;
for(i = 0; i < DIM; i++){
dim_val = pos[i];
fit1 += pow(dim_val - target_pos_shared[i],2);
fit2 += pow(dim_val,2);
}
return fit1*(100*fit2+1)/10;
}
/* A function that generates a random float in the given range */
__device__ __host__ float range_rand(float min, float max, uint64_t *prng_state)
{
uint32_t r = MWC64X(prng_state);
return min + r*((max - min)/UINT32_MAX);
}
/* Random number generation: we use the MWC64X PRNG from
* http://cas.ee.ic.ac.uk/people/dt10/research/rngs-gpu-mwc64x.html
* which is parallel-friendly (but needs us to keep track of the state)
*/
__device__ __host__ uint32_t MWC64X(uint64_t *state)
{
uint64_t x = *state;
uint32_t c = x >> 32; // the upper 32 bits
x &= UINT32_MAX; // keep only the lower bits
*state = x*4294883355U + c;
return ((uint32_t)x)^c;
}
/* A function to initialize the PRNG */
__device__ __host__ void init_rand(uint64_t *prng_state, int i)
{
*prng_state = i;
}
/* Function to initialize a single particle at index i. */
__global__ void init_particle()
{
const int particleIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * DIM;
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l;
init_rand(&prng_state_l, particleIndex);
int j;
for (j = 0; j < DIM; j++){
best_pos[particleIndexDIM + j] = pos[particleIndexDIM + j] = range_rand(coord_min, coord_max, &prng_state_l);
}
for (j = 0; j < DIM; j++){
vel[particleIndexDIM + j] = range_rand(-coord_range, coord_range, &prng_state_l);
}
best_fit[particleIndex] = fitness_val[particleIndex] = fitness(pos + particleIndexDIM);
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new position and new velocity of a given particle */
__global__ void new_vel_pos()
{
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexDIM = particleIndex * DIM + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles)
return;
uint64_t prng_state_l = prng_state[particleIndex];
const float best_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
const float global_vec_rand_coeff = range_rand(0, 1, &prng_state_l);
// float velLocal = __ldg(&vel[particleIndexDIM])*vel_omega;
// float posLocal = __ldg(&pos[particleIndexDIM]);
float velLocal = vel[particleIndexDIM]*vel_omega;
float posLocal = pos[particleIndexDIM];
float pbest = (best_pos[particleIndexDIM] - posLocal) * best_vec_rand_coeff*vel_phi_best;
float gbest = (global_best_pos[indexDIM] - posLocal) * global_vec_rand_coeff*vel_phi_global;
velLocal+= (pbest + gbest);
if(velLocal > coord_range) velLocal = coord_range;
else if (velLocal < -coord_range) velLocal = -coord_range;
posLocal += (step_factor*velLocal);
if (posLocal > coord_max) posLocal = coord_max;
else if (posLocal < coord_min) posLocal = coord_min;
pos[particleIndexDIM] = posLocal;
vel[particleIndexDIM] = velLocal;
prng_state[particleIndex] = prng_state_l;
}
/* Kernel function to compute the new fitness val of a given particle */
__global__ void calc_fitness()
{
extern __shared__ float2 smpos[];
const int particleIndex = blockIdx.x * blockDim.y + threadIdx.y;
const int particleIndexSHM = threadIdx.y * blockDim.x + threadIdx.x;
const int particleIndexDIM = particleIndex * DIM + threadIdx.x;
const int indexDIM = threadIdx.x;
if(particleIndex >= num_particles)
return;
float posLocal = pos[particleIndexDIM];
smpos[particleIndexSHM].x = (posLocal - target_pos_shared[indexDIM])*(posLocal - target_pos_shared[indexDIM]);
smpos[particleIndexSHM].y = (posLocal*posLocal);
warp_control_float2(smpos,particleIndexSHM, indexDIM);
if (indexDIM==0){
float fitness = smpos[particleIndexSHM].x*(100*smpos[particleIndexSHM].y+1)/10;
fitness_val[particleIndex] = fitness;
if (fitness < best_fit[particleIndex]) {
best_fit[particleIndex] = fitness;
			memcpy(best_pos + particleIndex*DIM, pos + particleIndex*DIM, sizeof(float)*DIM); // copy this particle's full DIM-dimensional position
}
}
}
/* Host-side wrapper around the find_min_fitness_parallel kernel: it runs the reduction in passes so that the kernel's shared-memory buffer is never exceeded */
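/* Each pass reduces `blocks` candidate minima to ceil(blocks / n_thread)
 * partial minima and repeats until a single block remains; for that last pass
 * the thread count is set to the smallest power of two that covers the
 * remaining candidates, so the in-kernel tree reduction reaches all of them. */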
void parallel_fitness(const int n_particle, int n_thread){
int shmdim;
fitness_pos *fitness_device_out,*fitness_device_in = NULL;
int last_n_block;
int offset;
int blocks = n_particle;
int max_parallel_particle_iteration = SHARED_MEMORY_DIM / sizeof(fitness_pos);
int iteration;
int max_blocks_per_iteration = max_parallel_particle_iteration / n_thread;
while(blocks != 1){
offset = 0;
last_n_block = blocks;
blocks = ceil((float)blocks / n_thread);
if(blocks == 1){
n_thread = ceil_log2(last_n_block);
}
cudaMalloc(&fitness_device_out, sizeof(fitness_pos) * blocks);
shmdim = n_thread*sizeof(fitness_pos);
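		// NOTE: the "&& 0" below permanently disables the chunked multi-launch
		// path, so the single-launch else branch is always taken.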
if(max_parallel_particle_iteration < last_n_block && 0){
iteration = 0;
while(iteration + max_parallel_particle_iteration < blocks*n_thread){
find_min_fitness_parallel<<<max_blocks_per_iteration, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
iteration += max_parallel_particle_iteration;
offset += (max_parallel_particle_iteration/n_thread);
}
int x = (blocks*n_thread) - (offset*n_thread);
x = ceil((float)x / n_thread);
find_min_fitness_parallel<<<x, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block, blocks);
}else{
find_min_fitness_parallel<<<blocks, n_thread,shmdim>>>
(fitness_device_in, fitness_device_out, offset, last_n_block,blocks);
}
if(fitness_device_in != NULL){
check_error(cudaFree(fitness_device_in),"free fitness_device_in");
}
fitness_device_in = fitness_device_out;
}
fitness_device_out = (fitness_pos*)malloc(sizeof(fitness_pos));
check_error(cudaMemcpy(fitness_device_out, fitness_device_in, sizeof(fitness_pos),cudaMemcpyDeviceToHost),"copy fitness_min");
fitness_min = fitness_device_out->fitness;
check_error(cudaFree(fitness_device_in),"free fitness_device_out");
free(fitness_device_out);
}
/* Kernel function to compute the new global min fitness */
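/* When `in` is NULL each thread reads its own particle's fitness from
 * fitness_val, otherwise it reads a partial minimum produced by a previous
 * pass; out-of-range threads leave the HUGE_VALF sentinel in their
 * shared-memory slot. Each block then reduces its slots to one minimum and
 * writes it to `out`, and on the final pass (blocks == 1) thread 0 also
 * updates the current and global best fitness and position. */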
__global__ void find_min_fitness_parallel(__restrict__ const fitness_pos* in, fitness_pos* out,const int offset,const int n_in,const int blocks){
extern __shared__ fitness_pos sm[];
const int tid=threadIdx.x;
const int i=(blockIdx.x*blockDim.x+threadIdx.x) + (offset*blockDim.x);
int stride;
sm[tid].fitness = HUGE_VALF;
if(i >= num_particles || i >= n_in)
return;
if(in != NULL){
sm[tid] = in[i];
}else{
sm[tid].fitness = fitness_val[i];
sm[tid].pos = i;
}
	//block-wide tree reduction: keep the smaller fitness at each stride
for (stride = blockDim.x/2;stride>0;stride>>=1)
{
__syncthreads();
if (tid<stride && sm[tid].fitness > sm[tid+stride].fitness){
sm[tid] = sm[tid+stride];
}
}
if (tid==0){
out[blockIdx.x+offset]=sm[0];//copy back
if(blocks == 1){
current_fitness = sm[0].fitness;
memcpy(current_best_pos,pos+sm[0].pos*DIM,sizeof(float)*DIM);
if (sm[0].fitness < global_fitness) {
global_fitness = sm[0].fitness;
memcpy(global_best_pos,current_best_pos,sizeof(float)*DIM);
}
}
}
}
void start_time_record(cudaEvent_t *before, cudaEvent_t *after){
check_error(cudaEventCreate(&(*before)),"create cudaEvent before");
check_error(cudaEventCreate(&(*after)),"create cudaEvent after");
check_error(cudaEventRecord(*before),"record cudaEvent before");
}
void stop_time_record(cudaEvent_t *before, cudaEvent_t *after, float *runtime){
check_error(cudaEventRecord(*after),"record cudaEvent after");
check_error(cudaEventSynchronize(*after),"synch cudaEvent before");
check_error(cudaEventElapsedTime(runtime, *before, *after),"calc cudaEvent elapsed time");
}
void check_error(cudaError_t err, const char *msg)
{
if (err != cudaSuccess) {
		fprintf(stderr, "%s : error %d (%s)\n",
msg, err, cudaGetErrorString(err));
exit(err);
}
}
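/* Per-particle reduction helper used by calc_fitness: it sums the
 * per-dimension partial terms held in shared memory so that, when it returns,
 * the thread with indexDIM == 0 holds the particle's squared distance to the
 * target in .x and its squared norm in .y. */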
__device__ void warp_control_float2(float2* smpos, int particleIndexSHM, int indexDIM)
{
__syncthreads();
#if DIM > 1
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM].x += smpos[particleIndexSHM+stride].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM+stride].y;
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 32
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 16].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 16].y;
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 8].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 8].y;
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 4].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 4].y;
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 2].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 2].y;
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM].x += smpos[particleIndexSHM + 1].x;
smpos[particleIndexSHM].y += smpos[particleIndexSHM + 1].y;
__syncthreads();
#endif
}
#endif
}
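/* Plain-float variant of the reduction above; it appears unused in this file.
 * Note that the generic loop in the DIM > 1024 branch refers to an indexDIM
 * variable that is not a parameter here, but that branch is compiled out for
 * the current DIM. */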
__device__ void warp_control_float(float* smpos, int particleIndexSHM)
{
__syncthreads();
#if DIM > 1024
for (int stride = blockDim.x/2;stride>0;stride>>=1)
{
if (indexDIM<stride){
smpos[particleIndexSHM] += smpos[particleIndexSHM+stride];
}
__syncthreads();
}
#else
if (particleIndexSHM < DIM/2)
{
#if DIM >= 512
smpos[particleIndexSHM] += smpos[particleIndexSHM + 256];
__syncthreads();
#endif
#if DIM >= 256
smpos[particleIndexSHM] += smpos[particleIndexSHM + 128];
__syncthreads();
#endif
#if DIM >= 128
smpos[particleIndexSHM] += smpos[particleIndexSHM + 64];
__syncthreads();
#endif
#if DIM >= 64
smpos[particleIndexSHM] += smpos[particleIndexSHM + 32];
__syncthreads();
#endif
#if DIM >= 32
smpos[particleIndexSHM] += smpos[particleIndexSHM + 16];
__syncthreads();
#endif
#if DIM >= 16
smpos[particleIndexSHM] += smpos[particleIndexSHM + 8];
__syncthreads();
#endif
#if DIM >= 8
smpos[particleIndexSHM] += smpos[particleIndexSHM + 4];
__syncthreads();
#endif
#if DIM >= 4
smpos[particleIndexSHM] += smpos[particleIndexSHM + 2];
__syncthreads();
#endif
#if DIM >= 2
smpos[particleIndexSHM] += smpos[particleIndexSHM + 1];
__syncthreads();
#endif
}
#endif
}
/* Returns the smallest power of two greater than or equal to x (1 << ceil(log2 x)); used to pick a power-of-two thread count for the final reduction pass */
int ceil_log2(unsigned long long x)
{
static const unsigned long long t[6] = {
0xFFFFFFFF00000000ull,
0x00000000FFFF0000ull,
0x000000000000FF00ull,
0x00000000000000F0ull,
0x000000000000000Cull,
0x0000000000000002ull
};
int y = (((x & (x - 1)) == 0) ? 0 : 1);
int j = 32;
int i;
for (i = 0; i < 6; i++) {
int k = (((x & t[i]) == 0) ? 0 : j);
y += k;
x >>= k;
j >>= 1;
}
return 1<<y;
}
|
449b7dfaeabb56fd94a63ac8d066c6c15cc58c31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_HETERPS
#include <thread>
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
namespace paddle {
namespace framework {
#if defined(PADDLE_WITH_CUDA)
template <typename value_type>
struct ReplaceOp {
__host__ __device__ value_type operator()(value_type new_value,
value_type old_value) {
return new_value;
}
};
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
size_t len,
uint64_t* global_num) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
__shared__ uint64_t local_num;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len) {
kv.first = keys[i];
kv.second = 1; // fake value
auto it = table->insert(kv, op, &local_num);
assert(it != table->end() && "error: insert fails: table is full");
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(global_num, local_num);
}
}
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
size_t len) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
kv.first = keys[i];
kv.second = vals[i];
auto it = table->insert(kv, op);
assert(it != table->end() && "error: insert fails: table is full");
}
}
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
size_t len,
char* pool,
size_t feature_value_size,
int start_index) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
kv.first = keys[i];
uint64_t offset = uint64_t(start_index + i) * feature_value_size;
kv.second = (Table::mapped_type)(pool + offset);
auto it = table->insert(kv, op);
if (it == table->end()) {
printf("error: insert fails: table is full");
}
}
}
template <typename Table>
__global__ void search_kernel(Table* table,
const typename Table::key_type* const keys,
typename Table::mapped_type* const vals,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
vals[i] = it->second;
}
}
}
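// Variable-length ("dynamic mf") lookups: each found feature value is copied
// into the caller's buffer through the GPU accessor. dy_mf_search_kernel_fill
// writes a zero-initialized value for missing keys (used when infer_mode_ is
// set on the host side), while dy_mf_search_kernel only prints a warning on a
// miss.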
template <typename Table, typename GPUAccessor>
__global__ void dy_mf_search_kernel_fill(
Table* table,
const typename Table::key_type* const keys,
char* vals,
size_t len,
size_t pull_feature_value_size,
GPUAccessor gpu_accessor) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
uint64_t offset = i * pull_feature_value_size;
float* cur = reinterpret_cast<float*>(vals + offset);
float* input = it->second;
gpu_accessor.PullValueFill(cur, input);
} else {
float* cur = reinterpret_cast<float*>(&vals[i * pull_feature_value_size]);
gpu_accessor.PullZeroValue(cur);
}
}
}
template <typename Table, typename GPUAccessor>
__global__ void dy_mf_search_kernel(Table* table,
const typename Table::key_type* const keys,
char* vals,
size_t len,
size_t pull_feature_value_size,
GPUAccessor gpu_accessor) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
uint64_t offset = i * pull_feature_value_size;
float* cur = reinterpret_cast<float*>(vals + offset);
float* input = it->second;
gpu_accessor.PullValueFill(cur, input);
} else {
printf("warning: pull miss key: %lu", keys[i]);
}
}
}
template <typename Table, typename GradType, typename Sgd>
__global__ void update_kernel(Table* table,
const OptimizerConfig& optimizer_config,
const typename Table::key_type* const keys,
const GradType* const grads,
size_t len,
Sgd sgd) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
sgd.update_value(optimizer_config, (it.getter())->second, grads[i]);
}
}
}
template <typename Table, typename Sgd>
__global__ void dy_mf_update_kernel(Table* table,
const OptimizerConfig& optimizer_config,
const typename Table::key_type* const keys,
const char* const grads,
size_t len,
Sgd sgd,
size_t grad_value_size) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
const float* cur =
reinterpret_cast<const float*>(grads + i * grad_value_size);
sgd.dy_mf_update_value(optimizer_config, (it.getter())->second, cur);
} else {
printf("warning: push miss key: %lu", keys[i]);
}
}
}
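// get_keys_kernel compacts every live key of the table into d_out: threads of
// a block first gather their non-empty slots into shared memory via a
// block-local atomic counter, thread 0 then reserves a contiguous range of the
// output by atomically advancing global_cursor, and the block finally copies
// its gathered keys into that range.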
template <typename Table>
__global__ void get_keys_kernel(Table* table,
typename Table::key_type* d_out,
uint64_t* global_cursor,
uint64_t unused_key) {
extern __shared__ typename Table::key_type local_key[];
__shared__ uint64_t local_num;
__shared__ uint64_t global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
uint64_t len = table->size();
if (idx < len) {
typename Table::value_type val = *(table->data() + idx);
if (val.first != unused_key) {
uint64_t dst = atomicAdd(&local_num, 1);
local_key[dst] = val.first;
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(global_cursor, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
}
}
template <typename KeyType, typename ValType>
HashTable<KeyType, ValType>::HashTable(size_t capacity) {
container_ = new TableContainer<KeyType, ValType>(capacity);
CUDA_RT_CALL(hipMalloc(&device_optimizer_config_, sizeof(OptimizerConfig)));
CUDA_RT_CALL(hipMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
hipMemcpyHostToDevice));
rwlock_.reset(new phi::RWLock);
}
template <typename KeyType, typename ValType>
HashTable<KeyType, ValType>::~HashTable() {
delete container_;
hipFree(device_optimizer_config_);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::set_sparse_sgd(
const OptimizerConfig& optimizer_config) {
host_optimizer_config_.set_sparse_sgd(optimizer_config);
hipMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
hipMemcpyHostToDevice);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::set_embedx_sgd(
const OptimizerConfig& optimizer_config) {
host_optimizer_config_.set_embedx_sgd(optimizer_config);
hipMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
hipMemcpyHostToDevice);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::show() {
container_->print();
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::get(const KeyType* d_keys,
ValType* d_vals,
size_t len,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( search_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, d_vals, len);
}
template <typename KeyType, typename ValType>
template <typename StreamType, typename GPUAccessor>
void HashTable<KeyType, ValType>::get(const KeyType* d_keys,
char* d_vals,
size_t len,
StreamType stream,
const GPUAccessor& fv_accessor) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
// infer need zero fill
if (infer_mode_) {
hipLaunchKernelGGL(( dy_mf_search_kernel_fill), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, d_vals, len, pull_feature_value_size_, fv_accessor);
} else {
hipLaunchKernelGGL(( dy_mf_search_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, d_vals, len, pull_feature_value_size_, fv_accessor);
}
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
size_t len,
uint64_t* global_num,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( insert_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, len, global_num);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
const ValType* d_vals,
size_t len,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( insert_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, d_vals, len);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::get_keys(KeyType* d_out,
uint64_t* global_cursor,
StreamType stream) {
size_t len = container_->size();
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
KeyType unuse_key = std::numeric_limits<KeyType>::max();
size_t shared_mem_size = sizeof(KeyType) * BLOCK_SIZE_;
hipLaunchKernelGGL(( get_keys_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), shared_mem_size, stream,
container_, d_out, global_cursor, unuse_key);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
size_t len,
char* pool,
size_t feature_value_size,
size_t start_index,
StreamType stream) {
if (len == 0) {
return;
}
if (pool == NULL) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( insert_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, d_keys, len, pool, feature_value_size, start_index);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::dump_to_cpu(int devid, StreamType stream) {
container_->prefetch(hipCpuDeviceId, stream);
}
template <typename KeyType, typename ValType>
template <typename Sgd, typename StreamType>
void HashTable<KeyType, ValType>::update(const KeyType* d_keys,
const float* d_grads,
size_t len,
Sgd sgd,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( update_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_, *device_optimizer_config_, d_keys, d_grads, len, sgd);
}
template <typename KeyType, typename ValType>
template <typename Sgd, typename StreamType>
void HashTable<KeyType, ValType>::update(const KeyType* d_keys,
const char* d_grads,
size_t len,
Sgd sgd,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
hipLaunchKernelGGL(( dy_mf_update_kernel), dim3(grid_size), dim3(BLOCK_SIZE_), 0, stream,
container_,
*device_optimizer_config_,
d_keys,
d_grads,
len,
sgd,
push_grad_value_size_);
}
template class HashTable<uint64_t, float>;
template class HashTable<uint64_t, float*>;
template class HashTable<int64_t, int>;
template class HashTable<uint64_t, int>;
template class HashTable<uint64_t, uint64_t>;
template class HashTable<uint64_t, uint64_t*>;
template class HashTable<uint64_t, int64_t>;
template class HashTable<uint64_t, int64_t*>;
template class HashTable<int64_t, int64_t>;
template class HashTable<int64_t, uint64_t>;
template class HashTable<int64_t, unsigned int>;
template void HashTable<uint64_t, float>::get<hipStream_t>(
const uint64_t* d_keys, float* d_vals, size_t len, hipStream_t stream);
template void
HashTable<uint64_t, float*>::get<hipStream_t, CommonFeatureValueAccessor>(
const uint64_t* d_keys,
char* d_vals,
size_t len,
hipStream_t stream,
const CommonFeatureValueAccessor& fv_accessor);
template void HashTable<int64_t, int>::get<hipStream_t>(const int64_t* d_keys,
int* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<uint64_t, int>::get<hipStream_t>(
const uint64_t* d_keys, int* d_vals, size_t len, hipStream_t stream);
template void HashTable<uint64_t, uint64_t>::get<hipStream_t>(
const uint64_t* d_keys, uint64_t* d_vals, size_t len, hipStream_t stream);
template void HashTable<uint64_t, int64_t>::get<hipStream_t>(
const uint64_t* d_keys, int64_t* d_vals, size_t len, hipStream_t stream);
template void HashTable<int64_t, uint64_t>::get<hipStream_t>(
const int64_t* d_keys, uint64_t* d_vals, size_t len, hipStream_t stream);
template void HashTable<int64_t, int64_t>::get<hipStream_t>(
const int64_t* d_keys, int64_t* d_vals, size_t len, hipStream_t stream);
template void HashTable<int64_t, unsigned int>::get<hipStream_t>(
const int64_t* d_keys,
unsigned int* d_vals,
size_t len,
hipStream_t stream);
// template void
// HashTable<uint64_t, paddle::framework::FeatureValue>::get<hipStream_t>(
// const uint64_t* d_keys, char* d_vals, size_t len, hipStream_t
// stream);
template void HashTable<uint64_t, float>::insert<hipStream_t>(
const uint64_t* d_keys,
const float* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<uint64_t, float*>::insert<hipStream_t>(
const uint64_t* d_keys,
size_t len,
char* pool,
size_t feature_value_size,
size_t start_index,
hipStream_t stream);
template void HashTable<int64_t, int>::insert<hipStream_t>(
const int64_t* d_keys, const int* d_vals, size_t len, hipStream_t stream);
template void HashTable<int64_t, int64_t>::insert<hipStream_t>(
const int64_t* d_keys,
const int64_t* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<uint64_t, int>::insert<hipStream_t>(
const uint64_t* d_keys, const int* d_vals, size_t len, hipStream_t stream);
template void HashTable<uint64_t, int64_t>::insert<hipStream_t>(
const uint64_t* d_keys,
const int64_t* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<int64_t, uint64_t>::insert<hipStream_t>(
const int64_t* d_keys,
const uint64_t* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<int64_t, unsigned int>::insert<hipStream_t>(
const int64_t* d_keys,
const unsigned int* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<uint64_t, uint64_t>::get_keys<hipStream_t>(
uint64_t* d_out, uint64_t* global_cursor, hipStream_t stream);
template void HashTable<uint64_t, uint64_t>::insert<hipStream_t>(
const uint64_t* d_keys,
uint64_t len,
uint64_t* global_num,
hipStream_t stream);
template void HashTable<uint64_t, uint64_t>::insert<hipStream_t>(
const uint64_t* d_keys,
const uint64_t* d_vals,
size_t len,
hipStream_t stream);
template void HashTable<uint64_t, float*>::dump_to_cpu<hipStream_t>(
int devid, hipStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdagradOptimizer<CommonFeatureValueAccessor>,
hipStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdagradOptimizer<CommonFeatureValueAccessor> sgd,
hipStream_t stream);
template void HashTable<uint64_t, float*>::update<
StdAdagradOptimizer<CommonFeatureValueAccessor>,
hipStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
StdAdagradOptimizer<CommonFeatureValueAccessor> sgd,
hipStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdamOptimizer<CommonFeatureValueAccessor>,
hipStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdamOptimizer<CommonFeatureValueAccessor> sgd,
hipStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdamSharedOptimizer<CommonFeatureValueAccessor>,
hipStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdamSharedOptimizer<CommonFeatureValueAccessor> sgd,
hipStream_t stream);
// template void HashTable<uint64_t,
// paddle::framework::FeatureValue>::update<
// Optimizer<paddle::framework::FeatureValue,
// paddle::framework::FeaturePushValue>,
// hipStream_t>(const uint64_t* d_keys, const char* d_grads, size_t
// len,
// Optimizer<paddle::framework::FeatureValue,
// paddle::framework::FeaturePushValue>
// sgd,
// hipStream_t stream);
#endif
} // end namespace framework
} // end namespace paddle
#endif
| 449b7dfaeabb56fd94a63ac8d066c6c15cc58c31.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_HETERPS
#include <thread>
#include "paddle/fluid/framework/fleet/heter_ps/hashtable.h"
#include "paddle/fluid/framework/fleet/heter_ps/optimizer.cuh.h"
namespace paddle {
namespace framework {
#if defined(PADDLE_WITH_CUDA)
template <typename value_type>
struct ReplaceOp {
__host__ __device__ value_type operator()(value_type new_value,
value_type old_value) {
return new_value;
}
};
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
size_t len,
uint64_t* global_num) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
__shared__ uint64_t local_num;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
if (i < len) {
kv.first = keys[i];
kv.second = 1; // fake value
auto it = table->insert(kv, op, &local_num);
assert(it != table->end() && "error: insert fails: table is full");
}
__syncthreads();
if (threadIdx.x == 0) {
atomicAdd(global_num, local_num);
}
}
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
size_t len) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
kv.first = keys[i];
kv.second = vals[i];
auto it = table->insert(kv, op);
assert(it != table->end() && "error: insert fails: table is full");
}
}
template <typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
size_t len,
char* pool,
size_t feature_value_size,
int start_index) {
ReplaceOp<typename Table::mapped_type> op;
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
kv.first = keys[i];
uint64_t offset = uint64_t(start_index + i) * feature_value_size;
kv.second = (Table::mapped_type)(pool + offset);
auto it = table->insert(kv, op);
if (it == table->end()) {
printf("error: insert fails: table is full");
}
}
}
template <typename Table>
__global__ void search_kernel(Table* table,
const typename Table::key_type* const keys,
typename Table::mapped_type* const vals,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
vals[i] = it->second;
}
}
}
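// Variable-length ("dynamic mf") lookups: each found feature value is copied
// into the caller's buffer through the GPU accessor. dy_mf_search_kernel_fill
// writes a zero-initialized value for missing keys (used when infer_mode_ is
// set on the host side), while dy_mf_search_kernel only prints a warning on a
// miss.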
template <typename Table, typename GPUAccessor>
__global__ void dy_mf_search_kernel_fill(
Table* table,
const typename Table::key_type* const keys,
char* vals,
size_t len,
size_t pull_feature_value_size,
GPUAccessor gpu_accessor) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
uint64_t offset = i * pull_feature_value_size;
float* cur = reinterpret_cast<float*>(vals + offset);
float* input = it->second;
gpu_accessor.PullValueFill(cur, input);
} else {
float* cur = reinterpret_cast<float*>(&vals[i * pull_feature_value_size]);
gpu_accessor.PullZeroValue(cur);
}
}
}
template <typename Table, typename GPUAccessor>
__global__ void dy_mf_search_kernel(Table* table,
const typename Table::key_type* const keys,
char* vals,
size_t len,
size_t pull_feature_value_size,
GPUAccessor gpu_accessor) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
uint64_t offset = i * pull_feature_value_size;
float* cur = reinterpret_cast<float*>(vals + offset);
float* input = it->second;
gpu_accessor.PullValueFill(cur, input);
} else {
printf("warning: pull miss key: %lu", keys[i]);
}
}
}
template <typename Table, typename GradType, typename Sgd>
__global__ void update_kernel(Table* table,
const OptimizerConfig& optimizer_config,
const typename Table::key_type* const keys,
const GradType* const grads,
size_t len,
Sgd sgd) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
sgd.update_value(optimizer_config, (it.getter())->second, grads[i]);
}
}
}
template <typename Table, typename Sgd>
__global__ void dy_mf_update_kernel(Table* table,
const OptimizerConfig& optimizer_config,
const typename Table::key_type* const keys,
const char* const grads,
size_t len,
Sgd sgd,
size_t grad_value_size) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if (it != table->end()) {
const float* cur =
reinterpret_cast<const float*>(grads + i * grad_value_size);
sgd.dy_mf_update_value(optimizer_config, (it.getter())->second, cur);
} else {
printf("warning: push miss key: %lu", keys[i]);
}
}
}
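// get_keys_kernel compacts every live key of the table into d_out: threads of
// a block first gather their non-empty slots into shared memory via a
// block-local atomic counter, thread 0 then reserves a contiguous range of the
// output by atomically advancing global_cursor, and the block finally copies
// its gathered keys into that range.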
template <typename Table>
__global__ void get_keys_kernel(Table* table,
typename Table::key_type* d_out,
uint64_t* global_cursor,
uint64_t unused_key) {
extern __shared__ typename Table::key_type local_key[];
__shared__ uint64_t local_num;
__shared__ uint64_t global_num;
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
local_num = 0;
}
__syncthreads();
uint64_t len = table->size();
if (idx < len) {
typename Table::value_type val = *(table->data() + idx);
if (val.first != unused_key) {
uint64_t dst = atomicAdd(&local_num, 1);
local_key[dst] = val.first;
}
}
__syncthreads();
if (threadIdx.x == 0) {
global_num = atomicAdd(global_cursor, local_num);
}
__syncthreads();
if (threadIdx.x < local_num) {
d_out[global_num + threadIdx.x] = local_key[threadIdx.x];
}
}
template <typename KeyType, typename ValType>
HashTable<KeyType, ValType>::HashTable(size_t capacity) {
container_ = new TableContainer<KeyType, ValType>(capacity);
CUDA_RT_CALL(cudaMalloc(&device_optimizer_config_, sizeof(OptimizerConfig)));
CUDA_RT_CALL(cudaMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
cudaMemcpyHostToDevice));
rwlock_.reset(new phi::RWLock);
}
template <typename KeyType, typename ValType>
HashTable<KeyType, ValType>::~HashTable() {
delete container_;
cudaFree(device_optimizer_config_);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::set_sparse_sgd(
const OptimizerConfig& optimizer_config) {
host_optimizer_config_.set_sparse_sgd(optimizer_config);
cudaMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
cudaMemcpyHostToDevice);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::set_embedx_sgd(
const OptimizerConfig& optimizer_config) {
host_optimizer_config_.set_embedx_sgd(optimizer_config);
cudaMemcpy(device_optimizer_config_,
&host_optimizer_config_,
sizeof(OptimizerConfig),
cudaMemcpyHostToDevice);
}
template <typename KeyType, typename ValType>
void HashTable<KeyType, ValType>::show() {
container_->print();
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::get(const KeyType* d_keys,
ValType* d_vals,
size_t len,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
search_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, d_vals, len);
}
template <typename KeyType, typename ValType>
template <typename StreamType, typename GPUAccessor>
void HashTable<KeyType, ValType>::get(const KeyType* d_keys,
char* d_vals,
size_t len,
StreamType stream,
const GPUAccessor& fv_accessor) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
// infer need zero fill
if (infer_mode_) {
dy_mf_search_kernel_fill<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, d_vals, len, pull_feature_value_size_, fv_accessor);
} else {
dy_mf_search_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, d_vals, len, pull_feature_value_size_, fv_accessor);
}
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
size_t len,
uint64_t* global_num,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
insert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, len, global_num);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
const ValType* d_vals,
size_t len,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
insert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, d_vals, len);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::get_keys(KeyType* d_out,
uint64_t* global_cursor,
StreamType stream) {
size_t len = container_->size();
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
KeyType unuse_key = std::numeric_limits<KeyType>::max();
size_t shared_mem_size = sizeof(KeyType) * BLOCK_SIZE_;
get_keys_kernel<<<grid_size, BLOCK_SIZE_, shared_mem_size, stream>>>(
container_, d_out, global_cursor, unuse_key);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::insert(const KeyType* d_keys,
size_t len,
char* pool,
size_t feature_value_size,
size_t start_index,
StreamType stream) {
if (len == 0) {
return;
}
if (pool == NULL) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
insert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, d_keys, len, pool, feature_value_size, start_index);
}
template <typename KeyType, typename ValType>
template <typename StreamType>
void HashTable<KeyType, ValType>::dump_to_cpu(int devid, StreamType stream) {
container_->prefetch(cudaCpuDeviceId, stream);
}
template <typename KeyType, typename ValType>
template <typename Sgd, typename StreamType>
void HashTable<KeyType, ValType>::update(const KeyType* d_keys,
const float* d_grads,
size_t len,
Sgd sgd,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
update_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_, *device_optimizer_config_, d_keys, d_grads, len, sgd);
}
template <typename KeyType, typename ValType>
template <typename Sgd, typename StreamType>
void HashTable<KeyType, ValType>::update(const KeyType* d_keys,
const char* d_grads,
size_t len,
Sgd sgd,
StreamType stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
dy_mf_update_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(
container_,
*device_optimizer_config_,
d_keys,
d_grads,
len,
sgd,
push_grad_value_size_);
}
template class HashTable<uint64_t, float>;
template class HashTable<uint64_t, float*>;
template class HashTable<int64_t, int>;
template class HashTable<uint64_t, int>;
template class HashTable<uint64_t, uint64_t>;
template class HashTable<uint64_t, uint64_t*>;
template class HashTable<uint64_t, int64_t>;
template class HashTable<uint64_t, int64_t*>;
template class HashTable<int64_t, int64_t>;
template class HashTable<int64_t, uint64_t>;
template class HashTable<int64_t, unsigned int>;
template void HashTable<uint64_t, float>::get<cudaStream_t>(
const uint64_t* d_keys, float* d_vals, size_t len, cudaStream_t stream);
template void
HashTable<uint64_t, float*>::get<cudaStream_t, CommonFeatureValueAccessor>(
const uint64_t* d_keys,
char* d_vals,
size_t len,
cudaStream_t stream,
const CommonFeatureValueAccessor& fv_accessor);
template void HashTable<int64_t, int>::get<cudaStream_t>(const int64_t* d_keys,
int* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<uint64_t, int>::get<cudaStream_t>(
const uint64_t* d_keys, int* d_vals, size_t len, cudaStream_t stream);
template void HashTable<uint64_t, uint64_t>::get<cudaStream_t>(
const uint64_t* d_keys, uint64_t* d_vals, size_t len, cudaStream_t stream);
template void HashTable<uint64_t, int64_t>::get<cudaStream_t>(
const uint64_t* d_keys, int64_t* d_vals, size_t len, cudaStream_t stream);
template void HashTable<int64_t, uint64_t>::get<cudaStream_t>(
const int64_t* d_keys, uint64_t* d_vals, size_t len, cudaStream_t stream);
template void HashTable<int64_t, int64_t>::get<cudaStream_t>(
const int64_t* d_keys, int64_t* d_vals, size_t len, cudaStream_t stream);
template void HashTable<int64_t, unsigned int>::get<cudaStream_t>(
const int64_t* d_keys,
unsigned int* d_vals,
size_t len,
cudaStream_t stream);
// template void
// HashTable<uint64_t, paddle::framework::FeatureValue>::get<cudaStream_t>(
// const uint64_t* d_keys, char* d_vals, size_t len, cudaStream_t
// stream);
template void HashTable<uint64_t, float>::insert<cudaStream_t>(
const uint64_t* d_keys,
const float* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<uint64_t, float*>::insert<cudaStream_t>(
const uint64_t* d_keys,
size_t len,
char* pool,
size_t feature_value_size,
size_t start_index,
cudaStream_t stream);
template void HashTable<int64_t, int>::insert<cudaStream_t>(
const int64_t* d_keys, const int* d_vals, size_t len, cudaStream_t stream);
template void HashTable<int64_t, int64_t>::insert<cudaStream_t>(
const int64_t* d_keys,
const int64_t* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<uint64_t, int>::insert<cudaStream_t>(
const uint64_t* d_keys, const int* d_vals, size_t len, cudaStream_t stream);
template void HashTable<uint64_t, int64_t>::insert<cudaStream_t>(
const uint64_t* d_keys,
const int64_t* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<int64_t, uint64_t>::insert<cudaStream_t>(
const int64_t* d_keys,
const uint64_t* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<int64_t, unsigned int>::insert<cudaStream_t>(
const int64_t* d_keys,
const unsigned int* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<uint64_t, uint64_t>::get_keys<cudaStream_t>(
uint64_t* d_out, uint64_t* global_cursor, cudaStream_t stream);
template void HashTable<uint64_t, uint64_t>::insert<cudaStream_t>(
const uint64_t* d_keys,
uint64_t len,
uint64_t* global_num,
cudaStream_t stream);
template void HashTable<uint64_t, uint64_t>::insert<cudaStream_t>(
const uint64_t* d_keys,
const uint64_t* d_vals,
size_t len,
cudaStream_t stream);
template void HashTable<uint64_t, float*>::dump_to_cpu<cudaStream_t>(
int devid, cudaStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdagradOptimizer<CommonFeatureValueAccessor>,
cudaStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdagradOptimizer<CommonFeatureValueAccessor> sgd,
cudaStream_t stream);
template void HashTable<uint64_t, float*>::update<
StdAdagradOptimizer<CommonFeatureValueAccessor>,
cudaStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
StdAdagradOptimizer<CommonFeatureValueAccessor> sgd,
cudaStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdamOptimizer<CommonFeatureValueAccessor>,
cudaStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdamOptimizer<CommonFeatureValueAccessor> sgd,
cudaStream_t stream);
template void HashTable<uint64_t, float*>::update<
SparseAdamSharedOptimizer<CommonFeatureValueAccessor>,
cudaStream_t>(const uint64_t* d_keys,
const char* d_grads,
size_t len,
SparseAdamSharedOptimizer<CommonFeatureValueAccessor> sgd,
cudaStream_t stream);
// template void HashTable<uint64_t,
// paddle::framework::FeatureValue>::update<
// Optimizer<paddle::framework::FeatureValue,
// paddle::framework::FeaturePushValue>,
// cudaStream_t>(const uint64_t* d_keys, const char* d_grads, size_t
// len,
// Optimizer<paddle::framework::FeatureValue,
// paddle::framework::FeaturePushValue>
// sgd,
// cudaStream_t stream);
#endif
} // end namespace framework
} // end namespace paddle
#endif
|
0861818ba93ea4c6bd92505fb01d6d38b8dddf9f.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Filename : algorithm.c
Author : Your name goes here
SCIPER : Your SCIPER number
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
// CPU Baseline
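// Repeatedly replaces each interior cell with the mean of its 3x3
// neighbourhood, re-imposes the four heated cells at the centre of the grid,
// and swaps the input/output buffers between iterations.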
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
temp = input;
input = output;
output = temp;
}
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
hipEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
hipEventCreate(&cpy_H2D_start);
hipEventCreate(&cpy_H2D_end);
hipEventCreate(&cpy_D2H_start);
hipEventCreate(&cpy_D2H_end);
hipEventCreate(&comp_start);
hipEventCreate(&comp_end);
/* Preprocessing goes here */
hipEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
hipEventRecord(cpy_H2D_end);
hipEventSynchronize(cpy_H2D_end);
//Copy array from host to device
hipEventRecord(comp_start);
/* GPU calculation goes here */
hipEventRecord(comp_end);
hipEventSynchronize(comp_end);
hipEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
hipEventRecord(cpy_D2H_end);
hipEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
float time;
hipEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
hipEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
hipEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
} | 0861818ba93ea4c6bd92505fb01d6d38b8dddf9f.cu | /*
============================================================================
Filename : algorithm.c
Author : Your name goes here
SCIPER : Your SCIPER number
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
void array_process(double *input, double *output, int length, int iterations)
{
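// Iterative 3x3 box filter: every interior cell becomes the average of its 3x3 neighbourhood,
// the four centre cells are then reset to 1000, and the input/output pointers are swapped.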
double *temp;
for(int n=0; n<(int) iterations; n++)
{
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
temp = input;
input = output;
output = temp;
}
}
// GPU Optimized function
void GPU_array_process(double *input, double *output, int length, int iterations)
{
//Cuda events for calculating elapsed time
cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
cudaEventCreate(&cpy_H2D_start);
cudaEventCreate(&cpy_H2D_end);
cudaEventCreate(&cpy_D2H_start);
cudaEventCreate(&cpy_D2H_end);
cudaEventCreate(&comp_start);
cudaEventCreate(&comp_end);
/* Preprocessing goes here */
cudaEventRecord(cpy_H2D_start);
/* Copying array from host to device goes here */
cudaEventRecord(cpy_H2D_end);
cudaEventSynchronize(cpy_H2D_end);
//Copy array from host to device
cudaEventRecord(comp_start);
/* GPU calculation goes here */
cudaEventRecord(comp_end);
cudaEventSynchronize(comp_end);
cudaEventRecord(cpy_D2H_start);
/* Copying array from device to host goes here */
cudaEventRecord(cpy_D2H_end);
cudaEventSynchronize(cpy_D2H_end);
/* Postprocessing goes here */
float time;
cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, comp_start, comp_end);
cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
} |
ae090d167956d809619820881da97c2ac11b62f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Device.h"
#include <iostream>
#include "MathTools.h"
#include "cudaTools.h"
#include <algorithm>
__host__ static bool runGPU(int n);
__global__ static void kernelHistogramme(int* ptrDevInput, int* ptrDevOut, int n, int sizeHistogramme);
__host__ static int randomMinMax(int min, int max)
{
return (int)((max-min)*((float)(rand())/(float)RAND_MAX) + min);
}
__host__ bool histogrammeGM(int n)
{
return runGPU(n);
}
__host__ bool runGPU(int n)
{
int nMaxValue = 256;
dim3 dg = dim3(nMaxValue, 1, 1);
dim3 db = dim3(nMaxValue, 1, 1);
size_t sizeHistogramme = db.x*sizeof(int);
int* ptrInput = new int[n];
int* ptrOut = new int[nMaxValue];
for(int i = 0; i < n; ++i)
ptrInput[i] = i%nMaxValue;
for(int i = 0; i < nMaxValue; ++i)
ptrOut[i] = 0;
for(int i = 0; i < n; ++i)
std::swap(ptrInput[randomMinMax(0, n-1)], ptrInput[randomMinMax(0, n-1)]);
int* ptrDevInput;
int* ptrDevOut;
HANDLE_ERROR(hipMalloc((void**)&ptrDevInput, n*sizeof(int)));
HANDLE_ERROR(hipMemcpy(ptrDevInput, ptrInput, n*sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMalloc((void**)&ptrDevOut, sizeHistogramme));
HANDLE_ERROR(hipMemcpy(ptrDevOut, ptrOut, sizeHistogramme, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernelHistogramme), dim3(dg), dim3(db), sizeHistogramme, 0, ptrDevInput, ptrDevOut, n, nMaxValue);
HANDLE_ERROR(hipMemcpy(ptrOut, ptrDevOut, sizeHistogramme, hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(ptrDevOut));
HANDLE_ERROR(hipFree(ptrDevInput));
bool isOk = true;
for(int i = 0;isOk && i < nMaxValue-1; ++i)
isOk &= ptrOut[i] == ptrOut[i+1];
delete[] ptrInput;
delete[] ptrOut;
std::cout << "Histogramme GM : " << std::boolalpha << isOk << std::endl;
return isOk;
}
__global__ void kernelHistogramme(int* ptrDevInput, int* ptrDevOut, int n, int sizeHistogramme)
{
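// Per-block histogram accumulated in shared memory, then merged into the global histogram with atomics.
// Note: gridDim.x == blockDim.x (== 256) in this launch, so gridDim.x*blockIdx.x gives the same
// global thread offset as the usual blockDim.x*blockIdx.x.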
extern __shared__ int tabSM[];
int tid = threadIdx.x + gridDim.x*blockIdx.x;
const int NB_THREAD = gridDim.x*blockDim.x;
int s = tid;
if(threadIdx.x < sizeHistogramme)
tabSM[threadIdx.x] = 0;
__syncthreads();
while(s < n)
{
atomicAdd(&tabSM[ptrDevInput[s]], 1);
s += NB_THREAD;
}
__syncthreads();
if(threadIdx.x < sizeHistogramme)
atomicAdd(&ptrDevOut[threadIdx.x], tabSM[threadIdx.x]);
}
| ae090d167956d809619820881da97c2ac11b62f8.cu | #include "Device.h"
#include <iostream>
#include "MathTools.h"
#include "cudaTools.h"
#include <algorithm>
__host__ static bool runGPU(int n);
__global__ static void kernelHistogramme(int* ptrDevInput, int* ptrDevOut, int n, int sizeHistogramme);
__host__ static int randomMinMax(int min, int max)
{
return (int)((max-min)*((float)(rand())/(float)RAND_MAX) + min);
}
__host__ bool histogrammeGM(int n)
{
return runGPU(n);
}
__host__ bool runGPU(int n)
{
int nMaxValue = 256;
dim3 dg = dim3(nMaxValue, 1, 1);
dim3 db = dim3(nMaxValue, 1, 1);
size_t sizeHistogramme = db.x*sizeof(int);
int* ptrInput = new int[n];
int* ptrOut = new int[nMaxValue];
for(int i = 0; i < n; ++i)
ptrInput[i] = i%nMaxValue;
for(int i = 0; i < nMaxValue; ++i)
ptrOut[i] = 0;
for(int i = 0; i < n; ++i)
std::swap(ptrInput[randomMinMax(0, n-1)], ptrInput[randomMinMax(0, n-1)]);
int* ptrDevInput;
int* ptrDevOut;
HANDLE_ERROR(cudaMalloc((void**)&ptrDevInput, n*sizeof(int)));
HANDLE_ERROR(cudaMemcpy(ptrDevInput, ptrInput, n*sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMalloc((void**)&ptrDevOut, sizeHistogramme));
HANDLE_ERROR(cudaMemcpy(ptrDevOut, ptrOut, sizeHistogramme, cudaMemcpyHostToDevice));
kernelHistogramme<<<dg, db, sizeHistogramme>>>(ptrDevInput, ptrDevOut, n, nMaxValue);
HANDLE_ERROR(cudaMemcpy(ptrOut, ptrDevOut, sizeHistogramme, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(ptrDevOut));
HANDLE_ERROR(cudaFree(ptrDevInput));
bool isOk = true;
for(int i = 0;isOk && i < nMaxValue-1; ++i)
isOk &= ptrOut[i] == ptrOut[i+1];
delete[] ptrInput;
delete[] ptrOut;
std::cout << "Histogramme GM : " << std::boolalpha << isOk << std::endl;
return isOk;
}
__global__ void kernelHistogramme(int* ptrDevInput, int* ptrDevOut, int n, int sizeHistogramme)
{
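// Per-block histogram accumulated in shared memory, then merged into the global histogram with atomics.
// Note: gridDim.x == blockDim.x (== 256) in this launch, so gridDim.x*blockIdx.x gives the same
// global thread offset as the usual blockDim.x*blockIdx.x.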
extern __shared__ int tabSM[];
int tid = threadIdx.x + gridDim.x*blockIdx.x;
const int NB_THREAD = gridDim.x*blockDim.x;
int s = tid;
if(threadIdx.x < sizeHistogramme)
tabSM[threadIdx.x] = 0;
__syncthreads();
while(s < n)
{
atomicAdd(&tabSM[ptrDevInput[s]], 1);
s += NB_THREAD;
}
__syncthreads();
if(threadIdx.x < sizeHistogramme)
atomicAdd(&ptrDevOut[threadIdx.x], tabSM[threadIdx.x]);
}
|
7f73803ee08a92633c64cd3078a9c2d06323ee08.hip | // !!! This is a file automatically generated by hipify!!!
#include "input.cuh"
#include "generate_psuedo_random_seq.cuh"
#include "interleaver.cuh"
#include "scrambler.cuh"
#include "mapper.cuh"
#include "transform_precoder.cuh"
#include "generate_dmrs_pusch_hip.cuh"
#include "generate_ul_rs.cuh"
#include "compose_subframe.cuh"
#include "sc_fdma_modulator.cuh"
#include "main.cuh"
#include "roctracer/roctx.h"
int main(int argc, char **argv) {
//roctxRangePushA("1");
//For timing purpose
timerInit();
const int Qm = 6; // 64QAM Modulation
const int N_l = 1; // Number of Layers
int N_bits = 86400*N_l , N_ri = 0;
//roctxRangePop();
//roctxRangePushA("2");
hipStream_t streams[N_l];
for (int i = 0; i < N_l; i++)
{
hipStreamCreate(&streams[i]);
}
//roctxRangePop();
//roctxRangePushA("3");
// Physical layer cell identity (needed to generate the pseudo-random scrambling sequence)
int N_id_cell = 2; // assume the eNodeB scheduled cell 2 for the UE
int M_pusch_rb = 100; // number of resource blocks assigned to the UE
int n_s = 0; // time slot number within the radio frame
int n_RNTI = 10; // radio network temporary identifier assigned to the UE by the eNodeB (assume 10)
int N_subfr = 0; // subframe number within a radio frame
//hipMalloc & hipMemcpy for inputBits & RI_Bits to Device
Byte *inputBits_d = 0, *riBits_d = 0;
hipMalloc((void **)&inputBits_d, sizeof(Byte)*N_bits);
hipMalloc((void **)&riBits_d, sizeof(Byte)*N_ri);
Byte* c_d = 0;
hipMalloc((void **)&c_d, sizeof(Byte)*(N_bits/N_l));
BYTE* inputBits_h = readBits(argc, argv[1], &N_bits); //Get input bits from the text file
BYTE* riBits_h = readBits(argc, argv[2], &N_ri); //Get RI bits from the text file
startTimer();
hipMemcpyAsync(inputBits_d, inputBits_h, sizeof(Byte)*N_bits, hipMemcpyHostToDevice);
hipMemcpyAsync(riBits_d, riBits_h, sizeof(Byte)*N_ri, hipMemcpyHostToDevice);
stopTimer("Average Memcpy for inputBits & RI_Bits Time= %.6f ms\n");
//roctxRangePop();
//roctxRangePushA("4");
//Create Plans
//startTimer();
hipfftHandle plan_transform_precoder[N_l];
hipfftHandle plan_sc_fdma[N_l];
int n[1] = { N_sc_rb*M_pusch_rb };
int n_2[1] = { FFT_size };
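// One batched DFT plan of length N_sc_rb*M_pusch_rb per layer for transform precoding,
// and one batched plan of length FFT_size per layer for the SC-FDMA modulator's IFFT.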
for (int i = 0; i < N_l; i++)
{
hipfftPlanMany(&plan_transform_precoder[i], 1, n, NULL, 1, n[0], NULL, 1, N_sc_rb*M_pusch_rb, HIPFFT_C2C, (((N_bits / N_l) + N_ri) / Qm) / n[0]);
hipfftPlanMany(&plan_sc_fdma[i], 1, n_2, NULL, 1, FFT_size, NULL, 1, FFT_size, HIPFFT_C2C, N_symbs_per_subframe);
}
//stopTimer("Create Plans Time= %.6f ms\n");
//roctxRangePop();
//roctxRangePushA("5");
//Device data allocation
//startTimer();
int data_vec_len = Qm*N_l;
int ri_vec_len = Qm*N_l;
int N_data_bits = N_bits / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
int H_prime = N_data_bits;
int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / N_pusch_symbs;
int R_prime_mux = R_mux / (Qm*N_l);
Byte *y_idx_d, *y_mat_d;
Byte *interleaved_d_total;
Byte *scrambledbits_d[N_l];
Byte *bits_each_Qm_d[N_l];
float* symbols_R_d[N_l];
float* symbols_I_d[N_l];
hipfftComplex *precoded_symbols_d[N_l];
hipfftComplex *cuComplex_symbols_d[N_l];
hipfftComplex* x_q_d[N_l];
hipfftComplex* dmrs_d_1[N_l];
hipfftComplex *dmrs_d_2[N_l];
hipfftComplex *subframe_d[N_l];
hipfftComplex* ifft_vec_d[N_l];
hipfftComplex *pusch_bb_d;
hipMalloc((void **)&y_idx_d, sizeof(Byte)*(N_pusch_symbs * R_prime_mux));
hipMalloc((void **)&y_mat_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
hipMalloc((void **)&interleaved_d_total, sizeof(Byte)*(N_pusch_symbs*R_mux));
hipMalloc((void **)&pusch_bb_d, sizeof(hipfftComplex)*N_l*modulated_subframe_length);
for (int i = 0; i < N_l; i++)
{
hipMalloc((void **)&scrambledbits_d[i], sizeof(Byte)*N_bits / N_l);
hipMalloc((void **)&bits_each_Qm_d[i], sizeof(Byte)*(N_bits / (Qm*N_l)));
hipMalloc((void **)&symbols_R_d[i], sizeof(float)*(N_bits / (Qm*N_l)));
hipMalloc((void **)&symbols_I_d[i], sizeof(float)*(N_bits / (Qm*N_l)));
hipMalloc((void **)&cuComplex_symbols_d[i], sizeof(hipfftComplex)*(N_bits / (Qm*N_l)));
hipMalloc((void **)&precoded_symbols_d[i], sizeof(hipfftComplex)*(N_bits / (Qm*N_l)));
hipMalloc((void **)&dmrs_d_1[i], sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
hipMalloc((void **)&dmrs_d_2[i], sizeof(hipfftComplex)*N_sc_rb*M_pusch_rb);
hipMalloc((void **)&x_q_d[i], sizeof(hipfftComplex)*prime_nums[M_pusch_rb - 1]);
hipMalloc((void **)&subframe_d[i], sizeof(hipfftComplex)*N_symbs_per_subframe*N_sc_rb*M_pusch_rb);
hipMalloc((void **)&ifft_vec_d[i], sizeof(hipfftComplex)*N_symbs_per_subframe*FFT_size);
}
//stopTimer("Device data allocation Time= %.6f ms\n");
//roctxRangePop();
//roctxRangePushA("6");
hipfftComplex *pusch_bb_h;
pusch_bb_h = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(N_l*modulated_subframe_length));
//roctxRangePop();
//roctxRangePushA("7");
double sum = 0;
for (int i = 0; i < 100; i++)
{
startTimer();
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, (N_bits/N_l), n_RNTI, n_s, N_id_cell);
//roctxRangePop();
//roctxRangePushA("8");
//Copy (c) to Device
hipMemcpyAsync(c_d, c_h, sizeof(Byte)*(N_bits / N_l), hipMemcpyHostToDevice);
//roctxRangePop();
//roctxRangePushA("9");
//Interleaver
//Interleaver will be modified from inside in higher order of MIMO
//This is interleaver on CPU code, RI is not done in this version
//startTimer();
interleaver(inputBits_d, riBits_d, &interleaved_d_total, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//roctxRangePop();
//roctxRangePushA("10");
//int NZ = 100;
//Byte* hprint = (Byte *)malloc(sizeof(Byte)*(NZ));
//hipMemcpy(hprint, interleaved_d_total, sizeof(Byte)*(NZ), hipMemcpyDeviceToHost);
//stopTimer("Interleaver Time= %.6f ms\n");
//for (int i = 0; i < NZ; i++)
//{
// printf("%d", hprint[i]);
//}
//Scrambler
for (int i = 0; i < N_l; i++)
{
scrambler(interleaved_d_total + (i * N_bits / N_l), &scrambledbits_d[i], c_d, (N_bits / N_l) + N_ri, streams[i]);
}
//roctxRangePop();
//roctxRangePushA("11");
//int NZ = 100;
//Byte* hprint = (Byte *)malloc(sizeof(Byte)*(NZ));
//hipMemcpy(hprint, scrambledbits_d[0], sizeof(Byte)*(NZ), hipMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%d", hprint[i]);
//}
//Mapper
for (int i = 0; i < N_l; i++)
{
mapper(scrambledbits_d[i], (N_bits / N_l) + N_ri, Qm, &symbols_R_d[i], &symbols_I_d[i], bits_each_Qm_d[i], streams[i]);
}
//roctxRangePop();
//roctxRangePushA("12");
//int NZ = 100;
//float* hprint = (float *)malloc(sizeof(float)*(NZ));
//hipMemcpy(hprint, symbols_R_d[2], sizeof(float)*(NZ), hipMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i]);
//}
//Transform Precoder
for (int i = 0; i < N_l; i++)
{
transform_precoder(symbols_R_d[i], symbols_I_d[i], M_pusch_rb, ((N_bits / N_l) + N_ri) / Qm, &precoded_symbols_d[i], plan_transform_precoder[i], cuComplex_symbols_d[i], streams[i]);
}
//roctxRangePop();
//roctxRangePushA("13");
//int NZ = 100;
//hipfftComplex* hprint = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(NZ));
//hipMemcpy(hprint, precoded_symbols_d[3], sizeof(hipfftComplex)*(NZ), hipMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
//Generate DMRS
for (int i = 0; i < N_l; i++)
{
generate_dmrs_pusch(0, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, (i%4), &dmrs_d_1[i], &dmrs_d_2[i], x_q_d[i], streams[i]);
}
//roctxRangePop();
//roctxRangePushA("14");
//int NZ = 100;
//hipfftComplex* hprint = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(NZ));
//hipMemcpy(hprint, dmrs_d_1[3], sizeof(hipfftComplex)*(NZ), hipMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
//Multiplexing the DMRS with the Data
for (int i = 0; i < N_l; i++)
{
compose_subframe(precoded_symbols_d[i], dmrs_d_1[i], dmrs_d_2[i], M_pusch_rb, &subframe_d[i], N_l, streams[i]);
}
//roctxRangePop();
//roctxRangePushA("15");
//int NZ = 100;
//hipfftComplex* hprint = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(NZ));
//hipMemcpy(hprint, subframe_d[0], sizeof(hipfftComplex)*(NZ), hipMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
// Generate SC-FDMA signal
for (int i = 0; i < N_l; i++)
{
sc_fdma_modulator(subframe_d[i], M_pusch_rb, &pusch_bb_d, plan_sc_fdma[i], ifft_vec_d[i], streams[i],i);
}
//roctxRangePop();
//roctxRangePushA("16");
hipMemcpyAsync(pusch_bb_h, pusch_bb_d, sizeof(hipfftComplex)*(N_l*modulated_subframe_length), hipMemcpyDeviceToHost);
end = std::chrono::steady_clock::now();
sum += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0;
}
printf("Average Processing Time = %lf\n", sum / 100.0);
//roctxRangePop();
#pragma region Results Printing
//To compare with MATLAB results
//Run the file (output.m)
int NNN = modulated_subframe_length*N_l;
FILE *results;
if ((results = freopen("output.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("clear; clc;");
printf("\nsymbols_real = [ ");
for (int i = 0; i < NNN; i++)
{
printf("%10f", pusch_bb_h[i].x);
if (i != (NNN - 1))
printf(",");
}
printf(" ];\nsymbols_imag = [ ");
for (int i = 0; i < NNN; i++)
{
printf("%10f", pusch_bb_h[i].y);
if (i != (NNN - 1))
printf(",");
}
printf(" ];\n");
printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n");
//Matlab code
printf("matlab_test");
fclose(results);
if ((results = freopen("matlab_test.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("N_bits = %d; \n", N_bits);
if (Qm == 6)
printf("mod_type = %s; \n", "'64qam'");
else if (Qm == 4)
printf("mod_type = %s; \n", "'16qam'");
else if (Qm == 2)
printf("mod_type = %s; \n", "'qpsk'");
else if (Qm == 1)
printf("mod_type = %s; \n", "'bpsk'");
printf("N_sc_rb = 12; %% number of subcarriers in each resource block\n");
printf("M_pusch_rb = %d; %% number of resource blocks assigned to the UE\n", M_pusch_rb);
printf("M_pusch_sc = M_pusch_rb*N_sc_rb; %% total number of subcarriers\n\n");
printf("N_l = %d; \nQ_m = %d; \ndata_bits_total = (fread(fopen('%s')) - '0').';\ndata_bits = reshape(data_bits_total,length(data_bits_total)/N_l,N_l);\nri_bits = (fread(fopen('%s'))-'0').';\n", N_l, Qm, argv[1], argv[argc - 1]);
printf("interleaved_bits = channel_interleaver_MIMO(data_bits, ri_bits, [], N_l, Q_m);\ninterleaved_bits_Nlayer_col = reshape(interleaved_bits,length(interleaved_bits)/N_l,N_l);\nc_init = 10 * 2 ^ 14 + floor(0 / 2) * 2 ^ 9 + 2; \nc = generate_psuedo_random_seq(c_init, length(interleaved_bits_Nlayer_col)); \nscrambled = scrambler_MIMO(interleaved_bits_Nlayer_col.', repmat(c,N_l,1), N_l);for i = 1:N_l\n modulated_symbols(:,i) = mapper(scrambled(i,:), mod_type).';\nend\n");
if (N_l != 1) //MIMO
{
printf("\ntransform_precoded_symbols = transform_precoder_mimo(modulated_symbols, M_pusch_rb, N_l);\nprecoded_symbols = precoding_mimo(transform_precoded_symbols, N_l, N_l); \nfor i = 1:N_l\n dmrs(i, :) = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, mod((i - 1), 4)); \nend\ndmrs1 = dmrs(:, 1 : M_pusch_sc); \ndmrs2 = dmrs(:, M_pusch_sc + 1 : 2 * M_pusch_sc); \nsubframe_per_ant = compose_subframe_mimo(precoded_symbols, dmrs1, dmrs2, M_pusch_rb, N_l); \nsymbols_MATLAB = sc_fdma_modulator_MIMO(subframe_per_ant, M_pusch_rb, N_l); \nsymbols_MATLAB_reshaped = reshape(symbols_MATLAB.',1,length(symbols_MATLAB)*N_l);");
}
else //SISO
{
printf("precoded_data = transform_precoder(modulated_symbols, M_pusch_rb);\ndmrs = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\nsubframe_1 = compose_subframe(precoded_data, dmrs_1, dmrs_2, M_pusch_rb);\nsymbols_MATLAB = sc_fdma_modulator(subframe_1, M_pusch_rb);\nsymbols_MATLAB_reshaped = reshape(symbols_MATLAB.',1,length(symbols_MATLAB)*N_l);");
}
printf("\n\nsum((abs(symbols_MATLAB_reshaped - symbols_CUDA)))");
fclose(results);
#pragma endregion
} | 7f73803ee08a92633c64cd3078a9c2d06323ee08.cu | #include "input.cuh"
#include "generate_psuedo_random_seq.cuh"
#include "interleaver.cuh"
#include "scrambler.cuh"
#include "mapper.cuh"
#include "transform_precoder.cuh"
#include "generate_dmrs_pusch.cuh"
#include "generate_ul_rs.cuh"
#include "compose_subframe.cuh"
#include "sc_fdma_modulator.cuh"
#include "main.cuh"
#include "nvToolsExt.h"
int main(int argc, char **argv) {
//nvtxRangePushA("1");
//For timing purpose
timerInit();
const int Qm = 6; // 64QAM Modulation
const int N_l = 1; // Number of Layers
int N_bits = 86400*N_l , N_ri = 0;
//nvtxRangePop();
//nvtxRangePushA("2");
cudaStream_t streams[N_l];
for (int i = 0; i < N_l; i++)
{
cudaStreamCreate(&streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("3");
// Physical layer cell identity (needed to generate the pseudo-random scrambling sequence)
int N_id_cell = 2; // assume the eNodeB scheduled cell 2 for the UE
int M_pusch_rb = 100; // number of resource blocks assigned to the UE
int n_s = 0; // time slot number within the radio frame
int n_RNTI = 10; // radio network temporary identifier assigned to the UE by the eNodeB (assume 10)
int N_subfr = 0; // subframe number within a radio frame
//cudaMalloc & cudaMemcpy for inputBits & RI_Bits to Device
Byte *inputBits_d = 0, *riBits_d = 0;
cudaMalloc((void **)&inputBits_d, sizeof(Byte)*N_bits);
cudaMalloc((void **)&riBits_d, sizeof(Byte)*N_ri);
Byte* c_d = 0;
cudaMalloc((void **)&c_d, sizeof(Byte)*(N_bits/N_l));
BYTE* inputBits_h = readBits(argc, argv[1], &N_bits); //Get input bits from the text file
BYTE* riBits_h = readBits(argc, argv[2], &N_ri); //Get RI bits from the text file
startTimer();
cudaMemcpyAsync(inputBits_d, inputBits_h, sizeof(Byte)*N_bits, cudaMemcpyHostToDevice);
cudaMemcpyAsync(riBits_d, riBits_h, sizeof(Byte)*N_ri, cudaMemcpyHostToDevice);
stopTimer("Average Memcpy for inputBits & RI_Bits Time= %.6f ms\n");
//nvtxRangePop();
//nvtxRangePushA("4");
//Create Plans
//startTimer();
cufftHandle plan_transform_precoder[N_l];
cufftHandle plan_sc_fdma[N_l];
int n[1] = { N_sc_rb*M_pusch_rb };
int n_2[1] = { FFT_size };
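// One batched DFT plan of length N_sc_rb*M_pusch_rb per layer for transform precoding,
// and one batched plan of length FFT_size per layer for the SC-FDMA modulator's IFFT.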
for (int i = 0; i < N_l; i++)
{
cufftPlanMany(&plan_transform_precoder[i], 1, n, NULL, 1, n[0], NULL, 1, N_sc_rb*M_pusch_rb, CUFFT_C2C, (((N_bits / N_l) + N_ri) / Qm) / n[0]);
cufftPlanMany(&plan_sc_fdma[i], 1, n_2, NULL, 1, FFT_size, NULL, 1, FFT_size, CUFFT_C2C, N_symbs_per_subframe);
}
//stopTimer("Create Plans Time= %.6f ms\n");
//nvtxRangePop();
//nvtxRangePushA("5");
//Device data allocation
//startTimer();
int data_vec_len = Qm*N_l;
int ri_vec_len = Qm*N_l;
int N_data_bits = N_bits / data_vec_len;
int N_ri_bits = N_ri / data_vec_len;
int H_prime = N_data_bits;
int H_vec_len = data_vec_len;
int H_prime_total = H_prime + N_ri_bits;
int R_mux = (H_prime_total*Qm*N_l) / N_pusch_symbs;
int R_prime_mux = R_mux / (Qm*N_l);
Byte *y_idx_d, *y_mat_d;
Byte *interleaved_d_total;
Byte *scrambledbits_d[N_l];
Byte *bits_each_Qm_d[N_l];
float* symbols_R_d[N_l];
float* symbols_I_d[N_l];
cufftComplex *precoded_symbols_d[N_l];
cufftComplex *cuComplex_symbols_d[N_l];
cufftComplex* x_q_d[N_l];
cufftComplex* dmrs_d_1[N_l];
cufftComplex *dmrs_d_2[N_l];
cufftComplex *subframe_d[N_l];
cufftComplex* ifft_vec_d[N_l];
cufftComplex *pusch_bb_d;
cudaMalloc((void **)&y_idx_d, sizeof(Byte)*(N_pusch_symbs * R_prime_mux));
cudaMalloc((void **)&y_mat_d, sizeof(Byte)*(N_pusch_symbs*R_mux));
cudaMalloc((void **)&interleaved_d_total, sizeof(Byte)*(N_pusch_symbs*R_mux));
cudaMalloc((void **)&pusch_bb_d, sizeof(cufftComplex)*N_l*modulated_subframe_length);
for (int i = 0; i < N_l; i++)
{
cudaMalloc((void **)&scrambledbits_d[i], sizeof(Byte)*N_bits / N_l);
cudaMalloc((void **)&bits_each_Qm_d[i], sizeof(Byte)*(N_bits / (Qm*N_l)));
cudaMalloc((void **)&symbols_R_d[i], sizeof(float)*(N_bits / (Qm*N_l)));
cudaMalloc((void **)&symbols_I_d[i], sizeof(float)*(N_bits / (Qm*N_l)));
cudaMalloc((void **)&cuComplex_symbols_d[i], sizeof(cufftComplex)*(N_bits / (Qm*N_l)));
cudaMalloc((void **)&precoded_symbols_d[i], sizeof(cufftComplex)*(N_bits / (Qm*N_l)));
cudaMalloc((void **)&dmrs_d_1[i], sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
cudaMalloc((void **)&dmrs_d_2[i], sizeof(cufftComplex)*N_sc_rb*M_pusch_rb);
cudaMalloc((void **)&x_q_d[i], sizeof(cufftComplex)*prime_nums[M_pusch_rb - 1]);
cudaMalloc((void **)&subframe_d[i], sizeof(cufftComplex)*N_symbs_per_subframe*N_sc_rb*M_pusch_rb);
cudaMalloc((void **)&ifft_vec_d[i], sizeof(cufftComplex)*N_symbs_per_subframe*FFT_size);
}
//stopTimer("Device data allocation Time= %.6f ms\n");
//nvtxRangePop();
//nvtxRangePushA("6");
cufftComplex *pusch_bb_h;
pusch_bb_h = (cufftComplex *)malloc(sizeof(cufftComplex)*(N_l*modulated_subframe_length));
//nvtxRangePop();
//nvtxRangePushA("7");
double sum = 0;
for (int i = 0; i < 100; i++)
{
startTimer();
//Generate Pseudo Random Seq.
Byte *c_h = 0;
generate_psuedo_random_seq(&c_h, (N_bits/N_l), n_RNTI, n_s, N_id_cell);
//nvtxRangePop();
//nvtxRangePushA("8");
//Copy (c) to Device
cudaMemcpyAsync(c_d, c_h, sizeof(Byte)*(N_bits / N_l), cudaMemcpyHostToDevice);
//nvtxRangePop();
//nvtxRangePushA("9");
//Interleaver
//Interleaver will be modified from inside in higher order of MIMO
//This is interleaver on CPU code, RI is not done in this version
//startTimer();
interleaver(inputBits_d, riBits_d, &interleaved_d_total, N_bits, N_ri, Qm, N_l, y_idx_d, y_mat_d);
//nvtxRangePop();
//nvtxRangePushA("10");
//int NZ = 100;
//Byte* hprint = (Byte *)malloc(sizeof(Byte)*(NZ));
//cudaMemcpy(hprint, interleaved_d_total, sizeof(Byte)*(NZ), cudaMemcpyDeviceToHost);
//stopTimer("Interleaver Time= %.6f ms\n");
//for (int i = 0; i < NZ; i++)
//{
// printf("%d", hprint[i]);
//}
//Scrambler
for (int i = 0; i < N_l; i++)
{
scrambler(interleaved_d_total + (i * N_bits / N_l), &scrambledbits_d[i], c_d, (N_bits / N_l) + N_ri, streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("11");
//int NZ = 100;
//Byte* hprint = (Byte *)malloc(sizeof(Byte)*(NZ));
//cudaMemcpy(hprint, scrambledbits_d[0], sizeof(Byte)*(NZ), cudaMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%d", hprint[i]);
//}
//Mapper
for (int i = 0; i < N_l; i++)
{
mapper(scrambledbits_d[i], (N_bits / N_l) + N_ri, Qm, &symbols_R_d[i], &symbols_I_d[i], bits_each_Qm_d[i], streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("12");
//int NZ = 100;
//float* hprint = (float *)malloc(sizeof(float)*(NZ));
//cudaMemcpy(hprint, symbols_R_d[2], sizeof(float)*(NZ), cudaMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i]);
//}
//Transform Precoder
for (int i = 0; i < N_l; i++)
{
transform_precoder(symbols_R_d[i], symbols_I_d[i], M_pusch_rb, ((N_bits / N_l) + N_ri) / Qm, &precoded_symbols_d[i], plan_transform_precoder[i], cuComplex_symbols_d[i], streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("13");
//int NZ = 100;
//cufftComplex* hprint = (cufftComplex *)malloc(sizeof(cufftComplex)*(NZ));
//cudaMemcpy(hprint, precoded_symbols_d[3], sizeof(cufftComplex)*(NZ), cudaMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
//Generate DMRS
for (int i = 0; i < N_l; i++)
{
generate_dmrs_pusch(0, N_id_cell, 0, 0, 0, 0, 0, "fixed", M_pusch_rb, (i%4), &dmrs_d_1[i], &dmrs_d_2[i], x_q_d[i], streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("14");
//int NZ = 100;
//cufftComplex* hprint = (cufftComplex *)malloc(sizeof(cufftComplex)*(NZ));
//cudaMemcpy(hprint, dmrs_d_1[3], sizeof(cufftComplex)*(NZ), cudaMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
//Multiplexing the DMRS with the Data
for (int i = 0; i < N_l; i++)
{
compose_subframe(precoded_symbols_d[i], dmrs_d_1[i], dmrs_d_2[i], M_pusch_rb, &subframe_d[i], N_l, streams[i]);
}
//nvtxRangePop();
//nvtxRangePushA("15");
//int NZ = 100;
//cufftComplex* hprint = (cufftComplex *)malloc(sizeof(cufftComplex)*(NZ));
//cudaMemcpy(hprint, subframe_d[0], sizeof(cufftComplex)*(NZ), cudaMemcpyDeviceToHost);
//for (int i = 0; i < NZ; i++)
//{
// printf("%10f", hprint[i].x);
//}
// Generate SC-FDMA signal
for (int i = 0; i < N_l; i++)
{
sc_fdma_modulator(subframe_d[i], M_pusch_rb, &pusch_bb_d, plan_sc_fdma[i], ifft_vec_d[i], streams[i],i);
}
//nvtxRangePop();
//nvtxRangePushA("16");
cudaMemcpyAsync(pusch_bb_h, pusch_bb_d, sizeof(cufftComplex)*(N_l*modulated_subframe_length), cudaMemcpyDeviceToHost);
end = std::chrono::steady_clock::now();
sum += (double)std::chrono::duration_cast<std::chrono::nanoseconds> (end - start).count() / 1000000.0;
}
printf("Average Processing Time = %lf\n", sum / 100.0);
//nvtxRangePop();
#pragma region Results Printing
//To compare with MATLAB results
//Run the file (output.m)
int NNN = modulated_subframe_length*N_l;
FILE *results;
if ((results = freopen("output.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("clear; clc;");
printf("\nsymbols_real = [ ");
for (int i = 0; i < NNN; i++)
{
printf("%10f", pusch_bb_h[i].x);
if (i != (NNN - 1))
printf(",");
}
printf(" ];\nsymbols_imag = [ ");
for (int i = 0; i < NNN; i++)
{
printf("%10f", pusch_bb_h[i].y);
if (i != (NNN - 1))
printf(",");
}
printf(" ];\n");
printf("symbols_CUDA = symbols_real + 1i * symbols_imag;\n");
//Matlab code
printf("matlab_test");
fclose(results);
if ((results = freopen("matlab_test.m", "w+", stdout)) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
printf("N_bits = %d; \n", N_bits);
if (Qm == 6)
printf("mod_type = %s; \n", "'64qam'");
else if (Qm == 4)
printf("mod_type = %s; \n", "'16qam'");
else if (Qm == 2)
printf("mod_type = %s; \n", "'qpsk'");
else if (Qm == 1)
printf("mod_type = %s; \n", "'bpsk'");
printf("N_sc_rb = 12; %% number of subcarriers in each resource block\n");
printf("M_pusch_rb = %d; %% number of resource blocks assigned to the UE\n", M_pusch_rb);
printf("M_pusch_sc = M_pusch_rb*N_sc_rb; %% total number of subcarriers\n\n");
printf("N_l = %d; \nQ_m = %d; \ndata_bits_total = (fread(fopen('%s')) - '0').';\ndata_bits = reshape(data_bits_total,length(data_bits_total)/N_l,N_l);\nri_bits = (fread(fopen('%s'))-'0').';\n", N_l, Qm, argv[1], argv[argc - 1]);
printf("interleaved_bits = channel_interleaver_MIMO(data_bits, ri_bits, [], N_l, Q_m);\ninterleaved_bits_Nlayer_col = reshape(interleaved_bits,length(interleaved_bits)/N_l,N_l);\nc_init = 10 * 2 ^ 14 + floor(0 / 2) * 2 ^ 9 + 2; \nc = generate_psuedo_random_seq(c_init, length(interleaved_bits_Nlayer_col)); \nscrambled = scrambler_MIMO(interleaved_bits_Nlayer_col.', repmat(c,N_l,1), N_l);for i = 1:N_l\n modulated_symbols(:,i) = mapper(scrambled(i,:), mod_type).';\nend\n");
if (N_l != 1) //MIMO
{
printf("\ntransform_precoded_symbols = transform_precoder_mimo(modulated_symbols, M_pusch_rb, N_l);\nprecoded_symbols = precoding_mimo(transform_precoded_symbols, N_l, N_l); \nfor i = 1:N_l\n dmrs(i, :) = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, mod((i - 1), 4)); \nend\ndmrs1 = dmrs(:, 1 : M_pusch_sc); \ndmrs2 = dmrs(:, M_pusch_sc + 1 : 2 * M_pusch_sc); \nsubframe_per_ant = compose_subframe_mimo(precoded_symbols, dmrs1, dmrs2, M_pusch_rb, N_l); \nsymbols_MATLAB = sc_fdma_modulator_MIMO(subframe_per_ant, M_pusch_rb, N_l); \nsymbols_MATLAB_reshaped = reshape(symbols_MATLAB.',1,length(symbols_MATLAB)*N_l);");
}
else //SISO
{
printf("precoded_data = transform_precoder(modulated_symbols, M_pusch_rb);\ndmrs = generate_dmrs_pusch(0, 2, 0, 0, 0, 0, 0, 'fixed', M_pusch_rb, 0);\ndmrs_1 = dmrs(1:M_pusch_sc);\ndmrs_2 = dmrs(M_pusch_sc+1:2*M_pusch_sc);\nsubframe_1 = compose_subframe(precoded_data, dmrs_1, dmrs_2, M_pusch_rb);\nsymbols_MATLAB = sc_fdma_modulator(subframe_1, M_pusch_rb);\nsymbols_MATLAB_reshaped = reshape(symbols_MATLAB.',1,length(symbols_MATLAB)*N_l);");
}
printf("\n\nsum((abs(symbols_MATLAB_reshaped - symbols_CUDA)))");
fclose(results);
#pragma endregion
} |
6b0f5738b3782c1d8689ae4b1139ebbb47b2d9de.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudaCode.cuh"
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cusp/coo_matrix.h>
#include <cusp/print.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <cusp/krylov/cg.h>
#include <cusp/monitor.h>
void testCuspMatAssembly()
{
// dimensions of the matrix
int num_rows = 3;
int num_cols = 3;
// number of (i,j,v) triplets
int num_triplets = 10;
// allocate storage for unordered triplets
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // values
// fill triplet arrays
I[0] = 2; J[0] = 0; V[0] = 10;
I[1] = 0; J[1] = 2; V[1] = 10;
I[2] = 1; J[2] = 1; V[2] = 10;
I[3] = 2; J[3] = 0; V[3] = 10;
I[4] = 1; J[4] = 1; V[4] = 10;
I[5] = 0; J[5] = 0; V[5] = 10;
I[6] = 2; J[6] = 2; V[6] = 10;
I[7] = 0; J[7] = 0; V[7] = 10;
I[8] = 1; J[8] = 0; V[8] = 10;
I[9] = 0; J[9] = 0; V[9] = 10;
// sort triplets by (i,j) index using two stable sorts (first by J, then by I)
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
// compute unique number of nonzeros in the output
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
// allocate output matrix
cusp::coo_matrix<int, float, cusp::device_memory> A(num_rows, num_cols, num_entries);
// sum values with the same (i,j) index
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(A.row_indices.begin(), A.column_indices.begin())),
A.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
// print matrix
cusp::print(A);
}
void testCuspCG()
{
int num_rows = 2;
int num_cols = 2;
// number of (i,j,v) triplets
int num_triplets = 4;
// allocate storage for unordered triplets
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // values
// fill triplet arrays
// first row
I[0] = 0; J[0] = 0; V[0] = 4;
I[1] = 0; J[1] = 1; V[1] = 1;
// second row
I[2] = 1; J[2] = 0; V[2] = 1;
I[3] = 1; J[3] = 1; V[3] = 3;
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
// compute unique number of nonzeros in the output
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
// allocate output matrix
cusp::coo_matrix<int, float, cusp::device_memory> A(num_rows, num_cols, num_entries);
// sum values with the same (i,j) index
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(A.row_indices.begin(), A.column_indices.begin())),
A.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
std::cout << "Matrix A: " << std::endl;
cusp::print_matrix(A);
cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0);
cusp::array1d<float, cusp::device_memory> b(A.num_rows);
b[0] = 1; b[1] = 2;
std::cout << "Vector b: " << std::endl;
cusp::print(b);
// set stopping criteria:
// iteration_limit = 100
// relative_tolerance = 1e-3
cusp::verbose_monitor<float> monitor(b, 100, 1e-3);
// set preconditioner (identity)
cusp::identity_operator<float, cusp::device_memory> M(A.num_rows, A.num_rows);
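// Note: the monitor and preconditioner constructed above are not passed to the three-argument
// cg() call below, which uses its own defaults. A = [[4 1; 1 3]] is symmetric positive definite,
// so CG converges; the exact solution of A*x = b is x = (1/11, 7/11) ~= (0.0909, 0.6364).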
cusp::krylov::cg(A,x, b);
std::cout << "Solution Vector x: " << std::endl;
cusp::print(x);
} | 6b0f5738b3782c1d8689ae4b1139ebbb47b2d9de.cu | #include "cudaCode.cuh"
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cusp/coo_matrix.h>
#include <cusp/print.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <cusp/krylov/cg.h>
#include <cusp/monitor.h>
void testCuspMatAssembly()
{
// dimensions of the matrix
int num_rows = 3;
int num_cols = 3;
// number of (i,j,v) triplets
int num_triplets = 10;
// allocate storage for unordered triplets
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // values
// fill triplet arrays
I[0] = 2; J[0] = 0; V[0] = 10;
I[1] = 0; J[1] = 2; V[1] = 10;
I[2] = 1; J[2] = 1; V[2] = 10;
I[3] = 2; J[3] = 0; V[3] = 10;
I[4] = 1; J[4] = 1; V[4] = 10;
I[5] = 0; J[5] = 0; V[5] = 10;
I[6] = 2; J[6] = 2; V[6] = 10;
I[7] = 0; J[7] = 0; V[7] = 10;
I[8] = 1; J[8] = 0; V[8] = 10;
I[9] = 0; J[9] = 0; V[9] = 10;
// sort triplets by (i,j) index using two stable sorts (first by J, then by I)
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
// compute unique number of nonzeros in the output
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
// allocate output matrix
cusp::coo_matrix<int, float, cusp::device_memory> A(num_rows, num_cols, num_entries);
// sum values with the same (i,j) index
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(A.row_indices.begin(), A.column_indices.begin())),
A.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
// print matrix
cusp::print(A);
}
void testCuspCG()
{
int num_rows = 2;
int num_cols = 2;
// number of (i,j,v) triplets
int num_triplets = 4;
// allocate storage for unordered triplets
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // values
// fill triplet arrays
// first row
I[0] = 0; J[0] = 0; V[0] = 4;
I[1] = 0; J[1] = 1; V[1] = 1;
// second row
I[2] = 1; J[2] = 0; V[2] = 1;
I[3] = 1; J[3] = 1; V[3] = 3;
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
// compute unique number of nonzeros in the output
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
// allocate output matrix
cusp::coo_matrix<int, float, cusp::device_memory> A(num_rows, num_cols, num_entries);
// sum values with the same (i,j) index
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(A.row_indices.begin(), A.column_indices.begin())),
A.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
std::cout << "Matrix A: " << std::endl;
cusp::print_matrix(A);
cusp::array1d<float, cusp::device_memory> x(A.num_rows, 0);
cusp::array1d<float, cusp::device_memory> b(A.num_rows);
b[0] = 1; b[1] = 2;
std::cout << "Vector b: " << std::endl;
cusp::print(b);
// set stopping criteria:
// iteration_limit = 100
// relative_tolerance = 1e-3
cusp::verbose_monitor<float> monitor(b, 100, 1e-3);
// set preconditioner (identity)
cusp::identity_operator<float, cusp::device_memory> M(A.num_rows, A.num_rows);
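// Note: the monitor and preconditioner constructed above are not passed to the three-argument
// cg() call below, which uses its own defaults. A = [[4 1; 1 3]] is symmetric positive definite,
// so CG converges; the exact solution of A*x = b is x = (1/11, 7/11) ~= (0.0909, 0.6364).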
cusp::krylov::cg(A,x, b);
std::cout << "Solution Vector x: " << std::endl;
cusp::print(x);
} |
d0d82b5d232ba394e49205792df62e3da386de93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/LocalGeometry.h>
#include <cuda/random.h>
#include <sutil/vec_math.h>
#include "GlobalParameters.h"
#include <stdint.h>
#include <stdio.h>
// For each camera Datatype:
#include "cameras/PerspectiveCameraDataTypes.h"
#include "cameras/PanoramicCameraDataTypes.h"
#include "cameras/GenericCameraDataTypes.h"
#include "cameras/OrthographicCameraDataTypes.h"
#include "cameras/CompoundEyeDataTypes.h"
// cuRand
#include <hiprand/hiprand_kernel.h>
__constant__ float FWHM_SD_RATIO = 2.35482004503094938202313865291f; // 2*sqrt(2*ln(2)): converts a Gaussian standard deviation to full width at half maximum
extern "C"
{
__constant__ globalParameters::LaunchParams params;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
globalParameters::PayloadRadiance* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
globalParameters::RAY_TYPE_RADIANCE, // SBT offset
globalParameters::RAY_TYPE_COUNT, // SBT stride
globalParameters::RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3 );
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->depth = u3;
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
globalParameters::RAY_TYPE_OCCLUSION, // SBT offset
globalParameters::RAY_TYPE_COUNT, // SBT stride
globalParameters::RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
const float gamma = 2.2f;
return make_uchar4(
static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
255u
);
}
//------------------------------------------------------------------------------
//
// Ray Generation Programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__pinhole()
{
PerspectiveCameraPosedData* posedData = (PerspectiveCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here.
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const LocalSpace& ls = posedData->localSpace;
const float3 scale = posedData->specializedData.scale;
const float3 ray_direction = ls.zAxis*scale.z + d.x*ls.xAxis*scale.x + d.y*ls.yAxis*scale.y;
const float3 ray_origin = posedData->position;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
extern "C" __global__ void __raygen__panoramic()
{
PanoramicCameraPosedData* posedData = (PanoramicCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 originalDir = make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
const float3 lxAxis = posedData->localSpace.xAxis;
const float3 lyAxis = posedData->localSpace.yAxis;
const float3 lzAxis = posedData->localSpace.zAxis;
const float3 ray_direction = normalize(originalDir.x * lxAxis + originalDir.y * lyAxis + originalDir.z * lzAxis);
//const float3 ray_direction = normalize(posedData->localSpace.transform(originalDir));
const float3 ray_origin = posedData->position + ray_direction*posedData->specializedData.startRadius;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
extern "C" __global__ void __raygen__orthographic()
{
OrthographicCameraPosedData* posedData = (OrthographicCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here.
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const LocalSpace& ls = posedData->localSpace;
const float2 scale = posedData->specializedData.scale;
const float3 ray_direction = ls.zAxis;
const float3 ray_origin = posedData->position + d.x*ls.xAxis*scale.x + d.y*ls.yAxis*scale.y;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
//------------------------------------------------------------------------------
//
// Ommatidial Ray Projection Generation Programs
//
//------------------------------------------------------------------------------
__device__ float3 getSummedOmmatidiumData(const uint32_t eyeIndex, const uint32_t ommatidialIndex, const uint32_t samples)
{
// Sum the compound elements that make up each sample from the ommatidium
const uint32_t compoundXYindex = eyeIndex*params.compoundBufferWidth + ommatidialIndex;
const uint32_t compoundImageArea = params.compoundBufferWidth * params.compoundBufferHeight;
float3 summation = make_float3(0,0,0);
for(uint32_t i = 0; i<samples; i++)
summation += ((float3*)params.compoundBufferPtr)[compoundXYindex + i * compoundImageArea];
return summation;
}
//// Displays the raw (still sample-wise summed) data from all compound eyes
//extern "C" __global__ void __raygen__compound_projection_all_eyes()
//{
// CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
// const uint3 launch_idx = optixGetLaunchIndex();
// const uint3 launch_dims = optixGetLaunchDimensions();
// const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
// const uint32_t compWidth = params.compoundBufferWidth;
// const uint32_t compHeight = params.compoundBufferHeight;
// const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
//
// hipDeviceptr_t* eyes = (hipDeviceptr_t*)posedData->specializedData.d_compoundArray
//
// // Scale the x coordinate by the number of ommatidia (we don't want to be reading too far off the edge of the assigned ommatidia)
// const uint32_t ommatidiumIndex = (launch_idx.x * ommatidialCount)/launch_dims.x;
//
// //
// // Update results
// //
// const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
// params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, ommatidiumIndex, posedData->specializedData.samplesPerOmmatidium));
//}
extern "C" __global__ void __raygen__compound_projection_single_dimension()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
const uint32_t compWidth = params.compoundBufferWidth;
const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Scale the x coordinate by the number of ommatidia (we don't want to be reading too far off the edge of the assigned ommatidia)
const uint32_t ommatidiumIndex = (launch_idx.x * ommatidialCount)/launch_dims.x;
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, ommatidiumIndex, posedData->specializedData.samplesPerOmmatidium));
}
extern "C" __global__ void __raygen__compound_projection_single_dimension_fast()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
// Break if this is not a pixel to render:
if(launch_idx.y > 0 || launch_idx.x >= posedData->specializedData.ommatidialCount) return;
// Set the colour based on the ommatidium this pixel represents
params.frame_buffer[(uint32_t)launch_idx.x] = make_color(getSummedOmmatidiumData(posedData->specializedData.eyeIndex, launch_idx.x, posedData->specializedData.samplesPerOmmatidium));
}
// Projects the positions of each ommatidium down to a sphere and samples the closest one, position-wise
extern "C" __global__ void __raygen__compound_projection_spherical_positionwise()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
//const uint32_t compWidth = params.compoundBufferWidth;
//const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Project the 2D coordinates of the display window to spherical coordinates
const float2 d = 2.0f * make_float2(
static_cast<float>( launch_idx.x ) / static_cast<float>( launch_dims.x ),
static_cast<float>( launch_idx.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 unitSpherePosition= make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
// Finds the closest ommatidium (NOTE: This is explicitly based on position)
Ommatidium* allOmmatidia = (Ommatidium*)(posedData->specializedData.d_ommatidialArray);// List of all ommatidia
float dx = allOmmatidia->relativePosition.x - unitSpherePosition.x;
float dy = allOmmatidia->relativePosition.y - unitSpherePosition.y;
float dz = allOmmatidia->relativePosition.z - unitSpherePosition.z;
float closestDistance = dx*dx+dy*dy+dz*dz;
float dist;
uint32_t i, closestIndex = 0;
for(i = 1; i<ommatidialCount; i++)
{
dx = (allOmmatidia + i)->relativePosition.x - unitSpherePosition.x;
dy = (allOmmatidia + i)->relativePosition.y - unitSpherePosition.y;
dz = (allOmmatidia + i)->relativePosition.z - unitSpherePosition.z;
dist = dx*dx+dy*dy+dz*dz;
if(dist <closestDistance)
{
closestDistance = dist;
closestIndex = i;
}
}
// Save the summed samples from the closest ommatidium as the pixel colour
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, closestIndex, posedData->specializedData.samplesPerOmmatidium));
}
extern "C" __global__ void __raygen__compound_projection_spherical_orientationwise()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
const uint32_t compWidth = params.compoundBufferWidth;
const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Project the 2D coordinates of the display window to spherical coordinates
const float2 d = 2.0f * make_float2(
static_cast<float>( launch_idx.x ) / static_cast<float>( launch_dims.x ),
static_cast<float>( launch_idx.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 unitSpherePosition= make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
// Finds the closest ommatidium (NOTE: This is explicitly based on orientation)
Ommatidium* allOmmatidia = (Ommatidium*)(posedData->specializedData.d_ommatidialArray);// List of all ommatidia
float smallestAngle = acos(dot(allOmmatidia->relativeDirection, unitSpherePosition)/(length(allOmmatidia->relativeDirection)*length(unitSpherePosition)));
float angle;
uint32_t i, closestIndex = 0;
for(i = 1; i<ommatidialCount; i++)
{
angle = acos(dot((allOmmatidia+i)->relativeDirection, unitSpherePosition)/(length((allOmmatidia+i)->relativeDirection)*length(unitSpherePosition)));
if(angle < smallestAngle)
{
smallestAngle = angle;
closestIndex = i;
}
}
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, closestIndex, posedData->specializedData.samplesPerOmmatidium));
}
//------------------------------------------------------------------------------
//
// Ommatidial Ray Generation Programs
//
//------------------------------------------------------------------------------
__device__ inline float3 rotatePoint(const float3 point, const float angle, const float3 axis)
{
return (cos(angle)*point + sin(angle)*cross(axis, point) + (1 - cos(angle))*dot(axis, point)*axis);
}
__device__ float3 generateOffsetRay( const float ommatidialAxisAngle, const float splayAngle, const float3 ommatidialAxis)
{
//// Rotate the ommatidial axis about a perpendicular vector by splay angle
float3 perpAxis = cross(make_float3(0.0f, 1.0f, 0.0f), ommatidialAxis);
// Check that the perpAxis isn't zero (because ommatidialAxis was pointing directly up) (could probably be done with a memcmp for speed)
perpAxis = (perpAxis.x == 0.0f && perpAxis.y == 0.0f && perpAxis.z == 0.0f) ? make_float3(0.0f, 0.0f, 1.0f) : normalize(perpAxis);
// Rotate by the splay angle
const float3 splayedAxis = rotatePoint(ommatidialAxis, splayAngle, perpAxis);
//// Rotate the new axis around the original ommatidial axis by the ommatidialAxisAngle
return rotatePoint(splayedAxis, ommatidialAxisAngle, ommatidialAxis);
}
extern "C" __global__ void __raygen__ommatidium()
{
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = launch_idx.y;
const uint32_t ommatidialIndex = launch_idx.x;
const uint32_t sampleIndex = launch_idx.z;
const int id = launch_idx.z*launch_dims.y*launch_dims.x + launch_idx.y*launch_dims.x + launch_idx.x;
CompoundEyeCollectionData* eyeCollection = (CompoundEyeCollectionData*)optixGetSbtDataPointer();
//if(threadIdx.x == 1)
//{
// printf("%i eyes found\n", eyeCollection->eyeCount);
// hipDeviceptr_t* eyes = (hipDeviceptr_t*)eyeCollection->d_compoundEyes;
// for(uint32_t i = 0; i<eyeCollection->eyeCount; i++)
// {
// CompoundEyePosedDataRecord* eyeRecord = (CompoundEyePosedDataRecord*)(*(eyes + i));
// printf(" Eye pointer : %p\n", eyeRecord);
// CompoundEyePosedData eyeData = eyeRecord->data;
// printf(" Eye %i position: (%f, %f, %f)\n", i, eyeData.position.x, eyeData.position.y, eyeData.position.z);
// }
//}
hipDeviceptr_t* eyes = (hipDeviceptr_t*)eyeCollection->d_compoundEyes;// List of all eye records
CompoundEyePosedDataRecord* eyeRecord = (CompoundEyePosedDataRecord*)(*(eyes + eyeIndex)); // This eye record
CompoundEyePosedData eyeData = eyeRecord->data;// This eye record's data
if(ommatidialIndex >= eyeData.specializedData.ommatidialCount)
return;// Exit if this thread would reference an ommatidium that doesn't exist
Ommatidium* allOmmatidia = (Ommatidium*)(eyeData.specializedData.d_ommatidialArray);// List of all ommatidia
Ommatidium ommatidium = *(allOmmatidia + ommatidialIndex);// This ommatidium
const float3 relativePos = ommatidium.relativePosition;
float3 relativeDir = ommatidium.relativeDirection;
//// Current nasty hack to make the spread work. Will add ommatidial-based spread next.
//uint32_t seed = tea<4>( launch_idx.z*launch_dims.y*launch_dims.x + launch_idx.y*launch_dims.x + launch_idx.x + params.frame , 42 );
//const float ommatidialAxisAngle = rnd(seed)*M_PIf*2.0f;
//const float splayAngle = rnd(seed)*(02.0f/180.0f)*M_PIf;//rnd(seed)*ommatidium.halfAcceptanceAngle;
//// Generate a pair of angles away from the ommatidial axis
//relativeDir = generateOffsetRay(ommatidialAxisAngle, splayAngle, relativeDir);
hiprandState_t state;
if(params.initializeRandos == true)
{
// First, initialize the random number generator if it needs to be initialized
hiprand_init(42, id, 0, &state);
((hiprandState_t*)params.randomsBufferPtr)[id] = state;
}else{
// If not, pull down a local copy of the state for the random number generator
state = ((hiprandState_t*)params.randomsBufferPtr)[id];
}
// Calculate the s.d. to scale a standard normal random value up to so that it matches the acceptance angle
const float standardDeviation = ommatidium.acceptanceAngleRadians/FWHM_SD_RATIO;
float splayAngle = hiprand_normal(&state) * standardDeviation;// Angle away from the ommatidial axis
float ommatidialAxisAngle = hiprand_uniform(&state)*M_PIf;// Angle around the ommatidial axis (note that it only needs to rotate through 180 degrees because splayAngle can be negative)
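// Background for the sampling above: for a Gaussian, FWHM = 2*sqrt(2*ln 2) * sigma
// (about 2.35482 * sigma), so dividing the acceptance angle (treated here as a full
// width at half maximum) by FWHM_SD_RATIO gives the standard deviation of the splay
// angle. E.g. an acceptance angle of 0.0349 rad (~2 degrees) gives sigma of ~0.0148 rad.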
// Copy the RNG state back into the buffer for use next time
((hiprandState_t*)params.randomsBufferPtr)[id] = state;
// Generate a pair of angles away from the ommatidial axis
relativeDir = generateOffsetRay(ommatidialAxisAngle, splayAngle, relativeDir);
// Transform ray information into world-space
const float3 ray_origin = eyeData.position + eyeData.localSpace.xAxis*relativePos.x
+ eyeData.localSpace.yAxis*relativePos.y
+ eyeData.localSpace.zAxis*relativePos.z;
const float3 ray_direction = eyeData.localSpace.xAxis * relativeDir.x
+ eyeData.localSpace.yAxis * relativeDir.y
+ eyeData.localSpace.zAxis * relativeDir.z;
// Transmit the ray
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Add results to the compound buffer
// This mixes in the feedback from each sample ray with respect to its position in the rendering volume.
// For instance, if each ommatidium is to make 20 samples then each launch of this shader is one sample and only
// contributes 0.05/1 to the final colour in the compound buffer.
//
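// Illustrative indexing (assuming the launch is compoundBufferWidth x eyeCount x samplesPerOmmatidium):
// with launch_dims.x = 512, launch_dims.y = 4 eyes and 20 samples, eye 2 / ommatidium 10 / sample 3
// writes to index 2*512 + 10 + 3*(512*4) = 7178, and each write carries 1/20th of the sample's radiance.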
const uint32_t compoundIndex = eyeIndex * launch_dims.x + ommatidialIndex + sampleIndex * (launch_dims.x*launch_dims.y);
((float3*)params.compoundBufferPtr)[compoundIndex] = payload.result*(1.0f/eyeData.specializedData.samplesPerOmmatidium);// Scale it down as these will be summed in the projection shader
}
//------------------------------------------------------------------------------
//
// Miss programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __miss__constant_radiance()
{
//setPayloadResult( params.miss_color );
const float3 dir = normalize(optixGetWorldRayDirection());
setPayloadResult(make_float3((atan2(dir.z, dir.x)+M_PIf)/(M_PIf*2.0f), (asin(dir.y)+M_PIf/2.0f)/(M_PIf), 0.0f));
const float border = 0.01f;
if(abs(dir.x) < border || abs(dir.y) < border || abs(dir.z) < border)
setPayloadResult(make_float3(0.0f));
}
//------------------------------------------------------------------------------
//
// Hit Programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
//setPayloadResult( make_float3(1.0f));
const globalParameters::HitGroupData* hit_group_data = reinterpret_cast<globalParameters::HitGroupData*>( optixGetSbtDataPointer() );
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
if(!params.lighting)
{
setPayloadResult( base_color);
return;
}
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < params.lights.count; ++i )
{
Light::Point light = params.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( params.handle, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
}
| d0d82b5d232ba394e49205792df62e3da386de93.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include <cuda/LocalGeometry.h>
#include <cuda/random.h>
#include <sutil/vec_math.h>
#include "GlobalParameters.h"
#include <stdint.h>
#include <stdio.h>
// For each camera Datatype:
#include "cameras/PerspectiveCameraDataTypes.h"
#include "cameras/PanoramicCameraDataTypes.h"
#include "cameras/GenericCameraDataTypes.h"
#include "cameras/OrthographicCameraDataTypes.h"
#include "cameras/CompoundEyeDataTypes.h"
// cuRand
#include <curand_kernel.h>
__constant__ float FWHM_SD_RATIO = 2.35482004503094938202313865291f;//939927549477137877164107704505151300005317709396985361683627673754162213494315716402473805711790020883378678441772367335067327119300654086099581027060701147250592490674309776452246690206347679431657862550790224141333488894447689644236226579600412626548283966926341892712473657396439184227529340027195703289818425375703612253952994171698822696215836693931109079884506177990740279369004153115665698570697083992256
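// (FWHM_SD_RATIO is 2*sqrt(2*ln 2), the factor relating a Gaussian's full width at half
// maximum to its standard deviation; far fewer digits than above are representable in a float.)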
extern "C"
{
__constant__ globalParameters::LaunchParams params;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
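// This is the GGX/Trowbridge-Reitz normal distribution function,
// D(h) = alpha^2 / ( pi * ( (N.H)^2 * (alpha^2 - 1) + 1 )^2 ).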
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
globalParameters::PayloadRadiance* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
globalParameters::RAY_TYPE_RADIANCE, // SBT offset
globalParameters::RAY_TYPE_COUNT, // SBT stride
globalParameters::RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3 );
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->depth = u3;
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
globalParameters::RAY_TYPE_OCCLUSION, // SBT offset
globalParameters::RAY_TYPE_COUNT, // SBT stride
globalParameters::RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
const float gamma = 2.2f;
return make_uchar4(
static_cast<uint8_t>( powf( clamp( c.x, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.y, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
static_cast<uint8_t>( powf( clamp( c.z, 0.0f, 1.0f ), 1.0/gamma )*255.0f ),
255u
);
}
//------------------------------------------------------------------------------
//
// Ray Generation Programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__pinhole()
{
PerspectiveCameraPosedData* posedData = (PerspectiveCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here.
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const LocalSpace& ls = posedData->localSpace;
const float3 scale = posedData->specializedData.scale;
const float3 ray_direction = ls.zAxis*scale.z + d.x*ls.xAxis*scale.x + d.y*ls.yAxis*scale.y;
const float3 ray_origin = posedData->position;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
extern "C" __global__ void __raygen__panoramic()
{
PanoramicCameraPosedData* posedData = (PanoramicCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 originalDir = make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
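// Mapping check (illustrative): the image centre (d = 0,0) gives angles (pi/2, 0) and thus
// originalDir = (0, 0, 1), i.e. the local +Z axis; the top and bottom rows (d.y = +/-1) map
// to elevations of +/-pi/2, i.e. straight up/down along the local Y axis.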
const float3 lxAxis = posedData->localSpace.xAxis;
const float3 lyAxis = posedData->localSpace.yAxis;
const float3 lzAxis = posedData->localSpace.zAxis;
const float3 ray_direction = normalize(originalDir.x * lxAxis + originalDir.y * lyAxis + originalDir.z * lzAxis);
//const float3 ray_direction = normalize(posedData->localSpace.transform(originalDir));
const float3 ray_origin = posedData->position + ray_direction*posedData->specializedData.startRadius;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
extern "C" __global__ void __raygen__orthographic()
{
OrthographicCameraPosedData* posedData = (OrthographicCameraPosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
//
// Generate camera ray
//
const float2 subpixel_jitter = make_float2(0.0f);// No subpixel jitter here.
const float2 d = 2.0f * make_float2(
( static_cast<float>( launch_idx.x ) + subpixel_jitter.x ) / static_cast<float>( launch_dims.x ),
( static_cast<float>( launch_idx.y ) + subpixel_jitter.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const LocalSpace& ls = posedData->localSpace;
const float2 scale = posedData->specializedData.scale;
const float3 ray_direction = ls.zAxis;
const float3 ray_origin = posedData->position + d.x*ls.xAxis*scale.x + d.y*ls.yAxis*scale.y;
//
// Trace camera ray
//
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(payload.result);
}
//------------------------------------------------------------------------------
//
// Ommatidial Ray Projection Generation Programs
//
//------------------------------------------------------------------------------
__device__ float3 getSummedOmmatidiumData(const uint32_t eyeIndex, const uint32_t ommatidialIndex, const uint32_t samples)
{
// Sum the compound elements that make up each sample from the ommatidium
const uint32_t compoundXYindex = eyeIndex*params.compoundBufferWidth + ommatidialIndex;
const uint32_t compoundImageArea = params.compoundBufferWidth * params.compoundBufferHeight;
float3 summation = make_float3(0,0,0);
for(uint32_t i = 0; i<samples; i++)
summation += ((float3*)params.compoundBufferPtr)[compoundXYindex + i * compoundImageArea];
return summation;
}
//// Displays the raw (still sample-wise summed) data from all compound eyes
//extern "C" __global__ void __raygen__compound_projection_all_eyes()
//{
// CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
// const uint3 launch_idx = optixGetLaunchIndex();
// const uint3 launch_dims = optixGetLaunchDimensions();
// const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
// const uint32_t compWidth = params.compoundBufferWidth;
// const uint32_t compHeight = params.compoundBufferHeight;
// const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
//
// CUdeviceptr* eyes = (CUdeviceptr*)posedData->specializedData.d_compoundArray
//
// // Scale the x coordinate by the number of ommatidia (we don't want to be reading too far off the edge of the assigned ommatidia)
// const uint32_t ommatidiumIndex = (launch_idx.x * ommatidialCount)/launch_dims.x;
//
// //
// // Update results
// //
// const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
// params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, ommatidiumIndex, posedData->specializedData.samplesPerOmmatidium));
//}
extern "C" __global__ void __raygen__compound_projection_single_dimension()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
const uint32_t compWidth = params.compoundBufferWidth;
const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Scale the x coordinate by the number of ommatidia (we don't want to be reading too far off the edge of the assigned ommatidia)
const uint32_t ommatidiumIndex = (launch_idx.x * ommatidialCount)/launch_dims.x;
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, ommatidiumIndex, posedData->specializedData.samplesPerOmmatidium));
}
extern "C" __global__ void __raygen__compound_projection_single_dimension_fast()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
// Break if this is not a pixel to render:
if(launch_idx.y > 0 || launch_idx.x >= posedData->specializedData.ommatidialCount) return;
// Set the colour based on the ommatidia this pixel represents
params.frame_buffer[(uint32_t)launch_idx.x] = make_color(getSummedOmmatidiumData(posedData->specializedData.eyeIndex, launch_idx.x, posedData->specializedData.samplesPerOmmatidium));
}
// Projects the positions of each ommatidium down to a sphere and samples the closest one, position-wise
extern "C" __global__ void __raygen__compound_projection_spherical_positionwise()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
//const uint32_t compWidth = params.compoundBufferWidth;
//const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Project the 2D coordinates of the display window to spherical coordinates
const float2 d = 2.0f * make_float2(
static_cast<float>( launch_idx.x ) / static_cast<float>( launch_dims.x ),
static_cast<float>( launch_idx.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 unitSpherePosition= make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
// Finds the closest ommatidium (NOTE: This is explicitly based on position)
Ommatidium* allOmmatidia = (Ommatidium*)(posedData->specializedData.d_ommatidialArray);// List of all ommatidia
float dx = allOmmatidia->relativePosition.x - unitSpherePosition.x;
float dy = allOmmatidia->relativePosition.y - unitSpherePosition.y;
float dz = allOmmatidia->relativePosition.z - unitSpherePosition.z;
float closestDistance = dx*dx+dy*dy+dz*dz;
float dist;
uint32_t i, closestIndex = 0;
for(i = 1; i<ommatidialCount; i++)
{
dx = (allOmmatidia + i)->relativePosition.x - unitSpherePosition.x;
dy = (allOmmatidia + i)->relativePosition.y - unitSpherePosition.y;
dz = (allOmmatidia + i)->relativePosition.z - unitSpherePosition.z;
dist = dx*dx+dy*dy+dz*dz;
if(dist <closestDistance)
{
closestDistance = dist;
closestIndex = i;
}
}
// Save the summed samples from the closest ommatidium as the pixel colour
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, closestIndex, posedData->specializedData.samplesPerOmmatidium));
}
extern "C" __global__ void __raygen__compound_projection_spherical_orientationwise()
{
CompoundEyePosedData* posedData = (CompoundEyePosedData*)optixGetSbtDataPointer();
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = posedData->specializedData.eyeIndex;
const uint32_t compWidth = params.compoundBufferWidth;
const uint32_t compHeight = params.compoundBufferHeight;
const size_t ommatidialCount = posedData->specializedData.ommatidialCount;
// Project the 2D coordinates of the display window to spherical coordinates
const float2 d = 2.0f * make_float2(
static_cast<float>( launch_idx.x ) / static_cast<float>( launch_dims.x ),
static_cast<float>( launch_idx.y ) / static_cast<float>( launch_dims.y )
) - 1.0f;
const float2 angles = d * make_float2(-M_PIf, M_PIf/2.0f) + make_float2(M_PIf/2.0f, 0.0f);
const float cosY = cos(angles.y);
const float3 unitSpherePosition= make_float3(cos(angles.x)*cosY, sin(angles.y), sin(angles.x)*cosY);
// Finds the closest ommatidium (NOTE: This is explicitly based on orientation)
Ommatidium* allOmmatidia = (Ommatidium*)(posedData->specializedData.d_ommatidialArray);// List of all ommatidia
float smallestAngle = acos(dot(allOmmatidia->relativeDirection, unitSpherePosition)/(length(allOmmatidia->relativeDirection)*length(unitSpherePosition)));
float angle;
uint32_t i, closestIndex = 0;
for(i = 1; i<ommatidialCount; i++)
{
angle = acos(dot((allOmmatidia+i)->relativeDirection, unitSpherePosition)/(length((allOmmatidia+i)->relativeDirection)*length(unitSpherePosition)));
if(angle < smallestAngle)
{
smallestAngle = angle;
closestIndex = i;
}
}
//
// Update results
//
const uint32_t image_index = launch_idx.y * launch_dims.x + launch_idx.x;
params.frame_buffer[image_index] = make_color(getSummedOmmatidiumData(eyeIndex, closestIndex, posedData->specializedData.samplesPerOmmatidium));
}
//------------------------------------------------------------------------------
//
// Ommatidial Ray Generation Programs
//
//------------------------------------------------------------------------------
__device__ inline float3 rotatePoint(const float3 point, const float angle, const float3 axis)
{
return (cos(angle)*point + sin(angle)*cross(axis, point) + (1 - cos(angle))*dot(axis, point)*axis);
}
__device__ float3 generateOffsetRay( const float ommatidialAxisAngle, const float splayAngle, const float3 ommatidialAxis)
{
//// Rotate the ommatidial axis about a perpendicular vector by splay angle
float3 perpAxis = cross(make_float3(0.0f, 1.0f, 0.0f), ommatidialAxis);
// Check that the perpAxis isn't zero (because ommatidialAxis was pointing directly up) (could probably be done with a memcmp for speed)
perpAxis = (perpAxis.x == 0.0f && perpAxis.y == 0.0f && perpAxis.z == 0.0f) ? make_float3(0.0f, 0.0f, 1.0f) : normalize(perpAxis);
// Rotate by the splay angle
const float3 splayedAxis = rotatePoint(ommatidialAxis, splayAngle, perpAxis);
//// Rotate the new axis around the original ommatidial axis by the ommatidialAxisAngle
return rotatePoint(splayedAxis, ommatidialAxisAngle, ommatidialAxis);
}
extern "C" __global__ void __raygen__ommatidium()
{
const uint3 launch_idx = optixGetLaunchIndex();
const uint3 launch_dims = optixGetLaunchDimensions();
const uint32_t eyeIndex = launch_idx.y;
const uint32_t ommatidialIndex = launch_idx.x;
const uint32_t sampleIndex = launch_idx.z;
const int id = launch_idx.z*launch_dims.y*launch_dims.x + launch_idx.y*launch_dims.x + launch_idx.x;
CompoundEyeCollectionData* eyeCollection = (CompoundEyeCollectionData*)optixGetSbtDataPointer();
//if(threadIdx.x == 1)
//{
// printf("%i eyes found\n", eyeCollection->eyeCount);
// CUdeviceptr* eyes = (CUdeviceptr*)eyeCollection->d_compoundEyes;
// for(uint32_t i = 0; i<eyeCollection->eyeCount; i++)
// {
// CompoundEyePosedDataRecord* eyeRecord = (CompoundEyePosedDataRecord*)(*(eyes + i));
// printf(" Eye pointer : %p\n", eyeRecord);
// CompoundEyePosedData eyeData = eyeRecord->data;
// printf(" Eye %i position: (%f, %f, %f)\n", i, eyeData.position.x, eyeData.position.y, eyeData.position.z);
// }
//}
CUdeviceptr* eyes = (CUdeviceptr*)eyeCollection->d_compoundEyes;// List of all eye records
CompoundEyePosedDataRecord* eyeRecord = (CompoundEyePosedDataRecord*)(*(eyes + eyeIndex)); // This eye record
CompoundEyePosedData eyeData = eyeRecord->data;// This eye record's data
if(ommatidialIndex >= eyeData.specializedData.ommatidialCount)
return;// Exit if this thread would reference an ommatidium that doesn't exist
Ommatidium* allOmmatidia = (Ommatidium*)(eyeData.specializedData.d_ommatidialArray);// List of all ommatidia
Ommatidium ommatidium = *(allOmmatidia + ommatidialIndex);// This ommatidium
const float3 relativePos = ommatidium.relativePosition;
float3 relativeDir = ommatidium.relativeDirection;
//// Current nasty hack to make the spread work. Will add ommatidial-based spread next.
//uint32_t seed = tea<4>( launch_idx.z*launch_dims.y*launch_dims.x + launch_idx.y*launch_dims.x + launch_idx.x + params.frame , 42 );
//const float ommatidialAxisAngle = rnd(seed)*M_PIf*2.0f;
//const float splayAngle = rnd(seed)*(02.0f/180.0f)*M_PIf;//rnd(seed)*ommatidium.halfAcceptanceAngle;
//// Generate a pair of angles away from the ommatidial axis
//relativeDir = generateOffsetRay(ommatidialAxisAngle, splayAngle, relativeDir);
curandState state;
if(params.initializeRandos == true)
{
// First, initialize the random number generator if it needs to be initialized
curand_init(42, id, 0, &state);
((curandState*)params.randomsBufferPtr)[id] = state;
}else{
// If not, pull down a local copy of the state for the random number generator
state = ((curandState*)params.randomsBufferPtr)[id];
}
// Calculate the s.d. to scale a standard normal random value up to so that it matches the acceptance angle
const float standardDeviation = ommatidium.acceptanceAngleRadians/FWHM_SD_RATIO;
float splayAngle = curand_normal(&state) * standardDeviation;// Angle away from the ommatidial axis
float ommatidialAxisAngle = curand_uniform(&state)*M_PIf;// Angle around the ommatidial axis (note that it only needs to rotate through 180 degrees because splayAngle can be negative)
// Copy the RNG state back into the buffer for use next time
((curandState*)params.randomsBufferPtr)[id] = state;
// Generate a pair of angles away from the ommatidial axis
relativeDir = generateOffsetRay(ommatidialAxisAngle, splayAngle, relativeDir);
// Transform ray information into world-space
const float3 ray_origin = eyeData.position + eyeData.localSpace.xAxis*relativePos.x
+ eyeData.localSpace.yAxis*relativePos.y
+ eyeData.localSpace.zAxis*relativePos.z;
const float3 ray_direction = eyeData.localSpace.xAxis * relativeDir.x
+ eyeData.localSpace.yAxis * relativeDir.y
+ eyeData.localSpace.zAxis * relativeDir.z;
// Transmit the ray
globalParameters::PayloadRadiance payload;
payload.result = make_float3( 0.0f );
payload.importance = 1.0f;
payload.depth = 0.0f;
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
//
// Add results to the compound buffer
// This mixes in the feedback from each sample ray with respect to its position in the rendering volume.
// For instance, if each ommatidium is to make 20 samples then each launch of this shader is one sample and only
// contributes 0.05/1 to the final colour in the compound buffer.
//
const uint32_t compoundIndex = eyeIndex * launch_dims.x + ommatidialIndex + sampleIndex * (launch_dims.x*launch_dims.y);
((float3*)params.compoundBufferPtr)[compoundIndex] = payload.result*(1.0f/eyeData.specializedData.samplesPerOmmatidium);// Scale it down as these will be summed in the projection shader
}
//------------------------------------------------------------------------------
//
// Miss programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __miss__constant_radiance()
{
//setPayloadResult( params.miss_color );
const float3 dir = normalize(optixGetWorldRayDirection());
setPayloadResult(make_float3((atan2(dir.z, dir.x)+M_PIf)/(M_PIf*2.0f), (asin(dir.y)+M_PIf/2.0f)/(M_PIf), 0.0f));
const float border = 0.01f;
if(abs(dir.x) < border || abs(dir.y) < border || abs(dir.z) < border)
setPayloadResult(make_float3(0.0f));
}
//------------------------------------------------------------------------------
//
// Hit Programs
//
//------------------------------------------------------------------------------
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
//setPayloadResult( make_float3(1.0f));
const globalParameters::HitGroupData* hit_group_data = reinterpret_cast<globalParameters::HitGroupData*>( optixGetSbtDataPointer() );
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
if(!params.lighting)
{
setPayloadResult( base_color);
return;
}
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < params.lights.count; ++i )
{
Light::Point light = params.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( params.handle, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
}
|
34e2d9b924e33df77e8c4877fb283eb4a75f5dd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Warp ID in the CUDA block
INLINE static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// Lane ID in the CUDA warp.
INLINE static unsigned getLaneId() { return threadIdx.x % WARPSIZE; }
// Return true if this is the first active thread in the warp.
INLINE static bool IsWarpMasterActiveThread() {
unsigned long long Mask = __ACTIVEMASK();
unsigned long long ShNum = WARPSIZE - (GetThreadIdInBlock() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
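// The shift moves lower-indexed lanes toward the low 32 bits: for lane L only the bits of
// lanes 0..L-1 survive the truncation below (e.g. for lane 5, Mask << 27 keeps lanes 0-4),
// so the truncated value is zero exactly when this is the lowest-indexed active lane.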
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
// Return true if this is the master thread.
INLINE static bool IsMasterThread(bool isSPMDExecutionMode) {
return !isSPMDExecutionMode && GetMasterThreadID() == GetThreadIdInBlock();
}
/// Return the provided size aligned to the size of a pointer.
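/// For example, with 8-byte pointers AlignVal(13) returns 16, while an already
/// aligned value such as 24 is returned unchanged.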
INLINE static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %u\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS =
teamDescr->RootS(WID, IsMasterThread(isSPMDMode()));
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", (unsigned)InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (unsigned long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(unsigned long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n",
(unsigned long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(unsigned long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(unsigned long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, used __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ACTIVEMASK();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %u\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (unsigned long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (unsigned long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n",
(unsigned long long)CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n",
(unsigned long long)CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n",
(unsigned long long)RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", (unsigned)ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(unsigned long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(unsigned long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(unsigned long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
// Clean up any old slot that we may still have. The slot producers do
// not eliminate them because the old slot may still be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(unsigned long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread(isSPMDMode())
? *SavedSharedSlot
: DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ACTIVEMASK();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n",
(unsigned)CurActive, (unsigned)ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n",
(unsigned long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n",
(unsigned long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n",
(unsigned long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %u\n", SourceWID);
void * volatile P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
INLINE static void data_sharing_init_stack_common() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
for (int WID = 0; WID < WARPSIZE; WID++) {
__kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
}
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called only by the MASTER thread of each
// team in non-SPMD mode.
EXTERN void __kmpc_data_sharing_init_stack() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
data_sharing_init_stack_common();
omptarget_nvptx_globalArgs.Init();
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called in SPMD mode only.
EXTERN void __kmpc_data_sharing_init_stack_spmd() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
if (threadIdx.x == 0)
data_sharing_init_stack_common();
__threadfence_block();
}
INLINE static void* data_sharing_push_stack_common(size_t PushSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
// Only warp active master threads manage the stack.
bool IsWarpMaster = (GetThreadIdInBlock() % WARPSIZE) == 0;
// Add worst-case padding to PushSize so that future stack allocations are
// correctly aligned.
const size_t Alignment = 8;
PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment;
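// e.g. with Alignment = 8, a 13-byte request is rounded up to 16 bytes so the next
// frame pushed on top of it stays 8-byte aligned.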
// Frame pointer must be visible to all workers in the same warp.
const unsigned WID = getWarpId();
void *FrameP = 0;
int32_t CurActive = __ACTIVEMASK();
if (IsWarpMaster) {
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
__kmpc_data_sharing_slot *NewSlot = 0;
size_t NewSize = PushSize;
// Allocate at least the default size for each type of slot.
// Master is a special case and even though there is only one thread,
// it can share more things with the workers. For uniformity, it uses
// the full size of a worker warp slot.
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size;
if (DefaultSlotSize > NewSize)
NewSize = DefaultSlotSize;
NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc(
sizeof(__kmpc_data_sharing_slot) + NewSize,
"Global memory slot allocation.");
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[0] + NewSize;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[0] + PushSize;
// The frame pointer always points to the beginning of the frame.
FrameP = DataSharingState.FramePtr[WID] = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = DataSharingState.FramePtr[WID] = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
}
// Get address from lane 0.
((int *)&FrameP)[0] = __SHFL_SYNC(CurActive, ((int *)&FrameP)[0], 0);
if (sizeof(FrameP) == 8)
((int *)&FrameP)[1] = __SHFL_SYNC(CurActive, ((int *)&FrameP)[1], 0);
return FrameP;
}
EXTERN void *__kmpc_data_sharing_coalesced_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
return data_sharing_push_stack_common(DataSize);
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
EXTERN void *__kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
// Compute the total memory footprint of the requested data.
// The master thread requires a stack only for itself. A worker
// thread (which at this point is a warp master) will require
// space for the variables of each thread in the warp,
// i.e. one DataSize chunk per warp lane.
// TODO: change WARPSIZE to the number of active threads in the warp.
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread(isSPMDMode()))
? DataSize
: WARPSIZE * DataSize;
// Compute the start address of the frame of each thread in the warp.
uintptr_t FrameStartAddress =
(uintptr_t) data_sharing_push_stack_common(PushSize);
FrameStartAddress += (uintptr_t) (getLaneId() * DataSize);
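// Illustrative layout: with DataSize = 24 bytes and WARPSIZE = 32, the warp master reserves
// one 768-byte frame and lane 5 receives the slice starting at offset 5*24 = 120 within it.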
return (void *)FrameStartAddress;
}
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
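// Typical pairing (illustrative sketch, not taken from a specific caller):
//   void *Frame = __kmpc_data_sharing_push_stack(Size, /*UseSharedMemory=*/0);
//   ... workers access their globalized variables through Frame ...
//   __kmpc_data_sharing_pop_stack(Frame);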
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
__threadfence_block();
if (GetThreadIdInBlock() % WARPSIZE == 0) {
unsigned WID = getWarpId();
// Current slot
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
// Pointer to next available stack.
void *&StackP = DataSharingState.StackPtr[WID];
// Pop the frame.
StackP = FrameStart;
// If the current slot is empty, we need to free the slot after the
// pop.
bool SlotEmpty = (StackP == &SlotP->Data[0]);
if (SlotEmpty && SlotP->Prev) {
// Before removing the slot we need to reset StackP.
StackP = SlotP->PrevSlotStackPtr;
// Remove the slot.
SlotP = SlotP->Prev;
SafeFree(SlotP->Next, "Free slot.");
SlotP->Next = 0;
}
}
}
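// A minimal sketch of how these entry points are expected to pair up in
// compiler-generated code (assumed usage; "SharedVars" is a hypothetical
// struct, not defined in this file):
//
//   void *Frame = __kmpc_data_sharing_push_stack(sizeof(SharedVars),
//                                                /*UseSharedMemory=*/0);
//   SharedVars *V = (SharedVars *)Frame;
//   // ... V is visible to the workers for the duration of the region ...
//   __kmpc_data_sharing_pop_stack(Frame);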
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
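// Rough call sequence implied by the comments above (a sketch; the shared
// values a and b are hypothetical):
//
//   // warp master:
//   void **Args;
//   __kmpc_begin_sharing_variables(&Args, /*nArgs=*/2);
//   Args[0] = &a; Args[1] = &b;
//   // each worker:
//   void **WorkerArgs;
//   __kmpc_get_shared_variables(&WorkerArgs); // WorkerArgs[0] == &a, etc.
//   // warp master, after the region:
//   __kmpc_end_sharing_variables();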
// This function is used to initialize the static memory manager. This manager
// is used to manage statically allocated global memory. This memory is allocated by the
// compiler and used to correctly implement globalization of the variables in
// target, teams and distribute regions.
EXTERN void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
const void *buf, size_t size,
int16_t is_shared,
const void **frame) {
if (is_shared) {
*frame = buf;
return;
}
if (isSPMDExecutionMode) {
if (GetThreadIdInBlock() == 0) {
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
}
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
return;
}
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
__threadfence();
}
EXTERN void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
int16_t is_shared) {
if (is_shared)
return;
if (isSPMDExecutionMode) {
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
if (GetThreadIdInBlock() == 0) {
omptarget_nvptx_simpleMemoryManager.Release();
}
return;
}
__threadfence();
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
omptarget_nvptx_simpleMemoryManager.Release();
}
| 34e2d9b924e33df77e8c4877fb283eb4a75f5dd6.cu | //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include <stdio.h>
// Warp ID in the CUDA block
INLINE static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// Lane ID in the CUDA warp.
INLINE static unsigned getLaneId() { return threadIdx.x % WARPSIZE; }
// Return true if this is the first active thread in the warp.
INLINE static bool IsWarpMasterActiveThread() {
unsigned long long Mask = __ACTIVEMASK();
unsigned long long ShNum = WARPSIZE - (GetThreadIdInBlock() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
// Return true if this is the master thread.
INLINE static bool IsMasterThread(bool isSPMDExecutionMode) {
return !isSPMDExecutionMode && GetMasterThreadID() == GetThreadIdInBlock();
}
/// Return the provided size aligned to the size of a pointer.
INLINE static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
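// Worked example (illustrative values): with 8-byte pointers Align == 8, so
// AlignVal(13) first becomes 13 + 8 == 21 and then 21 & ~7 == 16, while an
// already aligned value such as AlignVal(16) is returned unchanged because
// 16 & 7 == 0.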
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %u\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS =
teamDescr->RootS(WID, IsMasterThread(isSPMDMode()));
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", (unsigned)InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (unsigned long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(unsigned long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n",
(unsigned long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(unsigned long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(unsigned long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, used __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ACTIVEMASK();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %u\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (unsigned long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (unsigned long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n",
(unsigned long long)CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n",
(unsigned long long)CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n",
(unsigned long long)RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", (unsigned)ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(unsigned long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(unsigned long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(unsigned long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
      // Clean up any old slot that we may still have. The slot producers do
      // not eliminate them because they may be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(unsigned long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread(isSPMDMode())
? *SavedSharedSlot
: DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ACTIVEMASK();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n",
(unsigned)CurActive, (unsigned)ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n",
(unsigned long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n",
(unsigned long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n",
(unsigned long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %u\n", SourceWID);
void * volatile P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
INLINE static void data_sharing_init_stack_common() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
for (int WID = 0; WID < WARPSIZE; WID++) {
__kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
}
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called only by the MASTER thread of each
// team in non-SPMD mode.
EXTERN void __kmpc_data_sharing_init_stack() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
data_sharing_init_stack_common();
omptarget_nvptx_globalArgs.Init();
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called in SPMD mode only.
EXTERN void __kmpc_data_sharing_init_stack_spmd() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
if (threadIdx.x == 0)
data_sharing_init_stack_common();
__threadfence_block();
}
INLINE static void* data_sharing_push_stack_common(size_t PushSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
// Only warp active master threads manage the stack.
bool IsWarpMaster = (GetThreadIdInBlock() % WARPSIZE) == 0;
// Add worst-case padding to DataSize so that future stack allocations are
// correctly aligned.
const size_t Alignment = 8;
PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment;
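  // Worked example (illustrative values): with Alignment == 8, a request of
  // 13 bytes is rounded up to (13 + 7) / 8 * 8 == 16, while an already aligned
  // request such as 768 bytes is left unchanged.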
// Frame pointer must be visible to all workers in the same warp.
const unsigned WID = getWarpId();
void *FrameP = 0;
int32_t CurActive = __ACTIVEMASK();
if (IsWarpMaster) {
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
__kmpc_data_sharing_slot *NewSlot = 0;
size_t NewSize = PushSize;
// Allocate at least the default size for each type of slot.
// Master is a special case and even though there is only one thread,
// it can share more things with the workers. For uniformity, it uses
// the full size of a worker warp slot.
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size;
if (DefaultSlotSize > NewSize)
NewSize = DefaultSlotSize;
NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc(
sizeof(__kmpc_data_sharing_slot) + NewSize,
"Global memory slot allocation.");
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[0] + NewSize;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[0] + PushSize;
// The frame pointer always points to the beginning of the frame.
FrameP = DataSharingState.FramePtr[WID] = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = DataSharingState.FramePtr[WID] = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
}
// Get address from lane 0.
((int *)&FrameP)[0] = __SHFL_SYNC(CurActive, ((int *)&FrameP)[0], 0);
if (sizeof(FrameP) == 8)
((int *)&FrameP)[1] = __SHFL_SYNC(CurActive, ((int *)&FrameP)[1], 0);
return FrameP;
}
EXTERN void *__kmpc_data_sharing_coalesced_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
return data_sharing_push_stack_common(DataSize);
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
EXTERN void *__kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
// Compute the total memory footprint of the requested data.
// The master thread requires a stack only for itself. A worker
// thread (which at this point is a warp master) will require
// space for the variables of each thread in the warp,
// i.e. one DataSize chunk per warp lane.
// TODO: change WARPSIZE to the number of active threads in the warp.
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread(isSPMDMode()))
? DataSize
: WARPSIZE * DataSize;
// Compute the start address of the frame of each thread in the warp.
uintptr_t FrameStartAddress =
(uintptr_t) data_sharing_push_stack_common(PushSize);
FrameStartAddress += (uintptr_t) (getLaneId() * DataSize);
return (void *)FrameStartAddress;
}
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
__threadfence_block();
if (GetThreadIdInBlock() % WARPSIZE == 0) {
unsigned WID = getWarpId();
// Current slot
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
// Pointer to next available stack.
void *&StackP = DataSharingState.StackPtr[WID];
// Pop the frame.
StackP = FrameStart;
// If the current slot is empty, we need to free the slot after the
// pop.
bool SlotEmpty = (StackP == &SlotP->Data[0]);
if (SlotEmpty && SlotP->Prev) {
// Before removing the slot we need to reset StackP.
StackP = SlotP->PrevSlotStackPtr;
// Remove the slot.
SlotP = SlotP->Prev;
SafeFree(SlotP->Next, "Free slot.");
SlotP->Next = 0;
}
}
}
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// This function is used to initialize the static memory manager. This manager
// is used to manage statically allocated global memory. This memory is allocated by the
// compiler and used to correctly implement globalization of the variables in
// target, teams and distribute regions.
EXTERN void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
const void *buf, size_t size,
int16_t is_shared,
const void **frame) {
if (is_shared) {
*frame = buf;
return;
}
if (isSPMDExecutionMode) {
if (GetThreadIdInBlock() == 0) {
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
}
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
return;
}
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
__threadfence();
}
EXTERN void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
int16_t is_shared) {
if (is_shared)
return;
if (isSPMDExecutionMode) {
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
if (GetThreadIdInBlock() == 0) {
omptarget_nvptx_simpleMemoryManager.Release();
}
return;
}
__threadfence();
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
omptarget_nvptx_simpleMemoryManager.Release();
}
|
159e033f02c767ba8fa5d6ddb86c68bc595b14b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Using different memory spaces in CUDA
#include <stdio.h>
#include <iostream> // required for the std::cout calls in main()
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory, but
// the resulting modified data is never copied back to global memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
//==========================================================
//==========================================================
int main(int argc, char **argv)
{
std::cout << " starts ... \n";
/*
* First, call a kernel that shows using local memory
*/
hipLaunchKernelGGL(( use_local_memory_GPU), dim3(1), dim3(128), 0, 0, 2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
hipMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
hipMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, hipMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
hipLaunchKernelGGL(( use_global_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents of h_arr
hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, hipMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
hipLaunchKernelGGL(( use_shared_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr);
// copy the modified array back to the host
    hipMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, hipMemcpyDeviceToHost);
// ... do other stuff ...
std::cout << " ends \n";
return 0;
}
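// The example above ignores the hipError_t values returned by hipMalloc and
// hipMemcpy. A minimal checking helper one might wrap those calls in (a
// sketch, not part of the original lesson code; exit() would also need
// <cstdlib>):
//
//   #define HIP_CHECK(call) \
//     do { \
//       hipError_t err_ = (call); \
//       if (err_ != hipSuccess) { \
//         printf("HIP error %s at %s:%d\n", hipGetErrorString(err_), \
//                __FILE__, __LINE__); \
//         exit(1); \
//       } \
//     } while (0)
//
//   e.g. HIP_CHECK(hipMalloc((void **)&d_arr, sizeof(float) * 128));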
| 159e033f02c767ba8fa5d6ddb86c68bc595b14b6.cu |
// Using different memory spaces in CUDA
#include <stdio.h>
#include <iostream> // required for the std::cout calls in main()
/**********************
* using local memory *
**********************/
// a __device__ or __global__ function runs on the GPU
__global__ void use_local_memory_GPU(float in)
{
float f; // variable "f" is in local memory and private to each thread
f = in; // parameter "in" is in local memory and private to each thread
// ... real code would presumably do other stuff here ...
}
/**********************
* using global memory *
**********************/
// a __global__ function runs on the GPU & can be called from host
__global__ void use_global_memory_GPU(float *array)
{
// "array" is a pointer into global memory on the device
array[threadIdx.x] = 2.0f * (float) threadIdx.x;
}
/**********************
* using shared memory *
**********************/
// (for clarity, hardcoding 128 threads/elements and omitting out-of-bounds checks)
__global__ void use_shared_memory_GPU(float *array)
{
// local variables, private to each thread
int i, index = threadIdx.x;
float average, sum = 0.0f;
// __shared__ variables are visible to all threads in the thread block
// and have the same lifetime as the thread block
__shared__ float sh_arr[128];
// copy data from "array" in global memory to sh_arr in shared memory.
// here, each thread is responsible for copying a single element.
sh_arr[index] = array[index];
__syncthreads(); // ensure all the writes to shared memory have completed
// now, sh_arr is fully populated. Let's find the average of all previous elements
for (i=0; i<index; i++) { sum += sh_arr[i]; }
average = sum / (index + 1.0f);
// if array[index] is greater than the average of array[0..index-1], replace with average.
// since array[] is in global memory, this change will be seen by the host (and potentially
// other thread blocks, if any)
if (array[index] > average) { array[index] = average; }
// the following code has NO EFFECT: it modifies shared memory, but
// the resulting modified data is never copied back to global memory
// and vanishes when the thread block completes
sh_arr[index] = 3.14;
}
//==========================================================
//==========================================================
int main(int argc, char **argv)
{
std::cout << " starts ... \n";
/*
* First, call a kernel that shows using local memory
*/
use_local_memory_GPU<<<1, 128>>>(2.0f);
/*
* Next, call a kernel that shows using global memory
*/
float h_arr[128]; // convention: h_ variables live on host
float *d_arr; // convention: d_ variables live on device (GPU global mem)
// allocate global memory on the device, place result in "d_arr"
cudaMalloc((void **) &d_arr, sizeof(float) * 128);
// now copy data from host memory "h_arr" to device memory "d_arr"
cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 128, cudaMemcpyHostToDevice);
// launch the kernel (1 block of 128 threads)
use_global_memory_GPU<<<1, 128>>>(d_arr); // modifies the contents of array at d_arr
// copy the modified array back to the host, overwriting contents of h_arr
cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
// ... do other stuff ...
/*
* Next, call a kernel that shows using shared memory
*/
// as before, pass in a pointer to data in global memory
use_shared_memory_GPU<<<1, 128>>>(d_arr);
// copy the modified array back to the host
    cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 128, cudaMemcpyDeviceToHost);
// ... do other stuff ...
std::cout << " ends \n";
return 0;
}
|
d1e534d3ae41e578ce153916f0bfc12ab577d285.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "common/book.h"
// global will let the compiler know that this should run on device
// instead of host
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main() {
int c;
int *dev_c;
int a = 2, b = 7;
HANDLE_ERROR(hipMalloc((void**) &dev_c, sizeof(int))); // allocated on device
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, a, b, dev_c);
// copy result from device to host
HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
printf("%d + %d = %d\n", a, b, c);
hipFree(dev_c);
return 0;
}
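// Note: hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, a, b, dev_c) above is
// the hipify-generated spelling of the CUDA triple-chevron launch
// add<<<1, 1>>>(a, b, dev_c); the two extra arguments are the dynamic shared
// memory size (0 bytes here) and the stream (0, i.e. the default stream).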
| d1e534d3ae41e578ce153916f0bfc12ab577d285.cu | #include "stdio.h"
#include "common/book.h"
// global will let the compiler know that this should run on device
// instead of host
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main() {
int c;
int *dev_c;
int a = 2, b = 7;
HANDLE_ERROR(cudaMalloc((void**) &dev_c, sizeof(int))); // allocated on device
add<<<1, 1>>>(a, b, dev_c);
// copy result from device to host
HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
printf("%d + %d = %d\n", a, b, c);
cudaFree(dev_c);
return 0;
}
|
32205c054c4e4f6905e8671627a79eeb896ea257.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "consolidate.hpp"
#include <math.h>
#include <thrust/functional.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/merge.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include "globals.h"
#include "interpolate.hpp"
#define FLAG_LHS (1 << 0)
#define FLAG_RHS (1 << 1)
#define FLAG_ISC (1 << 2)
typedef struct {
float t; /**< The time value. */
int i; /**< The original index. */
int ilhs, irhs; /**< The indices of the last elements in lhs, rhs with t' <= t. */
int flags;
} seqpt_t;
struct seqpt_less : public thrust::binary_function<seqpt_t, seqpt_t, bool>
{
__device__ bool
operator()(const seqpt_t &lhs, const seqpt_t &rhs) const
{
return lhs.t < rhs.t;
}
};
__global__ static void
sigpt_extrapolate(const sigpt_t *lhs,
const sigpt_t *rhs,
const seqpt_t *ts,
sigpt_t *clhs,
sigpt_t *crhs,
const int n_lhs,
const int n_rhs,
const int n_ts)
{
/* Use the information provided by lhs, rhs, and ts
* to extrapolated a signal point sequence for both lhs and rhs for each
* time point in ts. */
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n_ts; i += blockDim.x * gridDim.x) {
const seqpt_t seqpt = ts[i];
const int ilhs = seqpt.ilhs;
const int irhs = seqpt.irhs;
/* TODO: Optimize. */
if (ilhs >= n_lhs - 1) {
clhs[i] = (sigpt_t){ seqpt.t, lhs[ilhs].y, lhs[ilhs].dy };
} else {
clhs[i] = interpolate(lhs + ilhs, lhs + ilhs + 1, seqpt.t);
}
if (irhs >= n_rhs - 1) {
crhs[i] = (sigpt_t){ seqpt.t, rhs[irhs].y, rhs[irhs].dy };
} else {
crhs[i] = interpolate(rhs + irhs, rhs + irhs + 1, seqpt.t);
}
}
}
__global__ static void
extract_i(const seqpt_t *in,
int *out,
const int n,
const int flag)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t s = in[i];
const int has_flag = (s.flags & flag) != 0;
out[i] = has_flag * s.i;
}
}
__global__ static void
merge_i(const int *lhs,
const int *rhs,
seqpt_t *out,
const int n)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
out[i].ilhs = lhs[i];
out[i].irhs = rhs[i];
}
}
__global__ static void
sigpt_to_seqpt(const sigpt_t *in,
seqpt_t *out,
const int n,
const int flags)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t seqpt = { in[i].t, i, 0, 0, flags };
out[i] = seqpt;
}
}
__global__ static void
insert_proto_intersections(const seqpt_t *in,
seqpt_t *out,
const int n)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t seqpt = in[i];
out[i * 2] = seqpt;
seqpt.flags |= FLAG_ISC;
out[i * 2 + 1] = seqpt;
}
}
__global__ static void
calc_intersections(const sigpt_t *lhs,
const sigpt_t *rhs,
seqpt_t *ts,
const int n_lhs,
const int n_rhs,
const int n_ts)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* At this point, we are only interested in intersection elements in ts.
* These are located at every index 2 * i + 1, i <- n_ts / 2.
*
* ts[i].ilhs is the index of the last point in lhs with t <= ts[i].t,
* and ts[i].irhs is the corresponding point in rhs.
*
* This is enough information to determine the time of the signal
* intersection.
*/
for (int i = tid; 2 * i + 1 < n_ts; i += blockDim.x * gridDim.x) {
const int ii = 2 * i + 1;
const seqpt_t s = ts[ii];
/* We now have four points corresponding to the end points of the
* two line segments. (x1, y1) and (x2, y2) for one line segment,
* (x3, y3) and (x4, y4) for the other line segment.
* We are interested in the x coordinate of the intersection:
* x = ((x1y2 - y1x2)(x3 - x4) - (x1 - x2)(x3y4 - y3x4)) /
* ((x1 - x2)(y3 - y4) - (y1 - y2)(x3 - x4)).
* If the denominator is 0, the lines are parallel. We only
* care about intersections in a specific interval.
*/
if (s.ilhs > n_lhs - 2 || s.irhs > n_rhs - 2) {
continue; /* TODO: Optimize */
}
const sigpt_t p1 = lhs[s.ilhs];
const sigpt_t p2 = lhs[s.ilhs + 1];
const sigpt_t p3 = rhs[s.irhs];
const sigpt_t p4 = rhs[s.irhs + 1];
const float denom = (p1.t - p2.t) * (p3.y - p4.y) -
(p1.y - p2.y) * (p3.t - p4.t);
const float numer = (p1.t * p2.y - p1.y * p2.t) * (p3.t - p4.t) -
(p1.t - p2.t) * (p3.t * p4.y - p3.y * p4.t);
/* Lines parallel? */
if (denom == 0.f) {
continue; /* TODO: Optimize */
}
const float t = numer / denom;
/* Intersection outside of line segment range? */
if (t <= p1.t || t >= p2.t || t <= p3.t || t >= p4.t) {
continue; /* TODO: Optimize */
}
ts[ii].t = t;
}
}
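// Worked check of the formula above (illustrative values): the segments
// (0,0)-(2,2) and (0,2)-(2,0) cross at t == 1:
//   numer = (0*2 - 0*2)*(0 - 2) - (0 - 2)*(0*0 - 2*2) = 0 - (-2)*(-4) = -8
//   denom = (0 - 2)*(2 - 0) - (0 - 2)*(0 - 2) = -4 - 4 = -8
//   t = numer / denom = 1
// which lies strictly inside both segments, so ts[ii].t would be updated.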
struct seqpt_same_time : public thrust::binary_function<seqpt_t, seqpt_t, bool>
{
__device__ bool
operator()(const seqpt_t &lhs, const seqpt_t &rhs) const
{
return fabsf(lhs.t - rhs.t) < FLOAT_DELTA;
}
};
void
consolidate(const thrust::device_ptr<sigpt_t> &lhs,
const int nlhs,
const thrust::device_ptr<sigpt_t> &rhs,
const int nrhs,
thrust::device_ptr<sigpt_t> *olhs,
thrust::device_ptr<sigpt_t> *orhs,
int *nout)
{
/* A rough outline of the function:
*
* Construct a sorted time sequence ts
* which contains all t <- lhs, all t <- rhs, and all intersection points
* between lhs and rhs. The sequence contains only unique points.
*
* Using interpolation, construct lhs' and rhs' such that they contain all
* t <- ts.
*/
/* First, extract the time sequences and merge them. */
thrust::device_ptr<seqpt_t> lhs_ts = thrust::device_malloc<seqpt_t>(nlhs);
hipLaunchKernelGGL(( sigpt_to_seqpt), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, lhs.get(), lhs_ts.get(), nlhs, FLAG_LHS);
thrust::device_ptr<seqpt_t> rhs_ts = thrust::device_malloc<seqpt_t>(nrhs);
hipLaunchKernelGGL(( sigpt_to_seqpt), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, rhs.get(), rhs_ts.get(), nrhs, FLAG_RHS);
int n_ts = nrhs + nlhs;
thrust::device_ptr<seqpt_t> ts = thrust::device_malloc<seqpt_t>(n_ts);
    thrust::merge(lhs_ts, lhs_ts + nlhs, rhs_ts, rhs_ts + nrhs, ts, seqpt_less());
thrust::device_free(lhs_ts);
thrust::device_free(rhs_ts);
/* Associate every sequence point t <- ts with the latest element of the other signal
* that satisfies t' <= t. For example, if the current t has FLAG_LHS and t = 3.5,
* we associate it with the latest point p <- rhs such that p.t <= 3.5.
*
* We do this by first extracting the indices of each side to an array,
* running a max() scan over it, and finally merging these arrays back into
* seqpt_t.ilhs and seqpt_t.irhs.
*/
thrust::device_ptr<int> lhs_i_max = thrust::device_malloc<int>(n_ts);
hipLaunchKernelGGL(( extract_i), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, ts.get(), lhs_i_max.get(), n_ts, FLAG_LHS);
thrust::inclusive_scan(lhs_i_max, lhs_i_max + n_ts, lhs_i_max,
thrust::maximum<int>());
thrust::device_ptr<int> rhs_i_max = thrust::device_malloc<int>(n_ts);
hipLaunchKernelGGL(( extract_i), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, ts.get(), rhs_i_max.get(), n_ts, FLAG_RHS);
thrust::inclusive_scan(rhs_i_max, rhs_i_max + n_ts, rhs_i_max,
thrust::maximum<int>());
hipLaunchKernelGGL(( merge_i), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, lhs_i_max.get(), rhs_i_max.get(), ts.get(), n_ts);
thrust::device_free(lhs_i_max);
thrust::device_free(rhs_i_max);
    /* Remove duplicates. Again, this is less trivial than it looks at first:
     * we need to keep the *last* element of each run of equal elements in order
* to pick up the correct associated index at points in time where both LHS
* and RHS are defined. Therefore, run unique() with a reverse iterator
* and use some pointer arithmetic to address the compacted range afterwards.
*/
thrust::reverse_iterator<thrust::device_ptr<seqpt_t> > rts(ts + n_ts);
thrust::reverse_iterator<thrust::device_ptr<seqpt_t> > rts_end =
thrust::unique(rts, rts + n_ts, seqpt_same_time());
const int n_rts = rts_end - rts;
/* Add a proto-intersection after each point in the resulting sequence. */
int n_tsi = n_rts * 2;
thrust::device_ptr<seqpt_t> tsi = thrust::device_malloc<seqpt_t>(n_tsi);
hipLaunchKernelGGL(( insert_proto_intersections), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, ts.get() + n_ts - n_rts, tsi.get(), n_rts);
thrust::device_free(ts);
/* Next, we go through and fill in ISC elements; if there's an intersection
* we set the time accordingly.
*/
hipLaunchKernelGGL(( calc_intersections), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, lhs.get(), rhs.get(), tsi.get(),
nlhs, nrhs, n_tsi);
/* Finally we again remove all duplicate elements (= all proto-intersections
* which did not turn out to actually be real intersections).
*/
thrust::device_ptr<seqpt_t> tsi_end =
thrust::unique(tsi, tsi + n_tsi, seqpt_same_time());
n_tsi = tsi_end - tsi;
/* We now have the complete time sequence stored in tsi, including
* all points in lhs, rhs, and intersections of the two (what a bitch).
* Extrapolate the sigpt_t sequence of both signals for each point <- tsi.
*/
thrust::device_ptr<sigpt_t> lhs_extrapolated = thrust::device_malloc<sigpt_t>(n_tsi);
thrust::device_ptr<sigpt_t> rhs_extrapolated = thrust::device_malloc<sigpt_t>(n_tsi);
hipLaunchKernelGGL(( sigpt_extrapolate), dim3(NBLOCKS), dim3(NTHREADS), 0, 0, lhs.get(), rhs.get(), tsi.get(),
lhs_extrapolated.get(), rhs_extrapolated.get(),
nlhs, nrhs, n_tsi);
*olhs = lhs_extrapolated;
*orhs = rhs_extrapolated;
*nout = n_tsi;
/* TODO: Instead of allocating all of these device vectors between
* kernel calls, try to be a bit smarter about it. For example,
* we could queue the allocations on a separate stream. */
thrust::device_free(tsi);
}
| 32205c054c4e4f6905e8671627a79eeb896ea257.cu | #include "consolidate.hpp"
#include <math.h>
#include <thrust/functional.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/merge.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include "globals.h"
#include "interpolate.hpp"
#define FLAG_LHS (1 << 0)
#define FLAG_RHS (1 << 1)
#define FLAG_ISC (1 << 2)
typedef struct {
float t; /**< The time value. */
int i; /**< The original index. */
int ilhs, irhs; /**< The indices of the last elements in lhs, rhs with t' <= t. */
int flags;
} seqpt_t;
struct seqpt_less : public thrust::binary_function<seqpt_t, seqpt_t, bool>
{
__device__ bool
operator()(const seqpt_t &lhs, const seqpt_t &rhs) const
{
return lhs.t < rhs.t;
}
};
__global__ static void
sigpt_extrapolate(const sigpt_t *lhs,
const sigpt_t *rhs,
const seqpt_t *ts,
sigpt_t *clhs,
sigpt_t *crhs,
const int n_lhs,
const int n_rhs,
const int n_ts)
{
/* Use the information provided by lhs, rhs, and ts
* to extrapolated a signal point sequence for both lhs and rhs for each
* time point in ts. */
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n_ts; i += blockDim.x * gridDim.x) {
const seqpt_t seqpt = ts[i];
const int ilhs = seqpt.ilhs;
const int irhs = seqpt.irhs;
/* TODO: Optimize. */
if (ilhs >= n_lhs - 1) {
clhs[i] = (sigpt_t){ seqpt.t, lhs[ilhs].y, lhs[ilhs].dy };
} else {
clhs[i] = interpolate(lhs + ilhs, lhs + ilhs + 1, seqpt.t);
}
if (irhs >= n_rhs - 1) {
crhs[i] = (sigpt_t){ seqpt.t, rhs[irhs].y, rhs[irhs].dy };
} else {
crhs[i] = interpolate(rhs + irhs, rhs + irhs + 1, seqpt.t);
}
}
}
__global__ static void
extract_i(const seqpt_t *in,
int *out,
const int n,
const int flag)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t s = in[i];
const int has_flag = (s.flags & flag) != 0;
out[i] = has_flag * s.i;
}
}
__global__ static void
merge_i(const int *lhs,
const int *rhs,
seqpt_t *out,
const int n)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
out[i].ilhs = lhs[i];
out[i].irhs = rhs[i];
}
}
__global__ static void
sigpt_to_seqpt(const sigpt_t *in,
seqpt_t *out,
const int n,
const int flags)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t seqpt = { in[i].t, i, 0, 0, flags };
out[i] = seqpt;
}
}
__global__ static void
insert_proto_intersections(const seqpt_t *in,
seqpt_t *out,
const int n)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
seqpt_t seqpt = in[i];
out[i * 2] = seqpt;
seqpt.flags |= FLAG_ISC;
out[i * 2 + 1] = seqpt;
}
}
__global__ static void
calc_intersections(const sigpt_t *lhs,
const sigpt_t *rhs,
seqpt_t *ts,
const int n_lhs,
const int n_rhs,
const int n_ts)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* At this point, we are only interested in intersection elements in ts.
* These are located at every index 2 * i + 1, i <- n_ts / 2.
*
* ts[i].ilhs is the index of the last point in lhs with t <= ts[i].t,
* and ts[i].irhs is the corresponding point in rhs.
*
* This is enough information to determine the time of the signal
* intersection.
*/
for (int i = tid; 2 * i + 1 < n_ts; i += blockDim.x * gridDim.x) {
const int ii = 2 * i + 1;
const seqpt_t s = ts[ii];
/* We now have four points corresponding to the end points of the
* two line segments. (x1, y1) and (x2, y2) for one line segment,
* (x3, y3) and (x4, y4) for the other line segment.
* We are interested in the x coordinate of the intersection:
* x = ((x1y2 - y1x2)(x3 - x4) - (x1 - x2)(x3y4 - y3x4)) /
* ((x1 - x2)(y3 - y4) - (y1 - y2)(x3 - x4)).
* If the denominator is 0, the lines are parallel. We only
* care about intersections in a specific interval.
*/
if (s.ilhs > n_lhs - 2 || s.irhs > n_rhs - 2) {
continue; /* TODO: Optimize */
}
const sigpt_t p1 = lhs[s.ilhs];
const sigpt_t p2 = lhs[s.ilhs + 1];
const sigpt_t p3 = rhs[s.irhs];
const sigpt_t p4 = rhs[s.irhs + 1];
const float denom = (p1.t - p2.t) * (p3.y - p4.y) -
(p1.y - p2.y) * (p3.t - p4.t);
const float numer = (p1.t * p2.y - p1.y * p2.t) * (p3.t - p4.t) -
(p1.t - p2.t) * (p3.t * p4.y - p3.y * p4.t);
/* Lines parallel? */
if (denom == 0.f) {
continue; /* TODO: Optimize */
}
const float t = numer / denom;
/* Intersection outside of line segment range? */
if (t <= p1.t || t >= p2.t || t <= p3.t || t >= p4.t) {
continue; /* TODO: Optimize */
}
ts[ii].t = t;
}
}
struct seqpt_same_time : public thrust::binary_function<seqpt_t, seqpt_t, bool>
{
__device__ bool
operator()(const seqpt_t &lhs, const seqpt_t &rhs) const
{
return fabsf(lhs.t - rhs.t) < FLOAT_DELTA;
}
};
void
consolidate(const thrust::device_ptr<sigpt_t> &lhs,
const int nlhs,
const thrust::device_ptr<sigpt_t> &rhs,
const int nrhs,
thrust::device_ptr<sigpt_t> *olhs,
thrust::device_ptr<sigpt_t> *orhs,
int *nout)
{
/* A rough outline of the function:
*
* Construct a sorted time sequence ts
* which contains all t <- lhs, all t <- rhs, and all intersection points
* between lhs and rhs. The sequence contains only unique points.
*
* Using interpolation, construct lhs' and rhs' such that they contain all
* t <- ts.
*/
/* First, extract the time sequences and merge them. */
thrust::device_ptr<seqpt_t> lhs_ts = thrust::device_malloc<seqpt_t>(nlhs);
sigpt_to_seqpt<<<NBLOCKS, NTHREADS>>>(lhs.get(), lhs_ts.get(), nlhs, FLAG_LHS);
thrust::device_ptr<seqpt_t> rhs_ts = thrust::device_malloc<seqpt_t>(nrhs);
sigpt_to_seqpt<<<NBLOCKS, NTHREADS>>>(rhs.get(), rhs_ts.get(), nrhs, FLAG_RHS);
int n_ts = nrhs + nlhs;
thrust::device_ptr<seqpt_t> ts = thrust::device_malloc<seqpt_t>(n_ts);
    thrust::merge(lhs_ts, lhs_ts + nlhs, rhs_ts, rhs_ts + nrhs, ts, seqpt_less());
thrust::device_free(lhs_ts);
thrust::device_free(rhs_ts);
/* Associate every sequence point t <- ts with the latest element of the other signal
* that satisfies t' <= t. For example, if the current t has FLAG_LHS and t = 3.5,
* we associate it with the latest point p <- rhs such that p.t <= 3.5.
*
* We do this by first extracting the indices of each side to an array,
* running a max() scan over it, and finally merging these arrays back into
* seqpt_t.ilhs and seqpt_t.irhs.
*/
thrust::device_ptr<int> lhs_i_max = thrust::device_malloc<int>(n_ts);
extract_i<<<NBLOCKS, NTHREADS>>>(ts.get(), lhs_i_max.get(), n_ts, FLAG_LHS);
thrust::inclusive_scan(lhs_i_max, lhs_i_max + n_ts, lhs_i_max,
thrust::maximum<int>());
thrust::device_ptr<int> rhs_i_max = thrust::device_malloc<int>(n_ts);
extract_i<<<NBLOCKS, NTHREADS>>>(ts.get(), rhs_i_max.get(), n_ts, FLAG_RHS);
thrust::inclusive_scan(rhs_i_max, rhs_i_max + n_ts, rhs_i_max,
thrust::maximum<int>());
merge_i<<<NBLOCKS, NTHREADS>>>(lhs_i_max.get(), rhs_i_max.get(), ts.get(), n_ts);
thrust::device_free(lhs_i_max);
thrust::device_free(rhs_i_max);
    /* Remove duplicates. Again, this is less trivial than it looks at first:
     * we need to keep the *last* element of each run of equal elements in order
* to pick up the correct associated index at points in time where both LHS
* and RHS are defined. Therefore, run unique() with a reverse iterator
* and use some pointer arithmetic to address the compacted range afterwards.
*/
thrust::reverse_iterator<thrust::device_ptr<seqpt_t> > rts(ts + n_ts);
thrust::reverse_iterator<thrust::device_ptr<seqpt_t> > rts_end =
thrust::unique(rts, rts + n_ts, seqpt_same_time());
const int n_rts = rts_end - rts;
/* Add a proto-intersection after each point in the resulting sequence. */
int n_tsi = n_rts * 2;
thrust::device_ptr<seqpt_t> tsi = thrust::device_malloc<seqpt_t>(n_tsi);
insert_proto_intersections<<<NBLOCKS, NTHREADS>>>(ts.get() + n_ts - n_rts, tsi.get(), n_rts);
thrust::device_free(ts);
/* Next, we go through and fill in ISC elements; if there's an intersection
* we set the time accordingly.
*/
calc_intersections<<<NBLOCKS, NTHREADS>>>(lhs.get(), rhs.get(), tsi.get(),
nlhs, nrhs, n_tsi);
/* Finally we again remove all duplicate elements (= all proto-intersections
* which did not turn out to actually be real intersections).
*/
thrust::device_ptr<seqpt_t> tsi_end =
thrust::unique(tsi, tsi + n_tsi, seqpt_same_time());
n_tsi = tsi_end - tsi;
/* We now have the complete time sequence stored in tsi, including
* all points in lhs, rhs, and intersections of the two (what a bitch).
* Extrapolate the sigpt_t sequence of both signals for each point <- tsi.
*/
thrust::device_ptr<sigpt_t> lhs_extrapolated = thrust::device_malloc<sigpt_t>(n_tsi);
thrust::device_ptr<sigpt_t> rhs_extrapolated = thrust::device_malloc<sigpt_t>(n_tsi);
sigpt_extrapolate<<<NBLOCKS, NTHREADS>>>(lhs.get(), rhs.get(), tsi.get(),
lhs_extrapolated.get(), rhs_extrapolated.get(),
nlhs, nrhs, n_tsi);
*olhs = lhs_extrapolated;
*orhs = rhs_extrapolated;
*nout = n_tsi;
/* TODO: Instead of allocating all of these device vectors between
* kernel calls, try to be a bit smarter about it. For example,
* we could queue the allocations on a separate stream. */
thrust::device_free(tsi);
}
|
c67da8236d90c5f0a8982ea0bca342122f598cd6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int errorM = 0;
float device_time;
float host_time;
int* size = NULL; //(int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
//real_mem_size is the memsize rounded to the next highest power of two
//this is so we can memset to 0 later to enable non-power of two arrays
int exponent = (int)ceil(log2 ((double)DEFAULT_NUM_ELEMENTS));
int real_mem_size = (int)pow(2, exponent) * sizeof(float);
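    // Worked example (illustrative): DEFAULT_NUM_ELEMENTS == 16777216 == 2^24
    // is already a power of two, so exponent == 24 and real_mem_size ==
    // 2^24 * sizeof(float) == 64 MiB. A hypothetical 1000-element input would
    // round up to 2^10 == 1024 floats; the unused tail is zeroed by the
    // hipMemset calls below, which is what makes non-power-of-two sizes work.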
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument which indicate the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file which indicate the size of the array,
// then input data from the file name specified by 2nd argument and write the
// SCAN output to file name specified by the 3rd argument.
switch(argc-1)
{
case 2:
// Determine size of array
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
errorM = ReadFile(h_data, argv[2], size[0]);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timer);
computeGold( reference, h_data, num_elements);
cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
host_time = cutGetTimerValue(timer);
CUT_SAFE_CALL(cutDeleteTimer(timer));
// **===-------- Lab4: Allocate data structure here -----------===**
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
float* sums = NULL;
float* incr = NULL;
float* incr_sums = NULL;
float* incr_incr = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, real_mem_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, real_mem_size));
//the consolidation arrays
int num_blocks_needed = DEFAULT_NUM_ELEMENTS/(BLOCK_SIZE*2); // one partial sum per scan block of BLOCK_SIZE*2 elements
CUDA_SAFE_CALL( hipMalloc( (void**) &sums, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( hipMalloc( (void**) &incr, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( hipMalloc( (void**) &incr_sums, sizeof(float)*20));
CUDA_SAFE_CALL( hipMalloc( (void**) &incr_incr, sizeof(float)*20));
//memset crucial to enable non power of 2 arrays
CUDA_SAFE_CALL( hipMemset( d_idata, 0, real_mem_size));
CUDA_SAFE_CALL( hipMemset( d_odata, 0, real_mem_size));
CUDA_SAFE_CALL( hipMemset( sums, 0, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( hipMemset( incr, 0, sizeof(float)*num_blocks_needed));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemset( incr_sums, 0, sizeof(float)*20));
CUDA_SAFE_CALL( hipMemset( incr_incr, 0, sizeof(float)*20));
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, sums, incr, incr_sums, incr_incr, 16);
// Run the prescan
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutStartTimer(timer);
// **===-------- Lab4: Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, sums, incr, incr_sums, incr_incr, num_elements);
// **===-----------------------------------------------------------===**
CUDA_SAFE_CALL( hipDeviceSynchronize() );
cutStopTimer(timer);
printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
device_time = cutGetTimerValue(timer);
printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Lab4: Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements,
hipMemcpyDeviceToHost));
float* h_sums = (float*) malloc( sizeof(float)*num_blocks_needed);
float* h_incr = (float*) malloc( sizeof(float)*num_blocks_needed);
CUDA_SAFE_CALL(hipMemcpy( h_sums, sums, sizeof(float) * num_blocks_needed,
hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy( h_incr, incr, sizeof(float) * num_blocks_needed,
hipMemcpyDeviceToHost));
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
//debug
int count = 0;
// int count_cuda = 0;
// for (int i=0; i<4096;i +=1){
// count += reference[i];
// count_cuda += h_data[i];
// if (reference[i]==h_data[i])
// printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// // else
// // printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// }
// for (int i=0; i<4096;i +=1){
// printf("%i sum %f incr %f \n",i,h_sums[i],h_incr[i]);
// // else
// // printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// }
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
cutDeleteTimer(timer);
free( h_data);
free( reference);
free( h_sums);
free( h_incr);
hipFree( d_odata);
hipFree( d_idata);
hipFree( sums);
hipFree( incr);
hipFree( incr_sums);
hipFree( incr_incr);
}
int ReadFile(float* M, char* file_name, int size)
{
unsigned int elements_read = size;
if (cutReadFilef(file_name, &M, &elements_read, true))
return 1;
else
return 0;
}
void WriteFile(float* M, char* file_name, int size)
{
cutWriteFilef(file_name, M, size, 0.0001f);
}
| c67da8236d90c5f0a8982ea0bca342122f598cd6.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int errorM = 0;
float device_time;
float host_time;
int* size = NULL; //(int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
//real_mem_size is the memsize rounded to the next highest power of two
//this is so we can memset to 0 later to enable non-power of two arrays
int exponent = (int)ceil(log2 ((double)DEFAULT_NUM_ELEMENTS));
int real_mem_size = (int)pow(2, exponent) * sizeof(float);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument, which indicates the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file, which indicates the size of the array,
// then read the input data from the file name specified by the 2nd argument and write the
// SCAN output to the file name specified by the 3rd argument.
switch(argc-1)
{
case 2:
// Determine size of array
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
errorM = ReadFile(h_data, argv[2], size[0]);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and MAX_RAND-1
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timer);
computeGold( reference, h_data, num_elements);
cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
host_time = cutGetTimerValue(timer);
CUT_SAFE_CALL(cutDeleteTimer(timer));
// **===-------- Lab4: Allocate data structure here -----------===**
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
float* sums = NULL;
float* incr = NULL;
float* incr_sums = NULL;
float* incr_incr = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, real_mem_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, real_mem_size));
//the consolidation arrays
int num_blocks_needed = DEFAULT_NUM_ELEMENTS/(BLOCK_SIZE*2); // one partial sum per scan block of BLOCK_SIZE*2 elements
CUDA_SAFE_CALL( cudaMalloc( (void**) &sums, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( cudaMalloc( (void**) &incr, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( cudaMalloc( (void**) &incr_sums, sizeof(float)*20));
CUDA_SAFE_CALL( cudaMalloc( (void**) &incr_incr, sizeof(float)*20));
//memset crucial to enable non power of 2 arrays
CUDA_SAFE_CALL( cudaMemset( d_idata, 0, real_mem_size));
CUDA_SAFE_CALL( cudaMemset( d_odata, 0, real_mem_size));
CUDA_SAFE_CALL( cudaMemset( sums, 0, sizeof(float)*num_blocks_needed));
CUDA_SAFE_CALL( cudaMemset( incr, 0, sizeof(float)*num_blocks_needed));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemset( incr_sums, 0, sizeof(float)*20));
CUDA_SAFE_CALL( cudaMemset( incr_incr, 0, sizeof(float)*20));
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, sums, incr, incr_sums, incr_incr, 16);
// Run the prescan
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutStartTimer(timer);
// **===-------- Lab4: Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, sums, incr, incr_sums, incr_incr, num_elements);
// **===-----------------------------------------------------------===**
CUDA_SAFE_CALL( cudaThreadSynchronize() );
cutStopTimer(timer);
printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
device_time = cutGetTimerValue(timer);
printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Lab4: Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements,
cudaMemcpyDeviceToHost));
float* h_sums = (float*) malloc( sizeof(float)*num_blocks_needed);
float* h_incr = (float*) malloc( sizeof(float)*num_blocks_needed);
CUDA_SAFE_CALL(cudaMemcpy( h_sums, sums, sizeof(float) * num_blocks_needed,
cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy( h_incr, incr, sizeof(float) * num_blocks_needed,
cudaMemcpyDeviceToHost));
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
//debug
int count = 0;
// int count_cuda = 0;
// for (int i=0; i<4096;i +=1){
// count += reference[i];
// count_cuda += h_data[i];
// if (reference[i]==h_data[i])
// printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// // else
// // printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// }
// for (int i=0; i<4096;i +=1){
// printf("%i sum %f incr %f \n",i,h_sums[i],h_incr[i]);
// // else
// // printf("%i gold %f cuda %f \n",i,reference[i],h_data[i]);
// }
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
cutDeleteTimer(timer);
free( h_data);
free( reference);
free( h_sums);
free( h_incr);
cudaFree( d_odata);
cudaFree( d_idata);
cudaFree( sums);
cudaFree( incr);
cudaFree( incr_sums);
cudaFree( incr_incr);
}
int ReadFile(float* M, char* file_name, int size)
{
unsigned int elements_read = size;
if (cutReadFilef(file_name, &M, &elements_read, true))
return 1;
else
return 0;
}
void WriteFile(float* M, char* file_name, int size)
{
cutWriteFilef(file_name, M, size, 0.0001f);
}
|
d9dfd8fc016cc597c252070695962f8053af11b9.hip | // !!! This is a file automatically generated by hipify!!!
#include"cuda_need.h"
void mallocHostAll(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data;
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc((void**)&data, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
hipHostMalloc((void**)&host2dev, sizeof(float)*datasize*datasize, hipHostMallocMapped | hipHostMallocWriteCombined);
//cudaHostAllocWriteCombined: write-combined pinned memory speeds up GPU reads of this buffer, but performance drops if the CPU also needs to read it
readData(filepath, data, datasize);
totalbegintime = clock();
hipHostGetDevicePointer(&dev_host2dev, host2dev, 0);
hipHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
hipDeviceSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOSTALL:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOSTALL:%.3f\n", delay);
//hipMemcpy(data, d_data, sizeof(float)*datasize*datasize, hipMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
// free the mapped host allocations; d_data and dev_host2dev are only device-side
// aliases obtained from hipHostGetDevicePointer and must not be freed separately
hipHostFree(data);
hipHostFree(host2dev);
}
| d9dfd8fc016cc597c252070695962f8053af11b9.cu | #include"cuda_need.h"
void mallocHostAll(char filepath[], int datasize, float *kernel_HOST, float *total_HOST){
float *data;
float *host2dev;
float *d_data;
float *dev_host2dev;
float delay;
clock_t begintime, endtime, totalbegintime, totalendtime;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc((void**)&data, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
cudaHostAlloc((void**)&host2dev, sizeof(float)*datasize*datasize, cudaHostAllocMapped | cudaHostAllocWriteCombined);
//cudaHostAllocWriteCombined: write-combined pinned memory speeds up GPU reads of this buffer, but performance drops if the CPU also needs to read it
readData(filepath, data, datasize);
totalbegintime = clock();
cudaHostGetDevicePointer(&dev_host2dev, host2dev, 0);
cudaHostGetDevicePointer(&d_data, data, 0);
begintime = clock();
dim3 dimBlock(32, 32);
dim3 dimGrid((datasize + dimBlock.x - 1) / (dimBlock.x), (datasize + dimBlock.y - 1) / (dimBlock.y));
smooth1D << <dimGrid, dimBlock >> >(d_data, dev_host2dev, datasize, WINSIZE);
cudaThreadSynchronize();
endtime = clock();
totalendtime = clock();
delay = (double)(endtime - begintime) * 1000 / CLOCKS_PER_SEC;
*kernel_HOST = *kernel_HOST + delay;
printf("in function kernel_HOSTALL:%.3f\n", delay);
delay = (double)(totalendtime - totalbegintime) * 1000 / CLOCKS_PER_SEC;
*total_HOST = *total_HOST + delay;
printf("in funtcion total_HOSTALL:%.3f\n", delay);
//cudaMemcpy(data, d_data, sizeof(float)*datasize*datasize, cudaMemcpyDeviceToHost);
// for (int i = 0; i < datasize*datasize; i++){
// if (i%datasize == 0)
// printf("\n");
// printf("%.3f ", host2dev[i]);
// }
// printf("\n");
/* for (int i = 0; i <10; i++){
if (i%datasize == 0)
printf("\n");
printf("%.3f ", host2dev[i]);
}
printf("\n");*/
// free the mapped host allocations; d_data and dev_host2dev are only device-side
// aliases obtained from cudaHostGetDevicePointer and must not be freed separately
cudaFreeHost(data);
cudaFreeHost(host2dev);
}
|
3c6ccafbfc904b631bf0d58aa3cc2149bf181453.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernaldownsweep(int n, int d, int* input) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// 2 ^ d
int pow_2d = 1 << d;
// 2 ^ (d+1)
int pow_2d1 = 1 << (d + 1);
// we want the even indices to add into the odd indices
if (k % pow_2d1 == 0) {
int t = input[k + pow_2d - 1];
input[k + pow_2d - 1] = input[k + pow_2d1 - 1];
input[k + pow_2d1 - 1] += t;
}
}
__global__ void kernalupsweep(int n, int d, int* input) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// 2 ^ d
int pow_2d = 1 << d;
// 2 ^ (d+1)
int pow_2d1 = 1 << (d + 1);
// we want the even indices to add into the odd indices
if (k % pow_2d1 == 0) {
input[k + pow_2d1 - 1] += input[k + pow_2d - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// The idea is to build a balanced binary tree on the input data and
// sweep it to and from the root to compute the prefix sum. A binary
// tree with n leaves has d = log2 n levels, and each level d has 2^d nodes.
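// For illustration, the two sweeps on the 8-element input [3,1,7,0,4,1,6,3]:
//   up-sweep   d=0: [3,4,7, 7,4, 5,6, 9]
//              d=1: [3,4,7,11,4, 5,6,14]
//              d=2: [3,4,7,11,4, 5,6,25]   (last element = total sum)
//   set the last element to 0, then
//   down-sweep d=2: [3,4,7, 0,4, 5,6,11]
//              d=1: [3,0,7, 4,4,11,6,16]
//              d=0: [0,3,4,11,11,15,16,22] = exclusive prefix sum of the input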
int padded_size = 1 << ilog2ceil(n);
int* temp_array = new int[padded_size];
// make sure to pad temp array with 0s!
// to do: is this faster or cuda memcpying 0s faster? hmm
for (int i = 0; i < padded_size; i++) {
if (i < n) {
temp_array[i] = idata[i];
}
else {
temp_array[i] = 0;
}
}
// initialize some temporary buffers to write in place
// your intermediate array sizes will need to be rounded to the next power of two.
int* temp_input;
hipMalloc((void**)&temp_input, padded_size * sizeof(int));
// fill temp input buffer with the padded array above
hipMemcpy(temp_input, temp_array, padded_size * sizeof(int), hipMemcpyHostToDevice);
// set up the blocks and grids
int blockSize = 64;
dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
timer().startGpuTimer();
// The algorithm consists of two phases :
// the reduce phase(also known as the up - sweep phase)
// and the down - sweep phase.
// up sweep phase
for (int d = 0; d < ilog2ceil(n); d++) {
kernalupsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_input);
}
// replace last index as 0
int zero = 0;
hipMemcpy(temp_input + padded_size - 1, &zero, sizeof(int), hipMemcpyHostToDevice);
// downsweep phase
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
kernaldownsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_input);
}
timer().endGpuTimer();
// copy from GPU to CPU
hipMemcpy(temp_array, temp_input, padded_size * sizeof(int), hipMemcpyDeviceToHost);
// copy into outdata
for (int i = 0; i < n; i++) {
odata[i] = temp_array[i];
}
// cleanup
hipFree(temp_input);
delete[] temp_array;
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// we want to setup stuff for scans because we don't want the setup to be within the timer
// your intermediate array sizes will need to be rounded to the next power of two.
int padded_size = 1 << ilog2ceil(n);
int* temp_array = new int[padded_size];
int* temp_bool; // stores the bool array on gpu
hipMalloc((void**)&temp_bool, padded_size * sizeof(int));
int* temp_scan_output; // stores the scanned bool array
hipMalloc((void**)&temp_scan_output, padded_size * sizeof(int));
// make sure to pad temp array with 0s!
// to do: is this faster or cuda memcpying 0s faster? hmm
for (int i = 0; i < padded_size; i++) {
if (i < n) {
temp_array[i] = idata[i];
}
else {
temp_array[i] = 0;
}
}
int* temp_input; // stores the padded input on the gpu
hipMalloc((void**)&temp_input, padded_size * sizeof(int));
// fill with padded data from above
hipMemcpy(temp_input, temp_array, padded_size * sizeof(int), hipMemcpyHostToDevice);
int* temp_output; // stores the output of the scatter on the gpu
hipMalloc((void**)&temp_output, padded_size * sizeof(int));
// set up the blocks and grids
int blockSize = 128;
dim3 threadsPerBlock(blockSize);
dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
timer().startGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// similar to the cpu... we want to:
// fill temp array with 0 if idata is 0 or 1 otherwise
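// For illustration, compacting [3,0,7,0,4]:
//   map-to-bool    -> [1,0,1,0,1]
//   exclusive scan -> [0,1,1,2,2]   (scan value = output slot of each kept element)
//   scatter        -> [3,7,4]; with zero padding, the last element of the scanned
//   bool array gives the number of kept elements (3 here)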
// ================= MAP TO BOOL =======================
Common::kernMapToBoolean << <blocksPerGrid, threadsPerBlock >> > (padded_size, temp_bool, temp_input);
// ================= SCAN ===========================
// The algorithm consists of two phases :
// the reduce phase(also known as the up - sweep phase)
// and the down - sweep phase.
hipMemcpy(temp_scan_output, temp_bool, padded_size * sizeof(int), hipMemcpyDeviceToDevice);
// up sweep phase
for (int d = 0; d < ilog2ceil(n); d++) {
kernalupsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_scan_output);
}
// replace last index as 0
int zero = 0;
hipMemcpy(temp_scan_output + padded_size - 1, &zero, sizeof(int), hipMemcpyHostToDevice);
// downsweep phase
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
kernaldownsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_scan_output);
}
// ================= SCATTER =======================
Common::kernScatter << <blocksPerGrid, threadsPerBlock >> > (padded_size, temp_output, temp_input, temp_bool, temp_scan_output);
timer().endGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// we want to copy information from gpu to cpu now
hipMemcpy(odata, temp_output, n * sizeof(int), hipMemcpyDeviceToHost); // only the first n slots can hold compacted values
// we also want to print the result of the scan so we can count how many non-zeros there are
int result = -1;
hipMemcpy(&result, temp_scan_output + padded_size - 1, sizeof(int), hipMemcpyDeviceToHost);
// cleanup
hipFree(temp_output);
hipFree(temp_input);
hipFree(temp_bool);
hipFree(temp_scan_output);
delete[] temp_array;
return result;
}
}
}
| 3c6ccafbfc904b631bf0d58aa3cc2149bf181453.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernaldownsweep(int n, int d, int* input) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// 2 ^ d
int pow_2d = 1 << d;
// 2 ^ (d+1)
int pow_2d1 = 1 << (d + 1);
// we want the even indices to add into the odd indices
if (k % pow_2d1 == 0) {
int t = input[k + pow_2d - 1];
input[k + pow_2d - 1] = input[k + pow_2d1 - 1];
input[k + pow_2d1 - 1] += t;
}
}
__global__ void kernalupsweep(int n, int d, int* input) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n) return;
// 2 ^ d
int pow_2d = 1 << d;
// 2 ^ (d+1)
int pow_2d1 = 1 << (d + 1);
// we want the even indices to add into the odd indices
if (k % pow_2d1 == 0) {
input[k + pow_2d1 - 1] += input[k + pow_2d - 1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// The idea is to build a balanced binary tree on the input data and
// sweep it to and from the root to compute the prefix sum. A binary
// tree with n leaves has d = log2 n levels, and each level d has 2^d nodes.
int padded_size = 1 << ilog2ceil(n);
int* temp_array = new int[padded_size];
// make sure to pad temp array with 0s!
// to do: is this faster or cuda memcpying 0s faster? hmm
for (int i = 0; i < padded_size; i++) {
if (i < n) {
temp_array[i] = idata[i];
}
else {
temp_array[i] = 0;
}
}
// initialize some temporary buffers to write in place
// your intermediate array sizes will need to be rounded to the next power of two.
int* temp_input;
cudaMalloc((void**)&temp_input, padded_size * sizeof(int));
// fill temp input buffer with the padded array above
cudaMemcpy(temp_input, temp_array, padded_size * sizeof(int), cudaMemcpyHostToDevice);
// set up the blocks and grids
int blockSize = 64;
dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
dim3 threadsPerBlock(blockSize);
timer().startGpuTimer();
// The algorithm consists of two phases :
// the reduce phase(also known as the up - sweep phase)
// and the down - sweep phase.
// up sweep phase
for (int d = 0; d < ilog2ceil(n); d++) {
kernalupsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_input);
}
// replace last index as 0
int zero = 0;
cudaMemcpy(temp_input + padded_size - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
// downsweep phase
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
kernaldownsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_input);
}
timer().endGpuTimer();
// copy from GPU to CPU
cudaMemcpy(temp_array, temp_input, padded_size * sizeof(int), cudaMemcpyDeviceToHost);
// copy into outdata
for (int i = 0; i < n; i++) {
odata[i] = temp_array[i];
}
// cleanup
cudaFree(temp_input);
delete[] temp_array;
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
// we want to setup stuff for scans because we don't want the setup to be within the timer
// your intermediate array sizes will need to be rounded to the next power of two.
int padded_size = 1 << ilog2ceil(n);
int* temp_array = new int[padded_size];
int* temp_bool; // stores the bool array on gpu
cudaMalloc((void**)&temp_bool, padded_size * sizeof(int));
int* temp_scan_output; // stores the scanned bool array
cudaMalloc((void**)&temp_scan_output, padded_size * sizeof(int));
// make sure to pad temp array with 0s!
// to do: is this faster or cuda memcpying 0s faster? hmm
for (int i = 0; i < padded_size; i++) {
if (i < n) {
temp_array[i] = idata[i];
}
else {
temp_array[i] = 0;
}
}
int* temp_input; // stores the padded input on the gpu
cudaMalloc((void**)&temp_input, padded_size * sizeof(int));
// fill with padded data from above
cudaMemcpy(temp_input, temp_array, padded_size * sizeof(int), cudaMemcpyHostToDevice);
int* temp_output; // stores the output of the scatter on the gpu
cudaMalloc((void**)&temp_output, padded_size * sizeof(int));
// set up the blocks and grids
int blockSize = 128;
dim3 threadsPerBlock(blockSize);
dim3 blocksPerGrid((padded_size + blockSize - 1) / blockSize);
timer().startGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// similar to the cpu... we want to:
// fill temp array with 0 if idata is 0 or 1 otherwise
// ================= MAP TO BOOL =======================
Common::kernMapToBoolean << <blocksPerGrid, threadsPerBlock >> > (padded_size, temp_bool, temp_input);
// ================= SCAN ===========================
// The algorithm consists of two phases :
// the reduce phase(also known as the up - sweep phase)
// and the down - sweep phase.
cudaMemcpy(temp_scan_output, temp_bool, padded_size * sizeof(int), cudaMemcpyDeviceToDevice);
// up sweep phase
for (int d = 0; d < ilog2ceil(n); d++) {
kernalupsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_scan_output);
}
// replace last index as 0
int zero = 0;
cudaMemcpy(temp_scan_output + padded_size - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
// downsweep phase
for (int d = ilog2ceil(n) - 1; d >= 0; d--) {
kernaldownsweep << <blocksPerGrid, threadsPerBlock >> > (padded_size, d, temp_scan_output);
}
// ================= SCATTER =======================
Common::kernScatter << <blocksPerGrid, threadsPerBlock >> > (padded_size, temp_output, temp_input, temp_bool, temp_scan_output);
timer().endGpuTimer(); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// we want to copy information from gpu to cpu now
cudaMemcpy(odata, temp_output, n * sizeof(int), cudaMemcpyDeviceToHost); // only the first n slots can hold compacted values
// we also want to print the result of the scan so we can count how many non-zeros there are
int result = -1;
cudaMemcpy(&result, temp_scan_output + padded_size - 1, sizeof(int), cudaMemcpyDeviceToHost);
// cleanup
cudaFree(temp_output);
cudaFree(temp_input);
cudaFree(temp_bool);
cudaFree(temp_scan_output);
delete[] temp_array;
return result;
}
}
}
|
c36c8065bf7d675aad2d0f7d5fa178b502db6372.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helpers/StringUtils.h>
#include <loops/broadcasting.h>
#include <loops/legacy_ops.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
#include <stdexcept>
#include <string>
namespace functions {
namespace broadcast {}
} // namespace functions
| c36c8065bf7d675aad2d0f7d5fa178b502db6372.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/StringUtils.h>
#include <loops/broadcasting.h>
#include <loops/legacy_ops.h>
#include <ops/specials_cuda.h>
#include <system/Environment.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
#include <stdexcept>
#include <string>
namespace functions {
namespace broadcast {}
} // namespace functions
|
3dc1d9feb53ebf2849ca278972acb2ca5f519ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <helper_cuda.h>
#include <rocblas.h>
#include "cuda_timing.h"
const int NB_THREADS_PER_BLOC = 256;
__global__
void add(int size, double *d_C, double *d_A, double *d_B) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
d_C[tid] = d_A[tid] + d_B[tid];
}
}
__global__
void inv(int size, double *d_x) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
d_x[tid] = 1. / d_x[tid];
}
}
__global__
void inv_dot(int size, double *d_x, double *d_y, double *dot) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
// atomicAdd avoids the data race of a plain "*dot +=" across threads (double atomicAdd needs a GPU that supports it)
atomicAdd(dot, (1. / d_x[tid]) * (1. / d_y[tid]));
}
}
int main(int argc, char **argv) {
if (argc != 2) {
printf("Usage: add nb components\n");
exit(0);
}
int size = atoi(argv[1]);
int i;
hipblasStatus_t cublas_status;
hipblasHandle_t cublas;
cublas_status = hipblasCreate(&cublas);
// CPU memory
double *h_arrayA = (double*)malloc(size * sizeof(double));
double *h_arrayB = (double*)malloc(size * sizeof(double));
double *h_arrayC = (double*)malloc(size * sizeof(double));
double *h_arrayCgpu = (double*)malloc(size * sizeof(double));
// GPU memory
double *d_arrayA, *d_arrayB, *d_arrayC;
checkCudaErrors(
hipMalloc((void**)&d_arrayA, size * sizeof(double))
);
checkCudaErrors(
hipMalloc((void**)&d_arrayB, size * sizeof(double))
);
checkCudaErrors(
hipMalloc((void**)&d_arrayC, size * sizeof(double))
);
for (i = 0; i < size; i++) {
h_arrayA[i] = i + 1;
h_arrayB[i] = 2 * (i + 1);
}
// CPU loop
double cpu_dot = 0.0;
timeit__("CPU processing time: ", {
for (i = 0; i < size; i++) {
h_arrayC[i] = h_arrayA[i] + h_arrayB[i];
cpu_dot += (1. / h_arrayC[i]) * (1. / h_arrayA[i]);
}
})
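// Side note (not verified by the program): with h_arrayA[i] = i+1 and
// h_arrayB[i] = 2*(i+1) each term equals 1/(3*(i+1)^2), so cpu_dot should
// approach pi^2/18 ~ 0.548 for large sizes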
// GPU kernel loop
int nb_blocs = (size + NB_THREADS_PER_BLOC - 1) / NB_THREADS_PER_BLOC;
double gpu_dot_1 = 0.0;
timeit__("GPU processing time (inv + hipblasDdot): ", {
cublas_status = hipblasSetVector(size, sizeof(double), h_arrayA, 1, d_arrayA, 1);
cublas_status = hipblasSetVector(size, sizeof(double), h_arrayB, 1, d_arrayB, 1);
hipLaunchKernelGGL(( add), dim3(nb_blocs), dim3(NB_THREADS_PER_BLOC), 0, 0, size, d_arrayC, d_arrayA, d_arrayB);
hipLaunchKernelGGL(( inv), dim3(nb_blocs), dim3(NB_THREADS_PER_BLOC), 0, 0, size, d_arrayA);
hipLaunchKernelGGL(( inv), dim3(nb_blocs), dim3(NB_THREADS_PER_BLOC), 0, 0, size, d_arrayC);
cublas_status = hipblasDdot(cublas, size, d_arrayC, 1, d_arrayA, 1, &gpu_dot_1);
})
/*
double *gpu_dot_2 = (double*)malloc(sizeof(double));
double *gpu_dot_2_gpu;
checkCudaErrors(
hipMalloc((void**)&gpu_dot_2_gpu, sizeof(double))
);
*gpu_dot_2_gpu = 0.0;
timeit__("GPU processing time (inv_dot): ", {
cublas_status = hipblasSetVector(size, sizeof(double), h_arrayA, 1, d_arrayA, 1);
cublas_status = hipblasSetVector(size, sizeof(double), h_arrayB, 1, d_arrayB, 1);
hipLaunchKernelGGL(( add), dim3(nb_blocs), dim3(NB_THREADS_PER_BLOC), 0, 0, size, d_arrayC, d_arrayA, d_arrayB);
inv_dot<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayA, d_arrayC, gpu_dot_2_gpu);
checkCudaErrors(
hipMemcpy(
gpu_dot_2_gpu, gpu_dot_2, sizeof(double),
hipMemcpyDeviceToHost
)
);
})
*/
// Check equivalence
// assert(cpu_dot == gpu_dot);
// Clean up
checkCudaErrors(hipFree(d_arrayA));
checkCudaErrors(hipFree(d_arrayB));
checkCudaErrors(hipFree(d_arrayC));
free(h_arrayA);
free(h_arrayB);
free(h_arrayC);
free(h_arrayCgpu);
hipblasDestroy(cublas);
return 0;
}
| 3dc1d9feb53ebf2849ca278972acb2ca5f519ba0.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <helper_cuda.h>
#include <cublas_v2.h>
#include "cuda_timing.h"
const int NB_THREADS_PER_BLOC = 256;
__global__
void add(int size, double *d_C, double *d_A, double *d_B) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
d_C[tid] = d_A[tid] + d_B[tid];
}
}
__global__
void inv(int size, double *d_x) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
d_x[tid] = 1. / d_x[tid];
}
}
__global__
void inv_dot(int size, double *d_x, double *d_y, double *dot) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
// atomicAdd avoids the data race of a plain "*dot +=" across threads (double atomicAdd needs compute capability 6.0+)
atomicAdd(dot, (1. / d_x[tid]) * (1. / d_y[tid]));
}
}
int main(int argc, char **argv) {
if (argc != 2) {
printf("Usage: add nb components\n");
exit(0);
}
int size = atoi(argv[1]);
int i;
cublasStatus_t cublas_status;
cublasHandle_t cublas;
cublas_status = cublasCreate(&cublas);
// CPU memory
double *h_arrayA = (double*)malloc(size * sizeof(double));
double *h_arrayB = (double*)malloc(size * sizeof(double));
double *h_arrayC = (double*)malloc(size * sizeof(double));
double *h_arrayCgpu = (double*)malloc(size * sizeof(double));
// GPU memory
double *d_arrayA, *d_arrayB, *d_arrayC;
checkCudaErrors(
cudaMalloc((void**)&d_arrayA, size * sizeof(double))
);
checkCudaErrors(
cudaMalloc((void**)&d_arrayB, size * sizeof(double))
);
checkCudaErrors(
cudaMalloc((void**)&d_arrayC, size * sizeof(double))
);
for (i = 0; i < size; i++) {
h_arrayA[i] = i + 1;
h_arrayB[i] = 2 * (i + 1);
}
// CPU loop
double cpu_dot = 0.0;
timeit__("CPU processing time: ", {
for (i = 0; i < size; i++) {
h_arrayC[i] = h_arrayA[i] + h_arrayB[i];
cpu_dot += (1. / h_arrayC[i]) * (1. / h_arrayA[i]);
}
})
// GPU kernel loop
int nb_blocs = (size + NB_THREADS_PER_BLOC - 1) / NB_THREADS_PER_BLOC;
double gpu_dot_1 = 0.0;
timeit__("GPU processing time (inv + cublasDdot): ", {
cublas_status = cublasSetVector(size, sizeof(double), h_arrayA, 1, d_arrayA, 1);
cublas_status = cublasSetVector(size, sizeof(double), h_arrayB, 1, d_arrayB, 1);
add<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayC, d_arrayA, d_arrayB);
inv<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayA);
inv<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayC);
cublas_status = cublasDdot(cublas, size, d_arrayC, 1, d_arrayA, 1, &gpu_dot_1);
})
/*
double *gpu_dot_2 = (double*)malloc(sizeof(double));
double *gpu_dot_2_gpu;
checkCudaErrors(
cudaMalloc((void**)&gpu_dot_2_gpu, sizeof(double))
);
*gpu_dot_2_gpu = 0.0;
timeit__("GPU processing time (inv_dot): ", {
cublas_status = cublasSetVector(size, sizeof(double), h_arrayA, 1, d_arrayA, 1);
cublas_status = cublasSetVector(size, sizeof(double), h_arrayB, 1, d_arrayB, 1);
add<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayC, d_arrayA, d_arrayB);
inv_dot<<<nb_blocs, NB_THREADS_PER_BLOC>>>(size, d_arrayA, d_arrayC, gpu_dot_2_gpu);
checkCudaErrors(
cudaMemcpy(
gpu_dot_2_gpu, gpu_dot_2, sizeof(double),
cudaMemcpyDeviceToHost
)
);
})
*/
// Check equivalence
// assert(cpu_dot == gpu_dot);
// Clean up
checkCudaErrors(cudaFree(d_arrayA));
checkCudaErrors(cudaFree(d_arrayB));
checkCudaErrors(cudaFree(d_arrayC));
free(h_arrayA);
free(h_arrayB);
free(h_arrayC);
free(h_arrayCgpu);
cublasDestroy(cublas);
return 0;
}
|
9400b62601a5e9b8cf8515affce5e5000a8ee1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <hipcub/hipcub.hpp>
#include <cuml/common/logger.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/svm/svc.hpp>
#include <cuml/svm/svr.hpp>
#include <iostream>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelmatrices.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/random/rng.cuh>
#include <random/make_blobs.cuh>
#include <string>
#include <svm/smoblocksolve.cuh>
#include <svm/smosolver.cuh>
#include <svm/workingset.cuh>
#include <type_traits>
#include <vector>
namespace ML {
namespace SVM {
using namespace MLCommon;
using namespace Matrix;
// Initialize device vector C_vec with scalar C
template <typename math_t>
void init_C(math_t C, math_t *C_vec, int n, hipStream_t stream) {
thrust::device_ptr<math_t> c_ptr(C_vec);
thrust::fill(thrust::hip::par.on(stream), c_ptr, c_ptr + n, C);
}
template <typename math_t>
class WorkingSetTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
raft::allocate(f_dev, 10);
raft::allocate(y_dev, 10);
raft::allocate(C_dev, 10);
raft::allocate(alpha_dev, 10);
init_C(C, C_dev, 10, stream);
raft::update_device(f_dev, f_host, 10, stream);
raft::update_device(y_dev, y_host, 10, stream);
raft::update_device(alpha_dev, alpha_host, 10, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(f_dev));
CUDA_CHECK(hipFree(y_dev));
CUDA_CHECK(hipFree(C_dev));
CUDA_CHECK(hipFree(alpha_dev));
}
raft::handle_t handle;
hipStream_t stream;
WorkingSet<math_t> *ws;
math_t f_host[10] = {1, 3, 10, 4, 2, 8, 6, 5, 9, 7};
math_t *f_dev;
math_t y_host[10] = {-1, -1, -1, -1, -1, 1, 1, 1, 1, 1};
math_t *y_dev;
math_t *C_dev;
math_t C = 1.5;
math_t alpha_host[10] = {0, 0, 0.1, 0.2, 1.5, 0, 0.2, 0.4, 1.5, 1.5};
math_t *alpha_dev; // l l l/u l/u u u l/u l/u l l
int expected_idx[4] = {4, 3, 8, 2};
int expected_idx2[4] = {8, 2, 4, 9};
};
typedef ::testing::Types<float, double> FloatTypes;
TYPED_TEST_CASE(WorkingSetTest, FloatTypes);
TYPED_TEST(WorkingSetTest, Init) {
this->ws =
new WorkingSet<TypeParam>(this->handle, this->handle.get_stream(), 10);
EXPECT_EQ(this->ws->GetSize(), 10);
delete this->ws;
this->ws = new WorkingSet<TypeParam>(this->handle, this->stream, 100000);
EXPECT_EQ(this->ws->GetSize(), 1024);
delete this->ws;
}
TYPED_TEST(WorkingSetTest, Select) {
this->ws = new WorkingSet<TypeParam>(this->handle, this->stream, 10, 4);
EXPECT_EQ(this->ws->GetSize(), 4);
this->ws->SimpleSelect(this->f_dev, this->alpha_dev, this->y_dev,
this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
this->ws->Select(this->f_dev, this->alpha_dev, this->y_dev, this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
this->ws->Select(this->f_dev, this->alpha_dev, this->y_dev, this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx2, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
delete this->ws;
}
//TYPED_TEST(WorkingSetTest, Priority) {
// See Issue #946
//}
template <typename math_t>
class KernelCacheTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
cublas_handle = handle.get_cublas_handle();
raft::allocate(x_dev, n_rows * n_cols);
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::allocate(ws_idx_dev, 2 * n_ws);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(x_dev));
CUDA_CHECK(hipFree(ws_idx_dev));
}
// Naive host side kernel implementation used for comparison
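// Reference formulas (x.y denotes the dot product of two data rows):
//   LINEAR:     K(x, y) = x.y
//   POLYNOMIAL: K(x, y) = (gamma * x.y + coef0)^degree
//   TANH:       K(x, y) = tanh(gamma * x.y + coef0)
//   RBF:        K(x, y) = exp(-gamma * ||x - y||^2)
// The loops below apply the non-linearity on top of precomputed dot products,
// except for RBF, which is recomputed from the raw coordinates.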
void ApplyNonlin(Matrix::KernelParams params) {
switch (params.kernel) {
case Matrix::LINEAR:
break;
case Matrix::POLYNOMIAL:
for (int z = 0; z < n_rows * n_ws; z++) {
math_t val = params.gamma * tile_host_expected[z] + params.coef0;
tile_host_expected[z] = pow(val, params.degree);
}
break;
case Matrix::TANH:
for (int z = 0; z < n_rows * n_ws; z++) {
math_t val = params.gamma * tile_host_expected[z] + params.coef0;
tile_host_expected[z] = tanh(val);
}
break;
case Matrix::RBF:
for (int i = 0; i < n_ws; i++) {
for (int j = 0; j < n_rows; j++) {
math_t d = 0;
for (int k = 0; k < n_cols; k++) {
int idx_i = ws_idx_host[i];
math_t diff = x_host[idx_i + k * n_rows] - x_host[j + k * n_rows];
d += diff * diff;
}
tile_host_expected[i * n_rows + j] = exp(-params.gamma * d);
}
}
break;
}
}
void check(const math_t *tile_dev, int n_ws, int n_rows, const int *ws_idx,
const int *kColIdx) {
host_buffer<int> ws_idx_h(handle.get_host_allocator(), stream, n_ws);
raft::update_host(ws_idx_h.data(), ws_idx, n_ws, stream);
host_buffer<int> kidx_h(handle.get_host_allocator(), stream, n_ws);
raft::update_host(kidx_h.data(), kColIdx, n_ws, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// Note: kernel cache can permute the working set, so we have to look
// up which rows we compare
for (int i = 0; i < n_ws; i++) {
SCOPED_TRACE(i);
int widx = ws_idx_h[i] % n_rows;
int kidx = kidx_h[i];
const math_t *cache_row = tile_dev + kidx * n_rows;
const math_t *row_exp = tile_host_all + widx * n_rows;
EXPECT_TRUE(devArrMatchHost(row_exp, cache_row, n_rows,
raft::CompareApprox<math_t>(1e-6f)));
}
}
raft::handle_t handle;
hipblasHandle_t cublas_handle;
hipStream_t stream;
int n_rows = 4;
int n_cols = 2;
int n_ws = 3;
math_t *x_dev;
int *ws_idx_dev;
math_t x_host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
int ws_idx_host[4] = {0, 1, 3};
math_t tile_host_expected[12] = {26, 32, 38, 44, 32, 40,
48, 56, 44, 56, 68, 80};
math_t tile_host_all[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
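// x_host is a 4x2 matrix in column-major order, i.e. row i = (x_host[i], x_host[i+4]).
// tile_host_all[i * n_rows + j] is the linear kernel value row_i . row_j,
// e.g. row_0 . row_1 = 1*2 + 5*6 = 32; tile_host_expected keeps only the
// rows of the working set {0, 1, 3}.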
};
TYPED_TEST_CASE_P(KernelCacheTest);
TYPED_TEST_P(KernelCacheTest, EvalTest) {
std::vector<Matrix::KernelParams> param_vec{
Matrix::KernelParams{Matrix::LINEAR, 3, 1, 0},
Matrix::KernelParams{Matrix::POLYNOMIAL, 2, 1.3, 1},
Matrix::KernelParams{Matrix::TANH, 2, 0.5, 2.4},
Matrix::KernelParams{Matrix::RBF, 2, 0.5, 0}};
float cache_size = 0;
for (auto params : param_vec) {
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(
params, this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
C_SVC);
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
// apply nonlinearity on tile_host_expected
this->ApplyNonlin(params);
ASSERT_TRUE(devArrMatchHost(this->tile_host_expected, tile_dev,
this->n_rows * this->n_ws,
raft::CompareApprox<TypeParam>(1e-6f)));
delete kernel;
}
}
TYPED_TEST_P(KernelCacheTest, CacheEvalTest) {
Matrix::KernelParams param{Matrix::LINEAR, 3, 1, 0};
float cache_size = sizeof(TypeParam) * this->n_rows * 32 / (1024.0 * 1024);
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(param,
this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
C_SVC);
for (int i = 0; i < 2; i++) {
// We calculate cache tile multiple times to see if cache lookup works
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
this->check(tile_dev, this->n_ws, this->n_rows, cache.GetWsIndices(),
cache.GetColIdxMap());
}
delete kernel;
}
TYPED_TEST_P(KernelCacheTest, SvrEvalTest) {
Matrix::KernelParams param{Matrix::LINEAR, 3, 1, 0};
float cache_size = sizeof(TypeParam) * this->n_rows * 32 / (1024.0 * 1024);
this->n_ws = 6;
int ws_idx_svr[6] = {0, 5, 1, 4, 3, 7};
raft::update_device(this->ws_idx_dev, ws_idx_svr, 6, this->stream);
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(param,
this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
EPSILON_SVR);
for (int i = 0; i < 2; i++) {
// We calculate cache tile multiple times to see if cache lookup works
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
this->check(tile_dev, this->n_ws, this->n_rows, cache.GetWsIndices(),
cache.GetColIdxMap());
}
delete kernel;
}
REGISTER_TYPED_TEST_CASE_P(KernelCacheTest, EvalTest, CacheEvalTest,
SvrEvalTest);
INSTANTIATE_TYPED_TEST_CASE_P(My, KernelCacheTest, FloatTypes);
template <typename math_t>
class GetResultsTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
}
void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); }
void TestResults() {
auto allocator = handle.get_device_allocator();
device_buffer<math_t> x_dev(allocator, stream, n_rows * n_cols);
raft::update_device(x_dev.data(), x_host, n_rows * n_cols, stream);
device_buffer<math_t> f_dev(allocator, stream, n_rows);
raft::update_device(f_dev.data(), f_host, n_rows, stream);
device_buffer<math_t> y_dev(allocator, stream, n_rows);
raft::update_device(y_dev.data(), y_host, n_rows, stream);
device_buffer<math_t> alpha_dev(allocator, stream, n_rows);
raft::update_device(alpha_dev.data(), alpha_host, n_rows, stream);
device_buffer<math_t> C_dev(allocator, stream, n_rows);
init_C(C, C_dev.data(), n_rows, stream);
Results<math_t> res(handle, x_dev.data(), y_dev.data(), n_rows, n_cols,
C_dev.data(), C_SVC);
res.Get(alpha_dev.data(), f_dev.data(), &dual_coefs, &n_coefs, &idx,
&x_support, &b);
ASSERT_EQ(n_coefs, 7);
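// The expected dual coefficients are alpha_i * y_i for the support vectors,
// i.e. the rows with 0 < alpha: alpha = {0.1, 0.2, 1.5, 0.2, 0.4, 1.5, 1.5}
// multiplied element-wise by y = {-1, -1, -1, 1, 1, 1, 1}.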
math_t dual_coefs_exp[] = {-0.1, -0.2, -1.5, 0.2, 0.4, 1.5, 1.5};
EXPECT_TRUE(devArrMatchHost(dual_coefs_exp, dual_coefs, n_coefs,
raft::CompareApprox<math_t>(1e-6f)));
int idx_exp[] = {2, 3, 4, 6, 7, 8, 9};
EXPECT_TRUE(devArrMatchHost(idx_exp, idx, n_coefs, raft::Compare<int>()));
math_t x_support_exp[] = {3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 17, 18, 19, 20};
EXPECT_TRUE(devArrMatchHost(x_support_exp, x_support, n_coefs * n_cols,
raft::CompareApprox<math_t>(1e-6f)));
EXPECT_FLOAT_EQ(b, -6.25f);
if (n_coefs > 0) {
allocator->deallocate(dual_coefs, n_coefs * sizeof(math_t), stream);
allocator->deallocate(idx, n_coefs * sizeof(int), stream);
allocator->deallocate(x_support, n_coefs * n_cols * sizeof(math_t),
stream);
}
// Modify the test by setting all SVs bound, then b is calculated differently
math_t alpha_host2[10] = {0, 0, 1.5, 1.5, 1.5, 0, 1.5, 1.5, 1.5, 1.5};
raft::update_device(alpha_dev.data(), alpha_host2, n_rows, stream);
res.Get(alpha_dev.data(), f_dev.data(), &dual_coefs, &n_coefs, &idx,
&x_support, &b);
EXPECT_FLOAT_EQ(b, -5.5f);
}
int n_rows = 10;
int n_cols = 2;
math_t x_host[20] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
math_t f_host[10] = {1, 3, 10, 4, 2, 8, 6, 5, 9, 7};
math_t y_host[10] = {-1, -1, -1, -1, -1, 1, 1, 1, 1, 1};
math_t alpha_host[10] = {0, 0, 0.1, 0.2, 1.5, 0, 0.2, 0.4, 1.5, 1.5};
// l l l/u l/u u u l/u l/u l l
math_t C = 1.5;
math_t *dual_coefs;
int n_coefs;
int *idx;
math_t *x_support;
math_t b;
raft::handle_t handle;
hipStream_t stream;
};
TYPED_TEST_CASE(GetResultsTest, FloatTypes);
TYPED_TEST(GetResultsTest, Results) { this->TestResults(); }
svmParameter getDefaultSvmParameter() {
svmParameter param;
param.C = 1;
param.tol = 0.001;
param.cache_size = 200;
param.max_iter = -1;
param.nochange_steps = 1000;
param.verbosity = CUML_LEVEL_INFO;
param.epsilon = 0.1;
param.svmType = C_SVC;
return param;
}
template <typename math_t>
class SmoUpdateTest : public ::testing::Test {
protected:
void SetUp() override {
stream = handle.get_stream();
hipblasHandle_t cublas_handle = handle.get_cublas_handle();
raft::allocate(f_dev, n_rows, true);
raft::allocate(kernel_dev, n_rows * n_ws);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
raft::allocate(delta_alpha_dev, n_ws);
raft::update_device(delta_alpha_dev, delta_alpha_host, n_ws, stream);
}
void RunTest() {
svmParameter param = getDefaultSvmParameter();
SmoSolver<float> smo(handle, param, nullptr);
smo.UpdateF(f_dev, n_rows, delta_alpha_dev, n_ws, kernel_dev);
float f_host_expected[] = {0.1f, 7.4505806e-9f, 0.3f, 0.2f, 0.5f, 0.4f};
EXPECT_TRUE(devArrMatchHost(f_host_expected, f_dev, n_rows,
                            raft::CompareApprox<math_t>(1e-6)));
}
void TearDown() override {
CUDA_CHECK(hipFree(delta_alpha_dev));
CUDA_CHECK(hipFree(kernel_dev));
CUDA_CHECK(hipFree(f_dev));
}
raft::handle_t handle;
hipStream_t stream;
int n_rows = 6;
int n_ws = 2;
float *kernel_dev;
float *f_dev;
float *delta_alpha_dev;
float kernel_host[12] = {3, 5, 4, 6, 5, 7, 4, 5, 7, 8, 10, 11};
float delta_alpha_host[2] = {-0.1f, 0.1f};
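// Expected effect of UpdateF: f starts at zero and each row i should receive
// K[i][0]*delta_alpha[0] + K[i][1]*delta_alpha[1], with the 6x2 kernel matrix
// stored column-major here; e.g. row 0: 3*(-0.1) + 4*0.1 = 0.1 and
// row 1: 5*(-0.1) + 5*0.1 = 0, matching f_host_expected in RunTest up to
// float rounding.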
};
TYPED_TEST_CASE(SmoUpdateTest, FloatTypes);
TYPED_TEST(SmoUpdateTest, Update) { this->RunTest(); }
template <typename math_t>
class SmoBlockSolverTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
cublas_handle = handle.get_cublas_handle();
raft::allocate(ws_idx_dev, n_ws);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, n_rows);
raft::allocate(f_dev, n_rows);
raft::allocate(alpha_dev, n_rows, true);
raft::allocate(delta_alpha_dev, n_ws, true);
raft::allocate(kernel_dev, n_ws * n_rows);
raft::allocate(return_buff_dev, 2);
init_C(C, C_dev, n_rows, stream);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
raft::update_device(f_dev, f_host, n_rows, stream);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
}
public: // because of the device lambda
void testBlockSolve() {
hipLaunchKernelGGL(( SmoBlockSolve<math_t, 1024>), dim3(1), dim3(n_ws), 0, stream,
y_dev, n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3f, return_buff_dev, 1);
CUDA_CHECK(hipPeekAtLastError());
math_t return_buff_exp[2] = {0.2, 1};
EXPECT_TRUE(devArrMatchHost(return_buff_exp, return_buff_dev, 2,
                            raft::CompareApprox<math_t>(1e-6)));
math_t *delta_alpha_calc;
raft::allocate(delta_alpha_calc, n_rows);
raft::linalg::binaryOp(
delta_alpha_calc, y_dev, alpha_dev, n_rows,
[] __device__(math_t a, math_t b) { return a * b; }, stream);
EXPECT_TRUE(raft::devArrMatch(delta_alpha_dev, delta_alpha_calc, n_rows,
                              raft::CompareApprox<math_t>(1e-6)));
CUDA_CHECK(hipFree(delta_alpha_calc));
math_t alpha_expected[] = {0, 0.1f, 0.1f, 0};
EXPECT_TRUE(raft::devArrMatch(alpha_expected, alpha_dev, n_rows,
                              raft::CompareApprox<math_t>(1e-6)));
}
protected:
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(y_dev));
CUDA_CHECK(hipFree(C_dev));
CUDA_CHECK(hipFree(f_dev));
CUDA_CHECK(hipFree(ws_idx_dev));
CUDA_CHECK(hipFree(alpha_dev));
CUDA_CHECK(hipFree(delta_alpha_dev));
CUDA_CHECK(hipFree(kernel_dev));
CUDA_CHECK(hipFree(return_buff_dev));
}
raft::handle_t handle;
hipStream_t stream;
hipblasHandle_t cublas_handle;
int n_rows = 4;
int n_cols = 2;
int n_ws = 4;
int *ws_idx_dev;
math_t *y_dev;
math_t *f_dev;
math_t *C_dev;
math_t *alpha_dev;
math_t *delta_alpha_dev;
math_t *kernel_dev;
math_t *return_buff_dev;
int ws_idx_host[4] = {0, 1, 2, 3};
math_t y_host[4] = {1, 1, -1, -1};
math_t C = 1.5;
math_t f_host[4] = {0.4, 0.3, 0.5, 0.1};
math_t kernel_host[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
};
TYPED_TEST_CASE(SmoBlockSolverTest, FloatTypes);
// test a single iteration of the block solver
TYPED_TEST(SmoBlockSolverTest, SolveSingleTest) { this->testBlockSolve(); }
template <typename math_t>
struct smoInput {
math_t C;
math_t tol;
KernelParams kernel_params;
int max_iter;
int max_inner_iter;
};
template <typename math_t>
struct svcInput {
math_t C;
math_t tol;
KernelParams kernel_params;
int n_rows;
int n_cols;
math_t *x_dev;
math_t *y_dev;
bool predict;
};
template <typename math_t>
struct smoOutput {
int n_support;
std::vector<math_t> dual_coefs;
math_t b;
std::vector<math_t> w;
std::vector<math_t> x_support;
std::vector<int> idx;
};
// If we want to compare decision function values too
template <typename math_t>
struct smoOutput2 { //: smoOutput<math_t> {
int n_support;
std::vector<math_t> dual_coefs;
math_t b;
std::vector<math_t> w;
std::vector<math_t> x_support;
std::vector<int> idx;
std::vector<math_t> decision_function;
};
template <typename math_t>
smoOutput<math_t> toSmoOutput(smoOutput2<math_t> x) {
smoOutput<math_t> y{x.n_support, x.dual_coefs, x.b, x.w, x.x_support, x.idx};
return y;
}
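// Tolerances used by checkResults: b is an absolute tolerance on the bias, cs
// is a lower bound on the cosine similarity between the fitted and expected
// weight vectors, and n_sv is the allowed difference in the number of support
// vectors (-1 selects a default derived from the expected count).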
template <typename math_t>
struct svmTol {
math_t b;
math_t cs;
int n_sv;
};
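// Compares a fitted svmModel against the expected smoOutput: the support
// vector count, the dual coefficients, the constraint sum(alpha_i * y_i) == 0,
// the support vectors and their indices, the direction of the primal weight
// vector w (via cosine similarity), and the bias b.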
template <typename math_t>
void checkResults(svmModel<math_t> model, smoOutput<math_t> expected,
hipStream_t stream,
svmTol<math_t> tol = svmTol<math_t>{0.001, 0.99999, -1}) {
math_t *dcoef_exp =
expected.dual_coefs.size() > 0 ? expected.dual_coefs.data() : nullptr;
math_t *w_exp = expected.w.size() > 0 ? expected.w.data() : nullptr;
math_t *x_support_exp =
expected.x_support.size() > 0 ? expected.x_support.data() : nullptr;
int *idx_exp = expected.idx.size() > 0 ? expected.idx.data() : nullptr;
math_t ay_tol = 1e-5;
if (tol.n_sv == -1) {
tol.n_sv = expected.n_support * 0.01;
if (expected.n_support > 10 && tol.n_sv < 3) tol.n_sv = 3;
}
EXPECT_LE(abs(model.n_support - expected.n_support), tol.n_sv);
if (dcoef_exp) {
EXPECT_TRUE(devArrMatchHost(dcoef_exp, model.dual_coefs, model.n_support,
raft::CompareApprox<math_t>(1e-3f)));
}
math_t *dual_coefs_host = new math_t[model.n_support];
raft::update_host(dual_coefs_host, model.dual_coefs, model.n_support, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
math_t ay = 0;
for (int i = 0; i < model.n_support; i++) {
ay += dual_coefs_host[i];
}
// Test if \sum \alpha_i y_i = 0
EXPECT_LT(raft::abs(ay), ay_tol);
if (x_support_exp) {
EXPECT_TRUE(devArrMatchHost(x_support_exp, model.x_support,
model.n_support * model.n_cols,
raft::CompareApprox<math_t>(1e-6f)));
}
if (idx_exp) {
EXPECT_TRUE(devArrMatchHost(idx_exp, model.support_idx, model.n_support,
raft::Compare<int>()));
}
math_t *x_support_host = new math_t[model.n_support * model.n_cols];
raft::update_host(x_support_host, model.x_support,
model.n_support * model.n_cols, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
if (w_exp) {
std::vector<math_t> w(model.n_cols, 0);
for (int i = 0; i < model.n_support; i++) {
for (int j = 0; j < model.n_cols; j++)
w[j] += x_support_host[i + model.n_support * j] * dual_coefs_host[i];
}
// Calculate the cosine similarity between w and w_exp
math_t abs_w = 0;
math_t abs_w_exp = 0;
math_t cs = 0;
for (int i = 0; i < model.n_cols; i++) {
abs_w += w[i] * w[i];
abs_w_exp += w_exp[i] * w_exp[i];
cs += w[i] * w_exp[i];
}
cs /= sqrt(abs_w * abs_w_exp);
EXPECT_GT(cs, tol.cs);
}
EXPECT_LT(raft::abs(model.b - expected.b), tol.b);
delete[] dual_coefs_host;
delete[] x_support_host;
}
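// SmoSolverTest uses a fixed 6-sample, 2-feature toy problem (x_host, y_host)
// together with its precomputed linear-kernel matrix (kernel_host) to exercise
// the block solver, the full SMO solver, SVC, and the blob-based end-to-end
// tests.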
template <typename math_t>
class SmoSolverTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
raft::allocate(x_dev, n_rows * n_cols);
raft::allocate(ws_idx_dev, n_ws);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, n_rows);
raft::allocate(y_pred, n_rows);
raft::allocate(f_dev, n_rows);
raft::allocate(alpha_dev, n_rows, true);
raft::allocate(delta_alpha_dev, n_ws, true);
raft::allocate(kernel_dev, n_ws * n_rows);
raft::allocate(return_buff_dev, 2);
raft::allocate(sample_weights_dev, n_rows);
LinAlg::range(sample_weights_dev, 1, n_rows + 1, stream);
cublas_handle = handle.get_cublas_handle();
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
init_C(C, C_dev, n_rows, stream);
raft::update_device(f_dev, f_host, n_rows, stream);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
CUDA_CHECK(
hipMemsetAsync(delta_alpha_dev, 0, n_ws * sizeof(math_t), stream));
kernel = new Matrix::GramMatrixBase<math_t>(cublas_handle);
}
void FreeResultBuffers() {
if (dual_coefs_d) CUDA_CHECK(hipFree(dual_coefs_d));
if (idx_d) CUDA_CHECK(hipFree(idx_d));
if (x_support_d) CUDA_CHECK(hipFree(x_support_d));
dual_coefs_d = nullptr;
idx_d = nullptr;
x_support_d = nullptr;
}
void TearDown() override {
delete kernel;
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(x_dev));
CUDA_CHECK(hipFree(y_dev));
CUDA_CHECK(hipFree(C_dev));
CUDA_CHECK(hipFree(y_pred));
CUDA_CHECK(hipFree(f_dev));
CUDA_CHECK(hipFree(ws_idx_dev));
CUDA_CHECK(hipFree(alpha_dev));
CUDA_CHECK(hipFree(delta_alpha_dev));
CUDA_CHECK(hipFree(kernel_dev));
CUDA_CHECK(hipFree(return_buff_dev));
CUDA_CHECK(hipFree(sample_weights_dev));
FreeResultBuffers();
}
public:
void blockSolveTest() {
hipLaunchKernelGGL(( SmoBlockSolve<math_t, 1024>), dim3(1), dim3(n_ws), 0, stream,
y_dev, n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3, return_buff_dev);
CUDA_CHECK(hipPeekAtLastError());
math_t return_buff[2];
raft::update_host(return_buff, return_buff_dev, 2, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
EXPECT_FLOAT_EQ(return_buff[0], 2.0f) << return_buff[0];
EXPECT_LT(return_buff[1], 100) << return_buff[1];
// checkResults won't work here, because it expects that GetResults was called
math_t *delta_alpha_calc;
raft::allocate(delta_alpha_calc, n_rows);
raft::linalg::binaryOp(
delta_alpha_calc, y_dev, alpha_dev, n_rows,
[] __device__(math_t a, math_t b) { return a * b; }, stream);
EXPECT_TRUE(raft::devArrMatch(delta_alpha_dev, delta_alpha_calc, n_rows,
raft::CompareApprox<math_t>(1e-6)));
CUDA_CHECK(hipFree(delta_alpha_calc));
math_t alpha_expected[] = {0.6f, 0, 1, 1, 0, 0.6f};
//for C=10: {0.25f, 0, 2.25f, 3.75f, 0, 1.75f};
EXPECT_TRUE(raft::devArrMatch(alpha_expected, alpha_dev, n_rows,
raft::CompareApprox<math_t>(1e-6)));
math_t host_alpha[6];
raft::update_host(host_alpha, alpha_dev, n_rows, stream);
math_t w[] = {0, 0};
math_t ay = 0;
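// Recompute the primal weights w = sum_i alpha_i * y_i * x_i and the
// constraint sum_i alpha_i * y_i on the host and compare them with the
// hand-derived values for C = 1 (the commented values below are for C = 10).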
for (int i = 0; i < n_rows; i++) {
EXPECT_FLOAT_EQ(host_alpha[i], alpha_expected[i]) << "alpha " << i;
w[0] += x_host[i] * host_alpha[i] * y_host[i];
w[1] += x_host[i + n_rows] * host_alpha[i] * y_host[i];
ay += host_alpha[i] * y_host[i];
}
EXPECT_FLOAT_EQ(ay, 0.0);
EXPECT_FLOAT_EQ(w[0], -0.4);
EXPECT_FLOAT_EQ(w[1], 1.2);
// for C=10
//EXPECT_FLOAT_EQ(w[0], -2.0);
//EXPECT_FLOAT_EQ(w[1], 2.0);
}
void svrBlockSolveTest() {
int n_ws = 4;
int n_rows = 2;
// int n_cols = 1;
// math_t x[2] = {1, 2};
// yr = {2, 3}
math_t f[4] = {-1.9, -2.9, -2.1 - 3.1};
math_t kernel[4] = {1, 2, 2, 4};
// ws_idx is defined as {0, 1, 2, 3}
int kColIdx[4] = {0, 1, 0, 1};
device_buffer<int> kColIdx_dev(handle.get_device_allocator(), stream, 4);
raft::update_device(f_dev, f, 4, stream);
raft::update_device(kernel_dev, kernel, 4, stream);
raft::update_device(kColIdx_dev.data(), kColIdx, 4, stream);
hipLaunchKernelGGL(( SmoBlockSolve<math_t, 1024>), dim3(1), dim3(n_ws), 0, stream,
y_dev, 2 * n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3, return_buff_dev, 10, EPSILON_SVR,
kColIdx_dev.data());
CUDA_CHECK(hipPeekAtLastError());
math_t return_buff[2];
raft::update_host(return_buff, return_buff_dev, 2, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
EXPECT_LT(return_buff[1], 10) << return_buff[1];
math_t alpha_exp[] = {0, 0.8, 0.8, 0};
EXPECT_TRUE(raft::devArrMatch(alpha_exp, alpha_dev, 4,
raft::CompareApprox<math_t>(1e-6)));
math_t dalpha_exp[] = {-0.8, 0.8};
EXPECT_TRUE(raft::devArrMatch(dalpha_exp, delta_alpha_dev, 2,
raft::CompareApprox<math_t>(1e-6)));
}
protected:
raft::handle_t handle;
hipStream_t stream;
Matrix::GramMatrixBase<math_t> *kernel;
int n_rows = 6;
const int n_cols = 2;
int n_ws = 6;
math_t *x_dev;
int *ws_idx_dev;
math_t *y_dev;
math_t *C_dev;
math_t *y_pred;
math_t *f_dev;
math_t *alpha_dev;
math_t *delta_alpha_dev;
math_t *kernel_dev;
math_t *return_buff_dev;
math_t *sample_weights_dev;
math_t x_host[12] = {1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3};
int ws_idx_host[6] = {0, 1, 2, 3, 4, 5};
math_t y_host[6] = {-1, -1, 1, -1, 1, 1};
math_t C = 1;
math_t f_host[6] = {1, 1, -1, 1, -1, -1};
math_t kernel_host[36] = {2, 3, 3, 4, 4, 5, 3, 5, 4, 6, 5, 7,
3, 4, 5, 6, 7, 8, 4, 6, 6, 8, 8, 10,
4, 5, 7, 8, 10, 11, 5, 7, 8, 10, 11, 13};
hipblasHandle_t cublas_handle;
math_t *dual_coefs_d = nullptr;
int n_coefs;
int *idx_d = nullptr;
math_t *x_support_d = nullptr;
math_t b;
};
TYPED_TEST_CASE(SmoSolverTest, FloatTypes);
TYPED_TEST(SmoSolverTest, BlockSolveTest) { this->blockSolveTest(); }
TYPED_TEST(SmoSolverTest, SvrBlockSolveTest) { this->svrBlockSolveTest(); }
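// Maps the KernelParams::kernel enum to a short printable name; used in the
// SCOPED_TRACE / operator<< output of the parameterized tests below.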
std::string kernelName(KernelParams k) {
std::vector<std::string> names{"linear", "poly", "rbf", "tanh"};
return names[k.kernel];
}
template <typename math_t>
std::ostream &operator<<(std::ostream &os, const smoInput<math_t> &b) {
os << kernelName(b.kernel_params) << ", C=" << b.C << ", tol=" << b.tol;
return os;
}
TYPED_TEST(SmoSolverTest, SmoSolveTest) {
std::vector<std::pair<smoInput<TypeParam>, smoOutput<TypeParam>>> data{
{smoInput<TypeParam>{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, 100, 1},
smoOutput<TypeParam>{4, // n_sv
{-0.6, 1, -1, 0.6}, // dual_coefs
-1.8, // b
{-0.4, 1.2}, // w
{1, 1, 2, 2, 1, 2, 2, 3}, // x_support
{0, 2, 3, 5}}}, // support idx
{smoInput<TypeParam>{10, 0.001, KernelParams{LINEAR, 3, 1, 0}, 100, 1},
smoOutput<TypeParam>{3, {-2, 4, -2, 0, 0}, -1.0, {-2, 2}, {}, {}}},
{smoInput<TypeParam>{1, 1e-6, KernelParams{POLYNOMIAL, 3, 1, 1}, 100, 1},
smoOutput<TypeParam>{3,
{-0.02556136, 0.03979708, -0.01423571},
-1.07739149,
{},
{1, 1, 2, 1, 2, 2},
{0, 2, 3}}}};
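// For each parameter set: build the kernel through KernelFactory, run
// SmoSolver::Solve on the toy problem, validate the resulting model with
// checkResults, then release the model buffers.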
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(p);
svmParameter param = getDefaultSvmParameter();
param.C = p.C;
param.tol = p.tol;
//param.max_iter = p.max_iter;
GramMatrixBase<TypeParam> *kernel = KernelFactory<TypeParam>::create(
p.kernel_params, this->handle.get_cublas_handle());
SmoSolver<TypeParam> smo(this->handle, param, kernel);
svmModel<TypeParam> model{0, this->n_cols, 0, nullptr,
nullptr, nullptr, 0, nullptr};
smo.Solve(this->x_dev, this->n_rows, this->n_cols, this->y_dev, nullptr,
&model.dual_coefs, &model.n_support, &model.x_support,
&model.support_idx, &model.b, p.max_iter, p.max_inner_iter);
checkResults(model, exp, this->stream);
svmFreeBuffers(this->handle, model);
}
}
TYPED_TEST(SmoSolverTest, SvcTest) {
std::vector<std::pair<svcInput<TypeParam>, smoOutput2<TypeParam>>> data{
{svcInput<TypeParam>{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{4,
{-0.6, 1, -1, 0.6},
-1.8f,
{-0.4, 1.2},
{1, 1, 2, 2, 1, 2, 2, 3},
{0, 2, 3, 5},
{-1.0, -1.4, 0.2, -0.2, 1.4, 1.0}}},
{// C == 0 marks a special test case with sample weights
svcInput<TypeParam>{0, 0.001, KernelParams{LINEAR, 3, 1, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{4,
{},
-1.0f,
{-2, 2},
{1, 1, 2, 2, 1, 2, 2, 3},
{0, 2, 3, 5},
{-1.0, -3.0, 1.0, -1.0, 3.0, 1.0}}},
{svcInput<TypeParam>{1, 1e-6, KernelParams{POLYNOMIAL, 3, 1, 0},
this->n_rows, this->n_cols, this->x_dev, this->y_dev,
true},
smoOutput2<TypeParam>{3,
{-0.03900895, 0.05904058, -0.02003163},
-0.99999959,
{},
{1, 1, 2, 1, 2, 2},
{0, 2, 3},
{-0.9996812, -2.60106647, 0.9998406, -1.0001594,
6.49681105, 4.31951232}}},
{svcInput<TypeParam>{10, 1e-6, KernelParams{TANH, 3, 0.3, 1.0},
this->n_rows, this->n_cols, this->x_dev, this->y_dev,
false},
smoOutput2<TypeParam>{6,
{-10., -10., 10., -10., 10., 10.},
-0.3927505,
{},
{1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3},
{0, 1, 2, 3, 4, 5},
{0.25670694, -0.16451539, 0.16451427, -0.1568888,
-0.04496891, -0.2387212}}},
{svcInput<TypeParam>{1, 1.0e-6, KernelParams{RBF, 0, 0.15, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{6,
{-1., -1, 1., -1., 1, 1.},
0,
{},
{1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3},
{0, 1, 2, 3, 4, 5},
{-0.71964003, -0.95941954, 0.13929202, -0.13929202,
0.95941954, 0.71964003}}}};
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(kernelName(p.kernel_params));
TypeParam *sample_weights = nullptr;
if (p.C == 0) {
p.C = 1;
sample_weights = this->sample_weights_dev;
}
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params);
svc.fit(p.x_dev, p.n_rows, p.n_cols, p.y_dev, sample_weights);
checkResults(svc.model, toSmoOutput(exp), this->stream);
device_buffer<TypeParam> y_pred(this->handle.get_device_allocator(),
this->stream, p.n_rows);
if (p.predict) {
svc.predict(p.x_dev, p.n_rows, p.n_cols, y_pred.data());
EXPECT_TRUE(raft::devArrMatch(this->y_dev, y_pred.data(), p.n_rows,
raft::CompareApprox<TypeParam>(1e-6f)));
}
if (exp.decision_function.size() > 0) {
svc.decisionFunction(p.x_dev, p.n_rows, p.n_cols, y_pred.data());
EXPECT_TRUE(devArrMatchHost(exp.decision_function.data(), y_pred.data(),
p.n_rows,
raft::CompareApprox<TypeParam>(1e-3f)));
}
}
}
struct blobInput {
double C;
double tol;
KernelParams kernel_params;
int n_rows;
int n_cols;
};
std::ostream &operator<<(std::ostream &os, const blobInput &b) {
os << kernelName(b.kernel_params) << " " << b.n_rows << "x" << b.n_cols;
return os;
}
// Helper kernel that casts between element types; needed until there is
// progress with Issue #935
template <typename inType, typename outType>
__global__ void cast(outType *out, int n, inType *in) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) out[tid] = in[tid];
}
// To have the same input data for both single and double precision,
// we generate the blobs in single precision only, and cast to dp if needed.
template <typename math_t>
void make_blobs(const raft::handle_t &handle, math_t *x, math_t *y, int n_rows,
int n_cols, int n_cluster, float *centers = nullptr) {
auto allocator = handle.get_device_allocator();
auto cublas_h = handle.get_cublas_handle();
auto stream = handle.get_stream();
device_buffer<float> x_float(allocator, stream, n_rows * n_cols);
device_buffer<int> y_int(allocator, stream, n_rows);
Datasets::make_blobs(handle, x_float.data(), y_int.data(), n_rows, n_cols,
n_cluster, true, centers, (float *)nullptr, 1.0f, true,
-2.0f, 2.0f, 0);
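// The generated single-precision data is transposed (and cast to math_t with
// the helper kernel above when math_t is double) before it is handed to the
// SVM code; presumably this converts the blob output into the column-major
// layout the solver expects.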
int TPB = 256;
if (std::is_same<float, math_t>::value) {
raft::linalg::transpose(handle, x_float.data(), (float *)x, n_cols, n_rows,
stream);
} else {
device_buffer<math_t> x2(allocator, stream, n_rows * n_cols);
hipLaunchKernelGGL(( cast), dim3(raft::ceildiv(n_rows * n_cols, TPB)), dim3(TPB), 0, stream,
x2.data(), n_rows * n_cols, x_float.data());
raft::linalg::transpose(handle, x2.data(), x, n_cols, n_rows, stream);
CUDA_CHECK(hipPeekAtLastError());
}
hipLaunchKernelGGL(( cast), dim3(raft::ceildiv(n_rows, TPB)), dim3(TPB), 0, stream, y, n_rows, y_int.data());
CUDA_CHECK(hipPeekAtLastError());
}
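// Returns 1 when the two tuple elements (expected and predicted label) are
// equal; combined with thrust::transform and thrust::reduce it counts the
// correct predictions in BlobPredict.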
struct is_same_functor {
template <typename Tuple>
__host__ __device__ int operator()(Tuple t) {
return thrust::get<0>(t) == thrust::get<1>(t);
}
};
TYPED_TEST(SmoSolverTest, BlobPredict) {
// Pair.second is the expected accuracy. It might change if the Rng changes.
std::vector<std::pair<blobInput, TypeParam>> data{
{blobInput{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, 200, 10}, 98},
{blobInput{1, 0.001, KernelParams{POLYNOMIAL, 3, 1, 0}, 200, 10}, 98},
{blobInput{1, 0.001, KernelParams{RBF, 3, 1, 0}, 200, 2}, 98},
{blobInput{1, 0.009, KernelParams{TANH, 3, 0.1, 0}, 200, 10}, 98}};
// This should be larger than N_PRED_BATCH in svcPredict
const int n_pred = 5000;
auto allocator = this->handle.get_device_allocator();
for (auto d : data) {
auto p = d.first;
SCOPED_TRACE(p);
// explicit centers for the blobs
device_buffer<float> centers(allocator, this->stream, 2 * p.n_cols);
thrust::device_ptr<float> thrust_ptr(centers.data());
thrust::fill(thrust::hip::par.on(this->stream), thrust_ptr,
thrust_ptr + p.n_cols, -5.0f);
thrust::fill(thrust::hip::par.on(this->stream), thrust_ptr + p.n_cols,
thrust_ptr + 2 * p.n_cols, +5.0f);
device_buffer<TypeParam> x(allocator, this->stream, p.n_rows * p.n_cols);
device_buffer<TypeParam> y(allocator, this->stream, p.n_rows);
device_buffer<TypeParam> x_pred(allocator, this->stream, n_pred * p.n_cols);
device_buffer<TypeParam> y_pred(allocator, this->stream, n_pred);
make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2,
centers.data());
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50,
CUML_LEVEL_INFO);
svc.fit(x.data(), p.n_rows, p.n_cols, y.data());
// Create a different dataset for prediction
make_blobs(this->handle, x_pred.data(), y_pred.data(), n_pred, p.n_cols, 2,
centers.data());
device_buffer<TypeParam> y_pred2(this->handle.get_device_allocator(),
this->stream, n_pred);
svc.predict(x_pred.data(), n_pred, p.n_cols, y_pred2.data());
// Count the number of correct predictions
device_buffer<int> is_correct(this->handle.get_device_allocator(),
this->stream, n_pred);
thrust::device_ptr<TypeParam> ptr1(y_pred.data());
thrust::device_ptr<TypeParam> ptr2(y_pred2.data());
thrust::device_ptr<int> ptr3(is_correct.data());
auto first = thrust::make_zip_iterator(thrust::make_tuple(ptr1, ptr2));
auto last = thrust::make_zip_iterator(
thrust::make_tuple(ptr1 + n_pred, ptr2 + n_pred));
thrust::transform(thrust::hip::par.on(this->stream), first, last, ptr3,
is_same_functor());
int n_correct =
thrust::reduce(thrust::hip::par.on(this->stream), ptr3, ptr3 + n_pred);
TypeParam accuracy = 100 * n_correct / n_pred;
TypeParam accuracy_exp = d.second;
EXPECT_GE(accuracy, accuracy_exp);
}
}
TYPED_TEST(SmoSolverTest, MemoryLeak) {
// We check that the same amount of free memory is available on the GPU before
// and after we call SVM. This can help catch memory leaks, but it is not 100%
// reliable: small allocations might be pooled together by hipMalloc, and some
// of those would be missed by this method.
enum class ThrowException { Yes, No };
std::vector<std::pair<blobInput, ThrowException>> data{
{blobInput{1, 0.001, KernelParams{LINEAR, 3, 0.01, 0}, 1000, 1000},
ThrowException::No},
{blobInput{1, 0.001, KernelParams{POLYNOMIAL, 400, 5, 10}, 1000, 1000},
ThrowException::Yes}};
// For the second set of input parameters training will fail: some kernel
// function values would be 1e400 or larger, which does not fit in fp64.
// This leads to a NaN diff in SmoSolver, which will throw an exception
// to stop fitting.
size_t free1, total, free2;
CUDA_CHECK(hipMemGetInfo(&free1, &total));
auto allocator = this->handle.get_device_allocator();
for (auto d : data) {
auto p = d.first;
SCOPED_TRACE(p);
device_buffer<TypeParam> x(allocator, this->stream, p.n_rows * p.n_cols);
device_buffer<TypeParam> y(allocator, this->stream, p.n_rows);
make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2);
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params);
if (d.second == ThrowException::Yes) {
// We want to check whether we leak any memory while we unwind the stack
EXPECT_THROW(svc.fit(x.data(), p.n_rows, p.n_cols, y.data()),
raft::exception);
} else {
svc.fit(x.data(), p.n_rows, p.n_cols, y.data());
device_buffer<TypeParam> y_pred(this->handle.get_device_allocator(),
this->stream, p.n_rows);
CUDA_CHECK(hipStreamSynchronize(this->stream));
CUDA_CHECK(hipMemGetInfo(&free2, &total));
float delta = (free1 - free2);
// Just to make sure that we measure any memory consumption at all, we check
// that we see at least the memory consumption of x[n_rows*n_cols].
// If this check fails, increasing the test size might help to fix it (one
// could additionally control the execution time via the max_iter argument to
// SVC).
EXPECT_GT(delta, p.n_rows * p.n_cols * 4);
CUDA_CHECK(hipStreamSynchronize(this->stream));
svc.predict(x.data(), p.n_rows, p.n_cols, y_pred.data());
}
}
CUDA_CHECK(hipMemGetInfo(&free2, &total));
float delta = (free1 - free2);
EXPECT_EQ(delta, 0);
}
template <typename math_t>
struct SvrInput {
svmParameter param;
KernelParams kernel;
int n_rows;
int n_cols;
std::vector<math_t> x;
std::vector<math_t> y;
std::vector<math_t> sample_weighs;
};
template <typename math_t>
std::ostream &operator<<(std::ostream &os, const SvrInput<math_t> &b) {
os << kernelName(b.kernel) << " " << b.n_rows << "x" << b.n_cols
<< ", C=" << b.param.C << ", tol=" << b.param.tol;
return os;
}
template <typename math_t>
class SvrTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
handle.set_stream(stream);
allocator = handle.get_device_allocator();
raft::allocate(x_dev, n_rows * n_cols);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, 2 * n_rows);
raft::allocate(y_pred, n_rows);
raft::allocate(yc, n_train);
raft::allocate(f, n_train);
raft::allocate(alpha, n_train);
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
model.n_support = 0;
model.dual_coefs = nullptr;
model.x_support = nullptr;
model.support_idx = nullptr;
model.n_classes = 0;
model.unique_labels = nullptr;
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(x_dev));
CUDA_CHECK(hipFree(y_dev));
CUDA_CHECK(hipFree(C_dev));
CUDA_CHECK(hipFree(y_pred));
CUDA_CHECK(hipFree(yc));
CUDA_CHECK(hipFree(f));
CUDA_CHECK(hipFree(alpha));
svmFreeBuffers(handle, model);
}
public:
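// SvrInit is expected to set up the doubled epsilon-SVR problem: yc holds +1
// for the first n_rows entries and -1 for the second half, and f is eps - y
// followed by -eps - y with the default eps = 0.1 (compare yc_exp and f_exp).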
void TestSvrInit() {
svmParameter param = getDefaultSvmParameter();
param.svmType = EPSILON_SVR;
SmoSolver<math_t> smo(handle, param, nullptr);
smo.SvrInit(y_dev, n_rows, yc, f);
EXPECT_TRUE(devArrMatchHost(yc_exp, yc, n_train,
raft::CompareApprox<math_t>(1.0e-9)));
EXPECT_TRUE(devArrMatchHost(f_exp, f, n_train, raft::Compare<math_t>()));
}
void TestSvrWorkingSet() {
init_C((math_t)1.0, C_dev, 2 * n_rows, stream);
WorkingSet<math_t> *ws;
ws = new WorkingSet<math_t>(handle, stream, n_rows, 20, EPSILON_SVR);
EXPECT_EQ(ws->GetSize(), 2 * n_rows);
raft::update_device(alpha, alpha_host, n_train, stream);
raft::update_device(f, f_exp, n_train, stream);
raft::update_device(yc, yc_exp, n_train, stream);
ws->Select(f, alpha, yc, C_dev);
int exp_idx[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
ASSERT_TRUE(devArrMatchHost(exp_idx, ws->GetIndices(), ws->GetSize(),
raft::Compare<int>()));
delete ws;
ws = new WorkingSet<math_t>(handle, stream, n_rows, 10, EPSILON_SVR);
EXPECT_EQ(ws->GetSize(), 10);
ws->Select(f, alpha, yc, C_dev);
int exp_idx2[] = {6, 12, 5, 11, 3, 9, 8, 1, 7, 0};
ASSERT_TRUE(devArrMatchHost(exp_idx2, ws->GetIndices(), ws->GetSize(),
raft::Compare<int>()));
delete ws;
}
void TestSvrResults() {
raft::update_device(yc, yc_exp, n_train, stream);
init_C((math_t)0.001, C_dev, n_rows * 2, stream);
Results<math_t> res(handle, x_dev, yc, n_rows, n_cols, C_dev, EPSILON_SVR);
model.n_cols = n_cols;
raft::update_device(alpha, alpha_host, n_train, stream);
raft::update_device(f, f_exp, n_train, stream);
res.Get(alpha, f, &model.dual_coefs, &model.n_support, &model.support_idx,
&model.x_support, &model.b);
ASSERT_EQ(model.n_support, 5);
math_t dc_exp[] = {0.1, 0.3, -0.4, 0.9, -0.9};
EXPECT_TRUE(devArrMatchHost(dc_exp, model.dual_coefs, model.n_support,
raft::CompareApprox<math_t>(1.0e-6)));
math_t x_exp[] = {1, 2, 3, 5, 6};
EXPECT_TRUE(devArrMatchHost(x_exp, model.x_support,
model.n_support * n_cols,
raft::CompareApprox<math_t>(1.0e-6)));
int idx_exp[] = {0, 1, 2, 4, 5};
EXPECT_TRUE(devArrMatchHost(idx_exp, model.support_idx, model.n_support,
raft::CompareApprox<math_t>(1.0e-6)));
}
void TestSvrFitPredict() {
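// The brace-initialized svmParameter values below are positional; judging by
// the fields assigned in getDefaultSvmParameter they appear to follow
// {C, cache_size, max_iter, nochange_steps, tol, verbosity, epsilon, svmType},
// but this ordering is inferred from the values rather than from the header.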
std::vector<std::pair<SvrInput<math_t>, smoOutput2<math_t>>> data{
{SvrInput<math_t>{
svmParameter{1, 0, 1, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
1, // n_cols
{0, 1}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 2.1, {0.8}, {0, 1}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 10, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
1, // n_cols
{1, 2}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 1.3, {0.8}, {1, 2}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 0, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
2, // n_cols
{1, 2, 5, 5}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 1.3, {0.8, 0.0}, {1, 2, 5, 5}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
7, // n_rows
1, //n_cols
{1, 2, 3, 4, 5, 6, 7}, //x
{0, 2, 3, 4, 5, 6, 8} //y
},
smoOutput2<math_t>{6,
{-1, 1, 0.45, -0.45, -1, 1},
-0.4,
{1.1},
{1.0, 2.0, 3.0, 5.0, 6.0, 7.0},
{0, 1, 2, 4, 5, 6},
{0.7, 1.8, 2.9, 4, 5.1, 6.2, 7.3}}},
// Almost the same as above, but with sample weights
{SvrInput<math_t>{
svmParameter{1, 0, 100, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
7, // n_rows
1, // n_cols
{1, 2, 3, 4, 5, 6, 7}, // x
{0, 2, 3, 0, 4, 8, 12}, // y
{1, 1, 1, 10, 2, 10, 1} // sample weights
},
smoOutput2<math_t>{6,
{},
-15.5,
{3.9},
{1.0, 2.0, 3.0, 4.0, 6.0, 7.0},
{0, 1, 2, 3, 5, 6},
{}}}};
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(p);
device_buffer<math_t> x_dev(allocator, stream, p.n_rows * p.n_cols);
raft::update_device(x_dev.data(), p.x.data(), p.n_rows * p.n_cols,
stream);
device_buffer<math_t> y_dev(allocator, stream, p.n_rows);
raft::update_device(y_dev.data(), p.y.data(), p.n_rows, stream);
MLCommon::device_buffer<math_t> sample_weights_dev(allocator, stream);
math_t *sample_weights = nullptr;
if (!p.sample_weighs.empty()) {
sample_weights_dev.resize(p.n_rows, stream);
sample_weights = sample_weights_dev.data();
raft::update_device(sample_weights_dev.data(), p.sample_weighs.data(),
p.n_rows, stream);
}
svrFit(handle, x_dev.data(), p.n_rows, p.n_cols, y_dev.data(), p.param,
p.kernel, model, sample_weights);
checkResults(model, toSmoOutput(exp), stream);
device_buffer<math_t> preds(allocator, stream, p.n_rows);
svcPredict(handle, x_dev.data(), p.n_rows, p.n_cols, p.kernel, model,
preds.data(), (math_t)200.0, false);
if (!exp.decision_function.empty()) {
EXPECT_TRUE(devArrMatchHost(exp.decision_function.data(), preds.data(),
p.n_rows,
raft::CompareApprox<math_t>(1.0e-5)));
}
}
}
protected:
raft::handle_t handle;
hipStream_t stream;
std::shared_ptr<deviceAllocator> allocator;
int n_rows = 7;
int n_train = 2 * n_rows;
const int n_cols = 1;
svmModel<math_t> model;
math_t *x_dev;
math_t *y_dev;
math_t *C_dev;
math_t *y_pred;
math_t *yc;
math_t *f;
math_t *alpha;
math_t x_host[7] = {1, 2, 3, 4, 5, 6, 7};
math_t y_host[7] = {0, 2, 3, 4, 5, 6, 8};
math_t yc_exp[14] = {1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
math_t f_exp[14] = {0.1, -1.9, -2.9, -3.9, -4.9, -5.9, -7.9,
-0.1, -2.1, -3.1, -4.1, -5.1, -6.1, -8.1};
math_t alpha_host[14] = {0.2, 0.3, 0, 0, 1, 0.1, 0,
0.1, 0, 0.4, 0, 0.1, 1, 0};
};
typedef ::testing::Types<float> OnlyFp32;
TYPED_TEST_CASE(SvrTest, FloatTypes);
TYPED_TEST(SvrTest, Init) { this->TestSvrInit(); }
TYPED_TEST(SvrTest, WorkingSet) { this->TestSvrWorkingSet(); }
TYPED_TEST(SvrTest, Results) { this->TestSvrResults(); }
TYPED_TEST(SvrTest, FitPredict) { this->TestSvrFitPredict(); }
}; // namespace SVM
}; // namespace ML
| 9400b62601a5e9b8cf8515affce5e5000a8ee1bb.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/svm/svm_model.h>
#include <cuml/svm/svm_parameter.h>
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/transpose.h>
#include <test_utils.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cub/cub.cuh>
#include <cuml/common/logger.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/svm/svc.hpp>
#include <cuml/svm/svr.hpp>
#include <iostream>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelmatrices.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/random/rng.cuh>
#include <random/make_blobs.cuh>
#include <string>
#include <svm/smoblocksolve.cuh>
#include <svm/smosolver.cuh>
#include <svm/workingset.cuh>
#include <type_traits>
#include <vector>
namespace ML {
namespace SVM {
using namespace MLCommon;
using namespace Matrix;
// Initialize device vector C_vec with scalar C
template <typename math_t>
void init_C(math_t C, math_t *C_vec, int n, cudaStream_t stream) {
thrust::device_ptr<math_t> c_ptr(C_vec);
thrust::fill(thrust::cuda::par.on(stream), c_ptr, c_ptr + n, C);
}
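// WorkingSetTest checks working-set selection on a 10-sample problem; the
// l/u annotation next to alpha_host marks whether each sample currently
// belongs to the lower set, the upper set, or both, which determines the
// expected selections.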
template <typename math_t>
class WorkingSetTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
raft::allocate(f_dev, 10);
raft::allocate(y_dev, 10);
raft::allocate(C_dev, 10);
raft::allocate(alpha_dev, 10);
init_C(C, C_dev, 10, stream);
raft::update_device(f_dev, f_host, 10, stream);
raft::update_device(y_dev, y_host, 10, stream);
raft::update_device(alpha_dev, alpha_host, 10, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(f_dev));
CUDA_CHECK(cudaFree(y_dev));
CUDA_CHECK(cudaFree(C_dev));
CUDA_CHECK(cudaFree(alpha_dev));
}
raft::handle_t handle;
cudaStream_t stream;
WorkingSet<math_t> *ws;
math_t f_host[10] = {1, 3, 10, 4, 2, 8, 6, 5, 9, 7};
math_t *f_dev;
math_t y_host[10] = {-1, -1, -1, -1, -1, 1, 1, 1, 1, 1};
math_t *y_dev;
math_t *C_dev;
math_t C = 1.5;
math_t alpha_host[10] = {0, 0, 0.1, 0.2, 1.5, 0, 0.2, 0.4, 1.5, 1.5};
math_t *alpha_dev; // l l l/u l/u u u l/u l/u l l
int expected_idx[4] = {4, 3, 8, 2};
int expected_idx2[4] = {8, 2, 4, 9};
};
typedef ::testing::Types<float, double> FloatTypes;
TYPED_TEST_CASE(WorkingSetTest, FloatTypes);
TYPED_TEST(WorkingSetTest, Init) {
this->ws =
new WorkingSet<TypeParam>(this->handle, this->handle.get_stream(), 10);
EXPECT_EQ(this->ws->GetSize(), 10);
delete this->ws;
this->ws = new WorkingSet<TypeParam>(this->handle, this->stream, 100000);
EXPECT_EQ(this->ws->GetSize(), 1024);
delete this->ws;
}
TYPED_TEST(WorkingSetTest, Select) {
this->ws = new WorkingSet<TypeParam>(this->handle, this->stream, 10, 4);
EXPECT_EQ(this->ws->GetSize(), 4);
this->ws->SimpleSelect(this->f_dev, this->alpha_dev, this->y_dev,
this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
this->ws->Select(this->f_dev, this->alpha_dev, this->y_dev, this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
this->ws->Select(this->f_dev, this->alpha_dev, this->y_dev, this->C_dev);
ASSERT_TRUE(devArrMatchHost(this->expected_idx2, this->ws->GetIndices(),
this->ws->GetSize(), raft::Compare<int>()));
delete this->ws;
}
//TYPED_TEST(WorkingSetTest, Priority) {
// See Issue #946
//}
template <typename math_t>
class KernelCacheTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
cublas_handle = handle.get_cublas_handle();
raft::allocate(x_dev, n_rows * n_cols);
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::allocate(ws_idx_dev, 2 * n_ws);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(x_dev));
CUDA_CHECK(cudaFree(ws_idx_dev));
}
// Naive host side kernel implementation used for comparison
void ApplyNonlin(Matrix::KernelParams params) {
switch (params.kernel) {
case Matrix::LINEAR:
break;
case Matrix::POLYNOMIAL:
for (int z = 0; z < n_rows * n_ws; z++) {
math_t val = params.gamma * tile_host_expected[z] + params.coef0;
tile_host_expected[z] = pow(val, params.degree);
}
break;
case Matrix::TANH:
for (int z = 0; z < n_rows * n_ws; z++) {
math_t val = params.gamma * tile_host_expected[z] + params.coef0;
tile_host_expected[z] = tanh(val);
}
break;
case Matrix::RBF:
for (int i = 0; i < n_ws; i++) {
for (int j = 0; j < n_rows; j++) {
math_t d = 0;
for (int k = 0; k < n_cols; k++) {
int idx_i = ws_idx_host[i];
math_t diff = x_host[idx_i + k * n_rows] - x_host[j + k * n_rows];
d += diff * diff;
}
tile_host_expected[i * n_rows + j] = exp(-params.gamma * d);
}
}
break;
}
}
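// Verifies a cached kernel tile row by row against the full host-side kernel
// matrix (tile_host_all); rows are looked up through ws_idx and kColIdx
// because the cache may permute the working set.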
void check(const math_t *tile_dev, int n_ws, int n_rows, const int *ws_idx,
const int *kColIdx) {
host_buffer<int> ws_idx_h(handle.get_host_allocator(), stream, n_ws);
raft::update_host(ws_idx_h.data(), ws_idx, n_ws, stream);
host_buffer<int> kidx_h(handle.get_host_allocator(), stream, n_ws);
raft::update_host(kidx_h.data(), kColIdx, n_ws, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// Note: kernel cache can permute the working set, so we have to look
// up which rows we compare
for (int i = 0; i < n_ws; i++) {
SCOPED_TRACE(i);
int widx = ws_idx_h[i] % n_rows;
int kidx = kidx_h[i];
const math_t *cache_row = tile_dev + kidx * n_rows;
const math_t *row_exp = tile_host_all + widx * n_rows;
EXPECT_TRUE(devArrMatchHost(row_exp, cache_row, n_rows,
raft::CompareApprox<math_t>(1e-6f)));
}
}
raft::handle_t handle;
cublasHandle_t cublas_handle;
cudaStream_t stream;
int n_rows = 4;
int n_cols = 2;
int n_ws = 3;
math_t *x_dev;
int *ws_idx_dev;
math_t x_host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
int ws_idx_host[4] = {0, 1, 3};
math_t tile_host_expected[12] = {26, 32, 38, 44, 32, 40,
48, 56, 44, 56, 68, 80};
math_t tile_host_all[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
};
TYPED_TEST_CASE_P(KernelCacheTest);
TYPED_TEST_P(KernelCacheTest, EvalTest) {
std::vector<Matrix::KernelParams> param_vec{
Matrix::KernelParams{Matrix::LINEAR, 3, 1, 0},
Matrix::KernelParams{Matrix::POLYNOMIAL, 2, 1.3, 1},
Matrix::KernelParams{Matrix::TANH, 2, 0.5, 2.4},
Matrix::KernelParams{Matrix::RBF, 2, 0.5, 0}};
float cache_size = 0;
for (auto params : param_vec) {
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(
params, this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
C_SVC);
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
// apply nonlinearity on tile_host_expected
this->ApplyNonlin(params);
ASSERT_TRUE(devArrMatchHost(this->tile_host_expected, tile_dev,
this->n_rows * this->n_ws,
raft::CompareApprox<TypeParam>(1e-6f)));
delete kernel;
}
}
TYPED_TEST_P(KernelCacheTest, CacheEvalTest) {
Matrix::KernelParams param{Matrix::LINEAR, 3, 1, 0};
float cache_size = sizeof(TypeParam) * this->n_rows * 32 / (1024.0 * 1024);
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(param,
this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
C_SVC);
for (int i = 0; i < 2; i++) {
// We calculate the cache tile multiple times to check that the cache lookup works
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
this->check(tile_dev, this->n_ws, this->n_rows, cache.GetWsIndices(),
cache.GetColIdxMap());
}
delete kernel;
}
TYPED_TEST_P(KernelCacheTest, SvrEvalTest) {
Matrix::KernelParams param{Matrix::LINEAR, 3, 1, 0};
float cache_size = sizeof(TypeParam) * this->n_rows * 32 / (1024.0 * 1024);
this->n_ws = 6;
int ws_idx_svr[6] = {0, 5, 1, 4, 3, 7};
raft::update_device(this->ws_idx_dev, ws_idx_svr, 6, this->stream);
Matrix::GramMatrixBase<TypeParam> *kernel =
Matrix::KernelFactory<TypeParam>::create(param,
this->handle.get_cublas_handle());
KernelCache<TypeParam> cache(this->handle, this->x_dev, this->n_rows,
this->n_cols, this->n_ws, kernel, cache_size,
EPSILON_SVR);
for (int i = 0; i < 2; i++) {
// We calculate the cache tile multiple times to check that the cache lookup works
TypeParam *tile_dev = cache.GetTile(this->ws_idx_dev);
this->check(tile_dev, this->n_ws, this->n_rows, cache.GetWsIndices(),
cache.GetColIdxMap());
}
delete kernel;
}
REGISTER_TYPED_TEST_CASE_P(KernelCacheTest, EvalTest, CacheEvalTest,
SvrEvalTest);
INSTANTIATE_TYPED_TEST_CASE_P(My, KernelCacheTest, FloatTypes);
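// GetResultsTest exercises Results::Get: the support vectors are the samples
// with nonzero alpha, dual_coefs holds alpha_i * y_i for those samples, idx
// their row indices, x_support their feature rows, and b the bias (computed
// differently when every support vector is at the upper bound C).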
template <typename math_t>
class GetResultsTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
}
void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); }
void TestResults() {
auto allocator = handle.get_device_allocator();
device_buffer<math_t> x_dev(allocator, stream, n_rows * n_cols);
raft::update_device(x_dev.data(), x_host, n_rows * n_cols, stream);
device_buffer<math_t> f_dev(allocator, stream, n_rows);
raft::update_device(f_dev.data(), f_host, n_rows, stream);
device_buffer<math_t> y_dev(allocator, stream, n_rows);
raft::update_device(y_dev.data(), y_host, n_rows, stream);
device_buffer<math_t> alpha_dev(allocator, stream, n_rows);
raft::update_device(alpha_dev.data(), alpha_host, n_rows, stream);
device_buffer<math_t> C_dev(allocator, stream, n_rows);
init_C(C, C_dev.data(), n_rows, stream);
Results<math_t> res(handle, x_dev.data(), y_dev.data(), n_rows, n_cols,
C_dev.data(), C_SVC);
res.Get(alpha_dev.data(), f_dev.data(), &dual_coefs, &n_coefs, &idx,
&x_support, &b);
ASSERT_EQ(n_coefs, 7);
math_t dual_coefs_exp[] = {-0.1, -0.2, -1.5, 0.2, 0.4, 1.5, 1.5};
EXPECT_TRUE(devArrMatchHost(dual_coefs_exp, dual_coefs, n_coefs,
raft::CompareApprox<math_t>(1e-6f)));
int idx_exp[] = {2, 3, 4, 6, 7, 8, 9};
EXPECT_TRUE(devArrMatchHost(idx_exp, idx, n_coefs, raft::Compare<int>()));
math_t x_support_exp[] = {3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 17, 18, 19, 20};
EXPECT_TRUE(devArrMatchHost(x_support_exp, x_support, n_coefs * n_cols,
raft::CompareApprox<math_t>(1e-6f)));
EXPECT_FLOAT_EQ(b, -6.25f);
if (n_coefs > 0) {
allocator->deallocate(dual_coefs, n_coefs * sizeof(math_t), stream);
allocator->deallocate(idx, n_coefs * sizeof(int), stream);
allocator->deallocate(x_support, n_coefs * n_cols * sizeof(math_t),
stream);
}
// Modify the test so that all SVs are at the upper bound; b is then calculated differently
math_t alpha_host2[10] = {0, 0, 1.5, 1.5, 1.5, 0, 1.5, 1.5, 1.5, 1.5};
raft::update_device(alpha_dev.data(), alpha_host2, n_rows, stream);
res.Get(alpha_dev.data(), f_dev.data(), &dual_coefs, &n_coefs, &idx,
&x_support, &b);
EXPECT_FLOAT_EQ(b, -5.5f);
}
int n_rows = 10;
int n_cols = 2;
math_t x_host[20] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
math_t f_host[10] = {1, 3, 10, 4, 2, 8, 6, 5, 9, 7};
math_t y_host[10] = {-1, -1, -1, -1, -1, 1, 1, 1, 1, 1};
math_t alpha_host[10] = {0, 0, 0.1, 0.2, 1.5, 0, 0.2, 0.4, 1.5, 1.5};
// l l l/u l/u u u l/u l/u l l
math_t C = 1.5;
math_t *dual_coefs;
int n_coefs;
int *idx;
math_t *x_support;
math_t b;
raft::handle_t handle;
cudaStream_t stream;
};
TYPED_TEST_CASE(GetResultsTest, FloatTypes);
TYPED_TEST(GetResultsTest, Results) { this->TestResults(); }
svmParameter getDefaultSvmParameter() {
svmParameter param;
param.C = 1;
param.tol = 0.001;
param.cache_size = 200;
param.max_iter = -1;
param.nochange_steps = 1000;
param.verbosity = CUML_LEVEL_INFO;
param.epsilon = 0.1;
param.svmType = C_SVC;
return param;
}
template <typename math_t>
class SmoUpdateTest : public ::testing::Test {
protected:
void SetUp() override {
stream = handle.get_stream();
cublasHandle_t cublas_handle = handle.get_cublas_handle();
raft::allocate(f_dev, n_rows, true);
raft::allocate(kernel_dev, n_rows * n_ws);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
raft::allocate(delta_alpha_dev, n_ws);
raft::update_device(delta_alpha_dev, delta_alpha_host, n_ws, stream);
}
void RunTest() {
svmParameter param = getDefaultSvmParameter();
SmoSolver<float> smo(handle, param, nullptr);
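// UpdateF is expected to apply the working-set gradient update, effectively
// f[i] += sum_j kernel[i + j * n_rows] * delta_alpha[j]; a host-side sketch of
// that update would be:
// for (int i = 0; i < n_rows; i++)
// for (int j = 0; j < n_ws; j++)
// f[i] += kernel_host[i + j * n_rows] * delta_alpha_host[j];
// With the values above this yields {0.1, 0, 0.3, 0.2, 0.5, 0.4} (the second
// entry differs from zero only by float rounding), which is checked below.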
smo.UpdateF(f_dev, n_rows, delta_alpha_dev, n_ws, kernel_dev);
float f_host_expected[] = {0.1f, 7.4505806e-9f, 0.3f, 0.2f, 0.5f, 0.4f};
EXPECT_TRUE(devArrMatchHost(f_host_expected, f_dev, n_rows,
raft::CompareApprox<math_t>(1e-6)));
}
void TearDown() override {
CUDA_CHECK(cudaFree(delta_alpha_dev));
CUDA_CHECK(cudaFree(kernel_dev));
CUDA_CHECK(cudaFree(f_dev));
}
raft::handle_t handle;
cudaStream_t stream;
int n_rows = 6;
int n_ws = 2;
float *kernel_dev;
float *f_dev;
float *delta_alpha_dev;
float kernel_host[12] = {3, 5, 4, 6, 5, 7, 4, 5, 7, 8, 10, 11};
float delta_alpha_host[2] = {-0.1f, 0.1f};
};
TYPED_TEST_CASE(SmoUpdateTest, FloatTypes);
TYPED_TEST(SmoUpdateTest, Update) { this->RunTest(); }
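// SmoBlockSolverTest runs a single iteration of the SmoBlockSolve kernel on a
// tiny 4-sample working set and checks the resulting alpha values, that
// delta_alpha equals the element-wise product y * alpha, and the two-element
// return buffer written by the kernel.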
template <typename math_t>
class SmoBlockSolverTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
cublas_handle = handle.get_cublas_handle();
raft::allocate(ws_idx_dev, n_ws);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, n_rows);
raft::allocate(f_dev, n_rows);
raft::allocate(alpha_dev, n_rows, true);
raft::allocate(delta_alpha_dev, n_ws, true);
raft::allocate(kernel_dev, n_ws * n_rows);
raft::allocate(return_buff_dev, 2);
init_C(C, C_dev, n_rows, stream);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
raft::update_device(f_dev, f_host, n_rows, stream);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
}
public: // because of the device lambda
void testBlockSolve() {
SmoBlockSolve<math_t, 1024><<<1, n_ws, 0, stream>>>(
y_dev, n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3f, return_buff_dev, 1);
CUDA_CHECK(cudaPeekAtLastError());
math_t return_buff_exp[2] = {0.2, 1};
EXPECT_TRUE(devArrMatchHost(return_buff_exp, return_buff_dev, 2,
raft::CompareApprox<math_t>(1e-6)));
math_t *delta_alpha_calc;
raft::allocate(delta_alpha_calc, n_rows);
raft::linalg::binaryOp(
delta_alpha_calc, y_dev, alpha_dev, n_rows,
[] __device__(math_t a, math_t b) { return a * b; }, stream);
EXPECT_TRUE(raft::devArrMatch(delta_alpha_dev, delta_alpha_calc, n_rows,
raft::CompareApprox<math_t>(1e-6)));
CUDA_CHECK(cudaFree(delta_alpha_calc));
math_t alpha_expected[] = {0, 0.1f, 0.1f, 0};
EXPECT_TRUE(raft::devArrMatch(alpha_expected, alpha_dev, n_rows,
raft::CompareApprox<math_t>(1e-6)));
}
protected:
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(y_dev));
CUDA_CHECK(cudaFree(C_dev));
CUDA_CHECK(cudaFree(f_dev));
CUDA_CHECK(cudaFree(ws_idx_dev));
CUDA_CHECK(cudaFree(alpha_dev));
CUDA_CHECK(cudaFree(delta_alpha_dev));
CUDA_CHECK(cudaFree(kernel_dev));
CUDA_CHECK(cudaFree(return_buff_dev));
}
raft::handle_t handle;
cudaStream_t stream;
cublasHandle_t cublas_handle;
int n_rows = 4;
int n_cols = 2;
int n_ws = 4;
int *ws_idx_dev;
math_t *y_dev;
math_t *f_dev;
math_t *C_dev;
math_t *alpha_dev;
math_t *delta_alpha_dev;
math_t *kernel_dev;
math_t *return_buff_dev;
int ws_idx_host[4] = {0, 1, 2, 3};
math_t y_host[4] = {1, 1, -1, -1};
math_t C = 1.5;
math_t f_host[4] = {0.4, 0.3, 0.5, 0.1};
math_t kernel_host[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
};
TYPED_TEST_CASE(SmoBlockSolverTest, FloatTypes);
// test a single iteration of the block solver
TYPED_TEST(SmoBlockSolverTest, SolveSingleTest) { this->testBlockSolve(); }
template <typename math_t>
struct smoInput {
math_t C;
math_t tol;
KernelParams kernel_params;
int max_iter;
int max_inner_iter;
};
template <typename math_t>
struct svcInput {
math_t C;
math_t tol;
KernelParams kernel_params;
int n_rows;
int n_cols;
math_t *x_dev;
math_t *y_dev;
bool predict;
};
template <typename math_t>
struct smoOutput {
int n_support;
std::vector<math_t> dual_coefs;
math_t b;
std::vector<math_t> w;
std::vector<math_t> x_support;
std::vector<int> idx;
};
// If we want to compare decision function values too
template <typename math_t>
struct smoOutput2 { //: smoOutput<math_t> {
int n_support;
std::vector<math_t> dual_coefs;
math_t b;
std::vector<math_t> w;
std::vector<math_t> x_support;
std::vector<int> idx;
std::vector<math_t> decision_function;
};
template <typename math_t>
smoOutput<math_t> toSmoOutput(smoOutput2<math_t> x) {
smoOutput<math_t> y{x.n_support, x.dual_coefs, x.b, x.w, x.x_support, x.idx};
return y;
}
template <typename math_t>
struct svmTol {
math_t b;
math_t cs;
int n_sv;
};
template <typename math_t>
void checkResults(svmModel<math_t> model, smoOutput<math_t> expected,
cudaStream_t stream,
svmTol<math_t> tol = svmTol<math_t>{0.001, 0.99999, -1}) {
math_t *dcoef_exp =
expected.dual_coefs.size() > 0 ? expected.dual_coefs.data() : nullptr;
math_t *w_exp = expected.w.size() > 0 ? expected.w.data() : nullptr;
math_t *x_support_exp =
expected.x_support.size() > 0 ? expected.x_support.data() : nullptr;
int *idx_exp = expected.idx.size() > 0 ? expected.idx.data() : nullptr;
math_t ay_tol = 1e-5;
if (tol.n_sv == -1) {
tol.n_sv = expected.n_support * 0.01;
if (expected.n_support > 10 && tol.n_sv < 3) tol.n_sv = 3;
}
EXPECT_LE(abs(model.n_support - expected.n_support), tol.n_sv);
if (dcoef_exp) {
EXPECT_TRUE(devArrMatchHost(dcoef_exp, model.dual_coefs, model.n_support,
raft::CompareApprox<math_t>(1e-3f)));
}
math_t *dual_coefs_host = new math_t[model.n_support];
raft::update_host(dual_coefs_host, model.dual_coefs, model.n_support, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
math_t ay = 0;
for (int i = 0; i < model.n_support; i++) {
ay += dual_coefs_host[i];
}
// Test if \sum \alpha_i y_i = 0
EXPECT_LT(raft::abs(ay), ay_tol);
if (x_support_exp) {
EXPECT_TRUE(devArrMatchHost(x_support_exp, model.x_support,
model.n_support * model.n_cols,
raft::CompareApprox<math_t>(1e-6f)));
}
if (idx_exp) {
EXPECT_TRUE(devArrMatchHost(idx_exp, model.support_idx, model.n_support,
raft::Compare<int>()));
}
math_t *x_support_host = new math_t[model.n_support * model.n_cols];
raft::update_host(x_support_host, model.x_support,
model.n_support * model.n_cols, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
if (w_exp) {
std::vector<math_t> w(model.n_cols, 0);
for (int i = 0; i < model.n_support; i++) {
for (int j = 0; j < model.n_cols; j++)
w[j] += x_support_host[i + model.n_support * j] * dual_coefs_host[i];
}
// Calculate the cosine similarity between w and w_exp
math_t abs_w = 0;
math_t abs_w_exp = 0;
math_t cs = 0;
for (int i = 0; i < model.n_cols; i++) {
abs_w += w[i] * w[i];
abs_w_exp += w_exp[i] * w_exp[i];
cs += w[i] * w_exp[i];
}
cs /= sqrt(abs_w * abs_w_exp);
EXPECT_GT(cs, tol.cs);
}
EXPECT_LT(raft::abs(model.b - expected.b), tol.b);
delete[] dual_coefs_host;
delete[] x_support_host;
}
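// SmoSolverTest uses a fixed 6-sample, 2-feature toy problem (x_host, y_host)
// together with its precomputed linear-kernel matrix (kernel_host) to exercise
// the block solver, the full SMO solver, SVC, and the blob-based end-to-end
// tests.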
template <typename math_t>
class SmoSolverTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
raft::allocate(x_dev, n_rows * n_cols);
raft::allocate(ws_idx_dev, n_ws);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, n_rows);
raft::allocate(y_pred, n_rows);
raft::allocate(f_dev, n_rows);
raft::allocate(alpha_dev, n_rows, true);
raft::allocate(delta_alpha_dev, n_ws, true);
raft::allocate(kernel_dev, n_ws * n_rows);
raft::allocate(return_buff_dev, 2);
raft::allocate(sample_weights_dev, n_rows);
LinAlg::range(sample_weights_dev, 1, n_rows + 1, stream);
cublas_handle = handle.get_cublas_handle();
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::update_device(ws_idx_dev, ws_idx_host, n_ws, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
init_C(C, C_dev, n_rows, stream);
raft::update_device(f_dev, f_host, n_rows, stream);
raft::update_device(kernel_dev, kernel_host, n_ws * n_rows, stream);
CUDA_CHECK(
cudaMemsetAsync(delta_alpha_dev, 0, n_ws * sizeof(math_t), stream));
kernel = new Matrix::GramMatrixBase<math_t>(cublas_handle);
}
void FreeResultBuffers() {
if (dual_coefs_d) CUDA_CHECK(cudaFree(dual_coefs_d));
if (idx_d) CUDA_CHECK(cudaFree(idx_d));
if (x_support_d) CUDA_CHECK(cudaFree(x_support_d));
dual_coefs_d = nullptr;
idx_d = nullptr;
x_support_d = nullptr;
}
void TearDown() override {
delete kernel;
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(x_dev));
CUDA_CHECK(cudaFree(y_dev));
CUDA_CHECK(cudaFree(C_dev));
CUDA_CHECK(cudaFree(y_pred));
CUDA_CHECK(cudaFree(f_dev));
CUDA_CHECK(cudaFree(ws_idx_dev));
CUDA_CHECK(cudaFree(alpha_dev));
CUDA_CHECK(cudaFree(delta_alpha_dev));
CUDA_CHECK(cudaFree(kernel_dev));
CUDA_CHECK(cudaFree(return_buff_dev));
CUDA_CHECK(cudaFree(sample_weights_dev));
FreeResultBuffers();
}
public:
void blockSolveTest() {
SmoBlockSolve<math_t, 1024><<<1, n_ws, 0, stream>>>(
y_dev, n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3, return_buff_dev);
CUDA_CHECK(cudaPeekAtLastError());
math_t return_buff[2];
raft::update_host(return_buff, return_buff_dev, 2, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
EXPECT_FLOAT_EQ(return_buff[0], 2.0f) << return_buff[0];
EXPECT_LT(return_buff[1], 100) << return_buff[1];
// checkResults won't work here, because it expects that GetResults was called
math_t *delta_alpha_calc;
raft::allocate(delta_alpha_calc, n_rows);
raft::linalg::binaryOp(
delta_alpha_calc, y_dev, alpha_dev, n_rows,
[] __device__(math_t a, math_t b) { return a * b; }, stream);
EXPECT_TRUE(raft::devArrMatch(delta_alpha_dev, delta_alpha_calc, n_rows,
raft::CompareApprox<math_t>(1e-6)));
CUDA_CHECK(cudaFree(delta_alpha_calc));
math_t alpha_expected[] = {0.6f, 0, 1, 1, 0, 0.6f};
//for C=10: {0.25f, 0, 2.25f, 3.75f, 0, 1.75f};
EXPECT_TRUE(raft::devArrMatch(alpha_expected, alpha_dev, n_rows,
raft::CompareApprox<math_t>(1e-6)));
math_t host_alpha[6];
raft::update_host(host_alpha, alpha_dev, n_rows, stream);
math_t w[] = {0, 0};
math_t ay = 0;
for (int i = 0; i < n_rows; i++) {
EXPECT_FLOAT_EQ(host_alpha[i], alpha_expected[i]) << "alpha " << i;
w[0] += x_host[i] * host_alpha[i] * y_host[i];
w[1] += x_host[i + n_rows] * host_alpha[i] * y_host[i];
ay += host_alpha[i] * y_host[i];
}
EXPECT_FLOAT_EQ(ay, 0.0);
EXPECT_FLOAT_EQ(w[0], -0.4);
EXPECT_FLOAT_EQ(w[1], 1.2);
// for C=10
//EXPECT_FLOAT_EQ(w[0], -2.0);
//EXPECT_FLOAT_EQ(w[1], 2.0);
}
void svrBlockSolveTest() {
int n_ws = 4;
int n_rows = 2;
// int n_cols = 1;
// math_t x[2] = {1, 2};
// yr = {2, 3}
math_t f[4] = {-1.9, -2.9, -2.1 - 3.1};
math_t kernel[4] = {1, 2, 2, 4};
// ws_idx is defined as {0, 1, 2, 3}
int kColIdx[4] = {0, 1, 0, 1};
device_buffer<int> kColIdx_dev(handle.get_device_allocator(), stream, 4);
raft::update_device(f_dev, f, 4, stream);
raft::update_device(kernel_dev, kernel, 4, stream);
raft::update_device(kColIdx_dev.data(), kColIdx, 4, stream);
SmoBlockSolve<math_t, 1024><<<1, n_ws, 0, stream>>>(
y_dev, 2 * n_rows, alpha_dev, n_ws, delta_alpha_dev, f_dev, kernel_dev,
ws_idx_dev, C_dev, 1e-3, return_buff_dev, 10, EPSILON_SVR,
kColIdx_dev.data());
CUDA_CHECK(cudaPeekAtLastError());
math_t return_buff[2];
raft::update_host(return_buff, return_buff_dev, 2, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
EXPECT_LT(return_buff[1], 10) << return_buff[1];
math_t alpha_exp[] = {0, 0.8, 0.8, 0};
EXPECT_TRUE(raft::devArrMatch(alpha_exp, alpha_dev, 4,
raft::CompareApprox<math_t>(1e-6)));
math_t dalpha_exp[] = {-0.8, 0.8};
EXPECT_TRUE(raft::devArrMatch(dalpha_exp, delta_alpha_dev, 2,
raft::CompareApprox<math_t>(1e-6)));
}
protected:
raft::handle_t handle;
cudaStream_t stream;
Matrix::GramMatrixBase<math_t> *kernel;
int n_rows = 6;
const int n_cols = 2;
int n_ws = 6;
math_t *x_dev;
int *ws_idx_dev;
math_t *y_dev;
math_t *C_dev;
math_t *y_pred;
math_t *f_dev;
math_t *alpha_dev;
math_t *delta_alpha_dev;
math_t *kernel_dev;
math_t *return_buff_dev;
math_t *sample_weights_dev;
math_t x_host[12] = {1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3};
int ws_idx_host[6] = {0, 1, 2, 3, 4, 5};
math_t y_host[6] = {-1, -1, 1, -1, 1, 1};
math_t C = 1;
math_t f_host[6] = {1, 1, -1, 1, -1, -1};
math_t kernel_host[36] = {2, 3, 3, 4, 4, 5, 3, 5, 4, 6, 5, 7,
3, 4, 5, 6, 7, 8, 4, 6, 6, 8, 8, 10,
4, 5, 7, 8, 10, 11, 5, 7, 8, 10, 11, 13};
cublasHandle_t cublas_handle;
math_t *dual_coefs_d = nullptr;
int n_coefs;
int *idx_d = nullptr;
math_t *x_support_d = nullptr;
math_t b;
};
TYPED_TEST_CASE(SmoSolverTest, FloatTypes);
TYPED_TEST(SmoSolverTest, BlockSolveTest) { this->blockSolveTest(); }
TYPED_TEST(SmoSolverTest, SvrBlockSolveTest) { this->svrBlockSolveTest(); }
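// Maps the KernelParams::kernel enum to a short printable name; used in the
// SCOPED_TRACE / operator<< output of the parameterized tests below.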
std::string kernelName(KernelParams k) {
std::vector<std::string> names{"linear", "poly", "rbf", "tanh"};
return names[k.kernel];
}
template <typename math_t>
std::ostream &operator<<(std::ostream &os, const smoInput<math_t> &b) {
os << kernelName(b.kernel_params) << ", C=" << b.C << ", tol=" << b.tol;
return os;
}
TYPED_TEST(SmoSolverTest, SmoSolveTest) {
std::vector<std::pair<smoInput<TypeParam>, smoOutput<TypeParam>>> data{
{smoInput<TypeParam>{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, 100, 1},
smoOutput<TypeParam>{4, // n_sv
{-0.6, 1, -1, 0.6}, // dual_coefs
-1.8, // b
{-0.4, 1.2}, // w
{1, 1, 2, 2, 1, 2, 2, 3}, // x_support
{0, 2, 3, 5}}}, // support idx
{smoInput<TypeParam>{10, 0.001, KernelParams{LINEAR, 3, 1, 0}, 100, 1},
smoOutput<TypeParam>{3, {-2, 4, -2, 0, 0}, -1.0, {-2, 2}, {}, {}}},
{smoInput<TypeParam>{1, 1e-6, KernelParams{POLYNOMIAL, 3, 1, 1}, 100, 1},
smoOutput<TypeParam>{3,
{-0.02556136, 0.03979708, -0.01423571},
-1.07739149,
{},
{1, 1, 2, 1, 2, 2},
{0, 2, 3}}}};
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(p);
svmParameter param = getDefaultSvmParameter();
param.C = p.C;
param.tol = p.tol;
//param.max_iter = p.max_iter;
GramMatrixBase<TypeParam> *kernel = KernelFactory<TypeParam>::create(
p.kernel_params, this->handle.get_cublas_handle());
SmoSolver<TypeParam> smo(this->handle, param, kernel);
svmModel<TypeParam> model{0, this->n_cols, 0, nullptr,
nullptr, nullptr, 0, nullptr};
smo.Solve(this->x_dev, this->n_rows, this->n_cols, this->y_dev, nullptr,
&model.dual_coefs, &model.n_support, &model.x_support,
&model.support_idx, &model.b, p.max_iter, p.max_inner_iter);
checkResults(model, exp, this->stream);
svmFreeBuffers(this->handle, model);
}
}
TYPED_TEST(SmoSolverTest, SvcTest) {
std::vector<std::pair<svcInput<TypeParam>, smoOutput2<TypeParam>>> data{
{svcInput<TypeParam>{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{4,
{-0.6, 1, -1, 0.6},
-1.8f,
{-0.4, 1.2},
{1, 1, 2, 2, 1, 2, 2, 3},
{0, 2, 3, 5},
{-1.0, -1.4, 0.2, -0.2, 1.4, 1.0}}},
{// C == 0 marks a special test case with sample weights
svcInput<TypeParam>{0, 0.001, KernelParams{LINEAR, 3, 1, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{4,
{},
-1.0f,
{-2, 2},
{1, 1, 2, 2, 1, 2, 2, 3},
{0, 2, 3, 5},
{-1.0, -3.0, 1.0, -1.0, 3.0, 1.0}}},
{svcInput<TypeParam>{1, 1e-6, KernelParams{POLYNOMIAL, 3, 1, 0},
this->n_rows, this->n_cols, this->x_dev, this->y_dev,
true},
smoOutput2<TypeParam>{3,
{-0.03900895, 0.05904058, -0.02003163},
-0.99999959,
{},
{1, 1, 2, 1, 2, 2},
{0, 2, 3},
{-0.9996812, -2.60106647, 0.9998406, -1.0001594,
6.49681105, 4.31951232}}},
{svcInput<TypeParam>{10, 1e-6, KernelParams{TANH, 3, 0.3, 1.0},
this->n_rows, this->n_cols, this->x_dev, this->y_dev,
false},
smoOutput2<TypeParam>{6,
{-10., -10., 10., -10., 10., 10.},
-0.3927505,
{},
{1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3},
{0, 1, 2, 3, 4, 5},
{0.25670694, -0.16451539, 0.16451427, -0.1568888,
-0.04496891, -0.2387212}}},
{svcInput<TypeParam>{1, 1.0e-6, KernelParams{RBF, 0, 0.15, 0}, this->n_rows,
this->n_cols, this->x_dev, this->y_dev, true},
smoOutput2<TypeParam>{6,
{-1., -1, 1., -1., 1, 1.},
0,
{},
{1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 3, 3},
{0, 1, 2, 3, 4, 5},
{-0.71964003, -0.95941954, 0.13929202, -0.13929202,
0.95941954, 0.71964003}}}};
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(kernelName(p.kernel_params));
TypeParam *sample_weights = nullptr;
if (p.C == 0) {
p.C = 1;
sample_weights = this->sample_weights_dev;
}
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params);
svc.fit(p.x_dev, p.n_rows, p.n_cols, p.y_dev, sample_weights);
checkResults(svc.model, toSmoOutput(exp), this->stream);
device_buffer<TypeParam> y_pred(this->handle.get_device_allocator(),
this->stream, p.n_rows);
if (p.predict) {
svc.predict(p.x_dev, p.n_rows, p.n_cols, y_pred.data());
EXPECT_TRUE(raft::devArrMatch(this->y_dev, y_pred.data(), p.n_rows,
raft::CompareApprox<TypeParam>(1e-6f)));
}
if (exp.decision_function.size() > 0) {
svc.decisionFunction(p.x_dev, p.n_rows, p.n_cols, y_pred.data());
EXPECT_TRUE(devArrMatchHost(exp.decision_function.data(), y_pred.data(),
p.n_rows,
raft::CompareApprox<TypeParam>(1e-3f)));
}
}
}
struct blobInput {
double C;
double tol;
KernelParams kernel_params;
int n_rows;
int n_cols;
};
std::ostream &operator<<(std::ostream &os, const blobInput &b) {
os << kernelName(b.kernel_params) << " " << b.n_rows << "x" << b.n_cols;
return os;
}
// Type-casting helper kernel; a workaround until there is progress with Issue #935
template <typename inType, typename outType>
__global__ void cast(outType *out, int n, inType *in) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) out[tid] = in[tid];
}
// To have the same input data for both single and double precision,
// we generate the blobs in single precision only, and cast to dp if needed.
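// The helper below also transposes x and casts the integer labels returned by
// make_blobs to math_t.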
template <typename math_t>
void make_blobs(const raft::handle_t &handle, math_t *x, math_t *y, int n_rows,
int n_cols, int n_cluster, float *centers = nullptr) {
auto allocator = handle.get_device_allocator();
auto cublas_h = handle.get_cublas_handle();
auto stream = handle.get_stream();
device_buffer<float> x_float(allocator, stream, n_rows * n_cols);
device_buffer<int> y_int(allocator, stream, n_rows);
Datasets::make_blobs(handle, x_float.data(), y_int.data(), n_rows, n_cols,
n_cluster, true, centers, (float *)nullptr, 1.0f, true,
-2.0f, 2.0f, 0);
int TPB = 256;
if (std::is_same<float, math_t>::value) {
raft::linalg::transpose(handle, x_float.data(), (float *)x, n_cols, n_rows,
stream);
} else {
device_buffer<math_t> x2(allocator, stream, n_rows * n_cols);
cast<<<raft::ceildiv(n_rows * n_cols, TPB), TPB, 0, stream>>>(
x2.data(), n_rows * n_cols, x_float.data());
raft::linalg::transpose(handle, x2.data(), x, n_cols, n_rows, stream);
CUDA_CHECK(cudaPeekAtLastError());
}
cast<<<raft::ceildiv(n_rows, TPB), TPB, 0, stream>>>(y, n_rows, y_int.data());
CUDA_CHECK(cudaPeekAtLastError());
}
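// Returns 1 when the two tuple elements (predicted label vs. reference label)
// match; used with thrust::transform over a zip iterator to count correct
// predictions.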
struct is_same_functor {
template <typename Tuple>
__host__ __device__ int operator()(Tuple t) {
return thrust::get<0>(t) == thrust::get<1>(t);
}
};
TYPED_TEST(SmoSolverTest, BlobPredict) {
// Pair.second is the expected accuracy. It might change if the Rng changes.
std::vector<std::pair<blobInput, TypeParam>> data{
{blobInput{1, 0.001, KernelParams{LINEAR, 3, 1, 0}, 200, 10}, 98},
{blobInput{1, 0.001, KernelParams{POLYNOMIAL, 3, 1, 0}, 200, 10}, 98},
{blobInput{1, 0.001, KernelParams{RBF, 3, 1, 0}, 200, 2}, 98},
{blobInput{1, 0.009, KernelParams{TANH, 3, 0.1, 0}, 200, 10}, 98}};
  // This should be larger than N_PRED_BATCH in svcPredict
const int n_pred = 5000;
auto allocator = this->handle.get_device_allocator();
for (auto d : data) {
auto p = d.first;
SCOPED_TRACE(p);
// explicit centers for the blobs
device_buffer<float> centers(allocator, this->stream, 2 * p.n_cols);
thrust::device_ptr<float> thrust_ptr(centers.data());
thrust::fill(thrust::cuda::par.on(this->stream), thrust_ptr,
thrust_ptr + p.n_cols, -5.0f);
thrust::fill(thrust::cuda::par.on(this->stream), thrust_ptr + p.n_cols,
thrust_ptr + 2 * p.n_cols, +5.0f);
device_buffer<TypeParam> x(allocator, this->stream, p.n_rows * p.n_cols);
device_buffer<TypeParam> y(allocator, this->stream, p.n_rows);
device_buffer<TypeParam> x_pred(allocator, this->stream, n_pred * p.n_cols);
device_buffer<TypeParam> y_pred(allocator, this->stream, n_pred);
make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2,
centers.data());
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params, 0, -1, 50,
CUML_LEVEL_INFO);
svc.fit(x.data(), p.n_rows, p.n_cols, y.data());
// Create a different dataset for prediction
make_blobs(this->handle, x_pred.data(), y_pred.data(), n_pred, p.n_cols, 2,
centers.data());
device_buffer<TypeParam> y_pred2(this->handle.get_device_allocator(),
this->stream, n_pred);
svc.predict(x_pred.data(), n_pred, p.n_cols, y_pred2.data());
// Count the number of correct predictions
device_buffer<int> is_correct(this->handle.get_device_allocator(),
this->stream, n_pred);
thrust::device_ptr<TypeParam> ptr1(y_pred.data());
thrust::device_ptr<TypeParam> ptr2(y_pred2.data());
thrust::device_ptr<int> ptr3(is_correct.data());
auto first = thrust::make_zip_iterator(thrust::make_tuple(ptr1, ptr2));
auto last = thrust::make_zip_iterator(
thrust::make_tuple(ptr1 + n_pred, ptr2 + n_pred));
thrust::transform(thrust::cuda::par.on(this->stream), first, last, ptr3,
is_same_functor());
int n_correct =
thrust::reduce(thrust::cuda::par.on(this->stream), ptr3, ptr3 + n_pred);
TypeParam accuracy = 100 * n_correct / n_pred;
TypeParam accuracy_exp = d.second;
EXPECT_GE(accuracy, accuracy_exp);
}
}
TYPED_TEST(SmoSolverTest, MemoryLeak) {
  // We check that the same amount of free memory is available on the GPU
  // before and after we call SVM. This can help catch memory leaks, but it is
  // not 100% reliable: small allocations might be pooled together by
  // cudaMalloc, and some of those would be missed by this method.
enum class ThrowException { Yes, No };
std::vector<std::pair<blobInput, ThrowException>> data{
{blobInput{1, 0.001, KernelParams{LINEAR, 3, 0.01, 0}, 1000, 1000},
ThrowException::No},
{blobInput{1, 0.001, KernelParams{POLYNOMIAL, 400, 5, 10}, 1000, 1000},
ThrowException::Yes}};
  // For the second set of input parameters training will fail: some kernel
  // function values would be 1e400 or larger, which does not fit into fp64.
  // This leads to a NaN diff in SmoSolver, which will throw an exception
  // to stop fitting.
size_t free1, total, free2;
CUDA_CHECK(cudaMemGetInfo(&free1, &total));
auto allocator = this->handle.get_device_allocator();
for (auto d : data) {
auto p = d.first;
SCOPED_TRACE(p);
device_buffer<TypeParam> x(allocator, this->stream, p.n_rows * p.n_cols);
device_buffer<TypeParam> y(allocator, this->stream, p.n_rows);
make_blobs(this->handle, x.data(), y.data(), p.n_rows, p.n_cols, 2);
SVC<TypeParam> svc(this->handle, p.C, p.tol, p.kernel_params);
if (d.second == ThrowException::Yes) {
// We want to check whether we leak any memory while we unwind the stack
EXPECT_THROW(svc.fit(x.data(), p.n_rows, p.n_cols, y.data()),
raft::exception);
} else {
svc.fit(x.data(), p.n_rows, p.n_cols, y.data());
device_buffer<TypeParam> y_pred(this->handle.get_device_allocator(),
this->stream, p.n_rows);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
CUDA_CHECK(cudaMemGetInfo(&free2, &total));
float delta = (free1 - free2);
// Just to make sure that we measure any mem consumption at all:
// we check if we see the memory consumption of x[n_rows*n_cols].
// If this error is triggered, increasing the test size might help to fix
// it (one could additionally control the exec time by the max_iter arg to
// SVC).
EXPECT_GT(delta, p.n_rows * p.n_cols * 4);
CUDA_CHECK(cudaStreamSynchronize(this->stream));
svc.predict(x.data(), p.n_rows, p.n_cols, y_pred.data());
}
}
CUDA_CHECK(cudaMemGetInfo(&free2, &total));
float delta = (free1 - free2);
EXPECT_EQ(delta, 0);
}
template <typename math_t>
struct SvrInput {
svmParameter param;
KernelParams kernel;
int n_rows;
int n_cols;
std::vector<math_t> x;
std::vector<math_t> y;
  std::vector<math_t> sample_weights;
};
template <typename math_t>
std::ostream &operator<<(std::ostream &os, const SvrInput<math_t> &b) {
os << kernelName(b.kernel) << " " << b.n_rows << "x" << b.n_cols
<< ", C=" << b.param.C << ", tol=" << b.param.tol;
return os;
}
template <typename math_t>
class SvrTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
handle.set_stream(stream);
allocator = handle.get_device_allocator();
raft::allocate(x_dev, n_rows * n_cols);
raft::allocate(y_dev, n_rows);
raft::allocate(C_dev, 2 * n_rows);
raft::allocate(y_pred, n_rows);
raft::allocate(yc, n_train);
raft::allocate(f, n_train);
raft::allocate(alpha, n_train);
raft::update_device(x_dev, x_host, n_rows * n_cols, stream);
raft::update_device(y_dev, y_host, n_rows, stream);
model.n_support = 0;
model.dual_coefs = nullptr;
model.x_support = nullptr;
model.support_idx = nullptr;
model.n_classes = 0;
model.unique_labels = nullptr;
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(x_dev));
CUDA_CHECK(cudaFree(y_dev));
CUDA_CHECK(cudaFree(C_dev));
CUDA_CHECK(cudaFree(y_pred));
CUDA_CHECK(cudaFree(yc));
CUDA_CHECK(cudaFree(f));
CUDA_CHECK(cudaFree(alpha));
svmFreeBuffers(handle, model);
}
public:
void TestSvrInit() {
svmParameter param = getDefaultSvmParameter();
param.svmType = EPSILON_SVR;
SmoSolver<math_t> smo(handle, param, nullptr);
smo.SvrInit(y_dev, n_rows, yc, f);
EXPECT_TRUE(devArrMatchHost(yc_exp, yc, n_train,
raft::CompareApprox<math_t>(1.0e-9)));
EXPECT_TRUE(devArrMatchHost(f_exp, f, n_train, raft::Compare<math_t>()));
}
void TestSvrWorkingSet() {
init_C((math_t)1.0, C_dev, 2 * n_rows, stream);
WorkingSet<math_t> *ws;
ws = new WorkingSet<math_t>(handle, stream, n_rows, 20, EPSILON_SVR);
EXPECT_EQ(ws->GetSize(), 2 * n_rows);
raft::update_device(alpha, alpha_host, n_train, stream);
raft::update_device(f, f_exp, n_train, stream);
raft::update_device(yc, yc_exp, n_train, stream);
ws->Select(f, alpha, yc, C_dev);
int exp_idx[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
ASSERT_TRUE(devArrMatchHost(exp_idx, ws->GetIndices(), ws->GetSize(),
raft::Compare<int>()));
delete ws;
ws = new WorkingSet<math_t>(handle, stream, n_rows, 10, EPSILON_SVR);
EXPECT_EQ(ws->GetSize(), 10);
ws->Select(f, alpha, yc, C_dev);
int exp_idx2[] = {6, 12, 5, 11, 3, 9, 8, 1, 7, 0};
ASSERT_TRUE(devArrMatchHost(exp_idx2, ws->GetIndices(), ws->GetSize(),
raft::Compare<int>()));
delete ws;
}
void TestSvrResults() {
raft::update_device(yc, yc_exp, n_train, stream);
init_C((math_t)0.001, C_dev, n_rows * 2, stream);
Results<math_t> res(handle, x_dev, yc, n_rows, n_cols, C_dev, EPSILON_SVR);
model.n_cols = n_cols;
raft::update_device(alpha, alpha_host, n_train, stream);
raft::update_device(f, f_exp, n_train, stream);
res.Get(alpha, f, &model.dual_coefs, &model.n_support, &model.support_idx,
&model.x_support, &model.b);
ASSERT_EQ(model.n_support, 5);
math_t dc_exp[] = {0.1, 0.3, -0.4, 0.9, -0.9};
EXPECT_TRUE(devArrMatchHost(dc_exp, model.dual_coefs, model.n_support,
raft::CompareApprox<math_t>(1.0e-6)));
math_t x_exp[] = {1, 2, 3, 5, 6};
EXPECT_TRUE(devArrMatchHost(x_exp, model.x_support,
model.n_support * n_cols,
raft::CompareApprox<math_t>(1.0e-6)));
int idx_exp[] = {0, 1, 2, 4, 5};
EXPECT_TRUE(devArrMatchHost(idx_exp, model.support_idx, model.n_support,
raft::CompareApprox<math_t>(1.0e-6)));
}
void TestSvrFitPredict() {
std::vector<std::pair<SvrInput<math_t>, smoOutput2<math_t>>> data{
{SvrInput<math_t>{
svmParameter{1, 0, 1, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
1, // n_cols
{0, 1}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 2.1, {0.8}, {0, 1}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 10, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
1, // n_cols
{1, 2}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 1.3, {0.8}, {1, 2}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 0, 1, 1, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
2, // n_rows
2, // n_cols
{1, 2, 5, 5}, //x
{2, 3} //y
},
smoOutput2<math_t>{
2, {-0.8, 0.8}, 1.3, {0.8, 0.0}, {1, 2, 5, 5}, {0, 1}, {2.1, 2.9}}},
{SvrInput<math_t>{
svmParameter{1, 0, 100, 10, 1e-6, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
7, // n_rows
1, //n_cols
{1, 2, 3, 4, 5, 6, 7}, //x
{0, 2, 3, 4, 5, 6, 8} //y
},
smoOutput2<math_t>{6,
{-1, 1, 0.45, -0.45, -1, 1},
-0.4,
{1.1},
{1.0, 2.0, 3.0, 5.0, 6.0, 7.0},
{0, 1, 2, 4, 5, 6},
{0.7, 1.8, 2.9, 4, 5.1, 6.2, 7.3}}},
// Almost same as above, but with sample weights
{SvrInput<math_t>{
svmParameter{1, 0, 100, 10, 1e-3, CUML_LEVEL_INFO, 0.1, EPSILON_SVR},
KernelParams{LINEAR, 3, 1, 0},
7, // n_rows
1, // n_cols
{1, 2, 3, 4, 5, 6, 7}, // x
{0, 2, 3, 0, 4, 8, 12}, // y
{1, 1, 1, 10, 2, 10, 1} // sample weights
},
smoOutput2<math_t>{6,
{},
-15.5,
{3.9},
{1.0, 2.0, 3.0, 4.0, 6.0, 7.0},
{0, 1, 2, 3, 5, 6},
{}}}};
for (auto d : data) {
auto p = d.first;
auto exp = d.second;
SCOPED_TRACE(p);
device_buffer<math_t> x_dev(allocator, stream, p.n_rows * p.n_cols);
raft::update_device(x_dev.data(), p.x.data(), p.n_rows * p.n_cols,
stream);
device_buffer<math_t> y_dev(allocator, stream, p.n_rows);
raft::update_device(y_dev.data(), p.y.data(), p.n_rows, stream);
MLCommon::device_buffer<math_t> sample_weights_dev(allocator, stream);
math_t *sample_weights = nullptr;
      if (!p.sample_weights.empty()) {
sample_weights_dev.resize(p.n_rows, stream);
sample_weights = sample_weights_dev.data();
        raft::update_device(sample_weights_dev.data(), p.sample_weights.data(),
p.n_rows, stream);
}
svrFit(handle, x_dev.data(), p.n_rows, p.n_cols, y_dev.data(), p.param,
p.kernel, model, sample_weights);
checkResults(model, toSmoOutput(exp), stream);
device_buffer<math_t> preds(allocator, stream, p.n_rows);
svcPredict(handle, x_dev.data(), p.n_rows, p.n_cols, p.kernel, model,
preds.data(), (math_t)200.0, false);
if (!exp.decision_function.empty()) {
EXPECT_TRUE(devArrMatchHost(exp.decision_function.data(), preds.data(),
p.n_rows,
raft::CompareApprox<math_t>(1.0e-5)));
}
}
}
protected:
raft::handle_t handle;
cudaStream_t stream;
std::shared_ptr<deviceAllocator> allocator;
int n_rows = 7;
int n_train = 2 * n_rows;
const int n_cols = 1;
svmModel<math_t> model;
math_t *x_dev;
math_t *y_dev;
math_t *C_dev;
math_t *y_pred;
math_t *yc;
math_t *f;
math_t *alpha;
math_t x_host[7] = {1, 2, 3, 4, 5, 6, 7};
math_t y_host[7] = {0, 2, 3, 4, 5, 6, 8};
math_t yc_exp[14] = {1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
math_t f_exp[14] = {0.1, -1.9, -2.9, -3.9, -4.9, -5.9, -7.9,
-0.1, -2.1, -3.1, -4.1, -5.1, -6.1, -8.1};
math_t alpha_host[14] = {0.2, 0.3, 0, 0, 1, 0.1, 0,
0.1, 0, 0.4, 0, 0.1, 1, 0};
};  // class SvrTest
typedef ::testing::Types<float> OnlyFp32;
TYPED_TEST_CASE(SvrTest, FloatTypes);
TYPED_TEST(SvrTest, Init) { this->TestSvrInit(); }
TYPED_TEST(SvrTest, WorkingSet) { this->TestSvrWorkingSet(); }
TYPED_TEST(SvrTest, Results) { this->TestSvrResults(); }
TYPED_TEST(SvrTest, FitPredict) { this->TestSvrFitPredict(); }
}; // namespace SVM
}; // namespace ML
|
86a606b7260c19f65010df9cbf1cbcc7cba12b37.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
#include <mpi.h>
#include <stdio.h>
#include <assert.h>
#define CHECK_RESULT(i, result, expected, msg) \
do { \
if (result != expected) { \
printf("%s: i = %d, expect %d, got %d\n", msg, i, expected, result); \
errs++; \
} \
} while (0)
int main(void)
{
int errs = 0;
hipStream_t stream;
hipStreamCreate(&stream);
int mpi_errno;
int rank, size;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int expected_sum = size * (size - 1) / 2;
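    /* wrap the HIP stream in an MPIX stream and derive a stream communicator
     * from it, so that collectives can be enqueued onto that stream */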
MPI_Info info;
MPI_Info_create(&info);
MPI_Info_set(info, "type", "hipStream_t");
MPIX_Info_set_hex(info, "value", &stream, sizeof(stream));
MPIX_Stream mpi_stream;
MPIX_Stream_create(info, &mpi_stream);
MPI_Info_free(&info);
MPI_Comm stream_comm;
MPIX_Stream_comm_create(MPI_COMM_WORLD, mpi_stream, &stream_comm);
#define N 10
/* TEST 1 - MPI_INT */
int buf[N];
void *d_buf, *d_result_buf;
hipMalloc(&d_buf, sizeof(buf));
hipMalloc(&d_result_buf, sizeof(buf));
for (int i = 0; i < N; i++) {
buf[i] = rank;
}
hipMemcpyAsync(d_buf, buf, sizeof(buf), hipMemcpyHostToDevice, stream);
mpi_errno = MPIX_Allreduce_enqueue(d_buf, d_result_buf, N, MPI_INT, MPI_SUM, stream_comm);
assert(mpi_errno == MPI_SUCCESS);
hipMemcpyAsync(buf, d_result_buf, sizeof(buf), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
hipFree(d_buf);
hipFree(d_result_buf);
for (int i = 0; i < N; i++) {
CHECK_RESULT(i, buf[i], expected_sum, "TEST 1");
}
/* TEST 2 - MPI_SHORT_INT (typically non-contig) */
struct {
short a;
int b;
} buf2[N];
hipMalloc(&d_buf, sizeof(buf2));
hipMalloc(&d_result_buf, sizeof(buf2));
    for (int i = 0; i < N; i++) {
/* MINLOC result should be {0, i % size} */
if (i % size == rank) {
buf2[i].a = 0;
} else {
buf2[i].a = rank + 1;
}
buf2[i].b = rank;
}
hipMemcpyAsync(d_buf, buf2, sizeof(buf2), hipMemcpyHostToDevice, stream);
mpi_errno = MPIX_Allreduce_enqueue(d_buf, d_result_buf, N, MPI_SHORT_INT, MPI_MINLOC, stream_comm);
assert(mpi_errno == MPI_SUCCESS);
hipMemcpyAsync(buf2, d_result_buf, sizeof(buf2), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
for (int i = 0; i < N; i++) {
CHECK_RESULT(i, buf2[i].a, 0, "TEST 2");
CHECK_RESULT(i, buf2[i].b, i % size, "TEST 2");
}
/* clean up */
MPI_Comm_free(&stream_comm);
MPIX_Stream_free(&mpi_stream);
hipStreamDestroy(stream);
int tot_errs;
MPI_Reduce(&errs, &tot_errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
if (tot_errs == 0) {
printf("No Errors\n");
} else {
printf("%d Errors\n", tot_errs);
}
}
MPI_Finalize();
return errs;
}
| 86a606b7260c19f65010df9cbf1cbcc7cba12b37.cu | /*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
#include <mpi.h>
#include <stdio.h>
#include <assert.h>
#define CHECK_RESULT(i, result, expected, msg) \
do { \
if (result != expected) { \
printf("%s: i = %d, expect %d, got %d\n", msg, i, expected, result); \
errs++; \
} \
} while (0)
int main(void)
{
int errs = 0;
cudaStream_t stream;
cudaStreamCreate(&stream);
int mpi_errno;
int rank, size;
MPI_Init(NULL, NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int expected_sum = size * (size - 1) / 2;
MPI_Info info;
MPI_Info_create(&info);
MPI_Info_set(info, "type", "cudaStream_t");
MPIX_Info_set_hex(info, "value", &stream, sizeof(stream));
MPIX_Stream mpi_stream;
MPIX_Stream_create(info, &mpi_stream);
MPI_Info_free(&info);
MPI_Comm stream_comm;
MPIX_Stream_comm_create(MPI_COMM_WORLD, mpi_stream, &stream_comm);
#define N 10
/* TEST 1 - MPI_INT */
int buf[N];
void *d_buf, *d_result_buf;
cudaMalloc(&d_buf, sizeof(buf));
cudaMalloc(&d_result_buf, sizeof(buf));
for (int i = 0; i < N; i++) {
buf[i] = rank;
}
cudaMemcpyAsync(d_buf, buf, sizeof(buf), cudaMemcpyHostToDevice, stream);
mpi_errno = MPIX_Allreduce_enqueue(d_buf, d_result_buf, N, MPI_INT, MPI_SUM, stream_comm);
assert(mpi_errno == MPI_SUCCESS);
cudaMemcpyAsync(buf, d_result_buf, sizeof(buf), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
cudaFree(d_buf);
cudaFree(d_result_buf);
for (int i = 0; i < N; i++) {
CHECK_RESULT(i, buf[i], expected_sum, "TEST 1");
}
/* TEST 2 - MPI_SHORT_INT (typically non-contig) */
struct {
short a;
int b;
} buf2[N];
cudaMalloc(&d_buf, sizeof(buf2));
cudaMalloc(&d_result_buf, sizeof(buf2));
    for (int i = 0; i < N; i++) {
/* MINLOC result should be {0, i % size} */
if (i % size == rank) {
buf2[i].a = 0;
} else {
buf2[i].a = rank + 1;
}
buf2[i].b = rank;
}
cudaMemcpyAsync(d_buf, buf2, sizeof(buf2), cudaMemcpyHostToDevice, stream);
mpi_errno = MPIX_Allreduce_enqueue(d_buf, d_result_buf, N, MPI_SHORT_INT, MPI_MINLOC, stream_comm);
assert(mpi_errno == MPI_SUCCESS);
cudaMemcpyAsync(buf2, d_result_buf, sizeof(buf2), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
for (int i = 0; i < N; i++) {
CHECK_RESULT(i, buf2[i].a, 0, "TEST 2");
CHECK_RESULT(i, buf2[i].b, i % size, "TEST 2");
}
/* clean up */
MPI_Comm_free(&stream_comm);
MPIX_Stream_free(&mpi_stream);
cudaStreamDestroy(stream);
int tot_errs;
MPI_Reduce(&errs, &tot_errs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
if (rank == 0) {
if (tot_errs == 0) {
printf("No Errors\n");
} else {
printf("%d Errors\n", tot_errs);
}
}
MPI_Finalize();
return errs;
}
|
56ac493a047720f2adf2f52e57b8ff8fec3a0bee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
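// Note: only threadIdx.x is used, so this kernel assumes a single thread block
// (num_elements must not exceed blockDim.x).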
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements){
int thread_id = threadIdx.x; // Obtain the index of the thread within the thread block
if(thread_id >= num_elements)
return;
C[thread_id] = A[thread_id] + B[thread_id];
return;
}
#endif // #ifndef _VECTOR_ADDITION_KERNEL_H
| 56ac493a047720f2adf2f52e57b8ff8fec3a0bee.cu | #ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements){
int thread_id = threadIdx.x; // Obtain the index of the thread within the thread block
if(thread_id >= num_elements)
return;
C[thread_id] = A[thread_id] + B[thread_id];
return;
}
#endif // #ifndef _VECTOR_ADDITION_KERNEL_H
|
541dc1b58d0d064a2d1279082f1d21834230ac67.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "global_pooling_cpu.cpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on GPU");
ASSERT(in_feat.dim() == 2, "Invalid in_feat.dim():", in_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));
ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
"Invalid pooling mode");
if (!p_out_map_key->is_key_set()) {
coordinate_map_key_type out_key = std::get<0>(p_map_manager->origin());
p_out_map_key->set_key(out_key);
}
int64_t const batch_size = p_map_manager->origin_map_size();
bool const use_avg =
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;
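  // With a single batch, global pooling reduces to a plain reduction over all
  // input rows.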
if (batch_size == 1) {
// Simple reduction
if (pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX) {
return in_feat.max(0);
} else {
auto out_feat = in_feat.sum(0, true);
auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
if (use_avg)
out_feat /= in_feat.size(0);
num_nonzero[0] = in_feat.size(0);
return {out_feat, num_nonzero};
}
} else {
// batch_size > 1
// TODO Default to specific pooling mode conversion.
// Regular case
// if (pooling_mode == 0)
// pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
// origin kernel map
if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
auto out_feat =
torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
// If the policy is GlobalPoolingMode.INDEX_SELECT
switch (pooling_mode) {
case PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX:
case PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX: {
std::vector<at::Tensor> const vec_maps =
p_map_manager->origin_map_th(p_in_map_key).second;
for (int b = 0; b < batch_size; ++b) {
if (use_avg)
out_feat[b] = in_feat.index_select(0, vec_maps[b]).mean(0);
else
out_feat[b] = in_feat.index_select(0, vec_maps[b]).sum(0);
num_nonzero[b] = vec_maps[b].numel();
}
} break;
case PoolingMode::GLOBAL_SUM_POOLING_KERNEL:
case PoolingMode::GLOBAL_AVG_POOLING_KERNEL: {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseSetStream(handle, stream);
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
NonzeroAvgPoolingForwardKernelGPU<scalar_t,
default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
out_feat.template data_ptr<scalar_t>(), batch_size,
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_outs, use_avg, byte_allocator, handle, stream);
});
} break;
}
return {out_feat, num_nonzero};
} else {
// Max pool
auto out_feat =
torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
at::Tensor max_index = torch::empty({batch_size, in_feat.size(1)},
torch::TensorOptions()
.device(in_feat.device())
.dtype(torch::kInt)
.requires_grad(false));
switch (pooling_mode) {
case PoolingMode::GLOBAL_MAX_POOLING_KERNEL:
// TODO
case PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX: {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(),
out_feat.template data_ptr<scalar_t>(), batch_size,
max_index.data_ptr<int>(), in_feat.size(1), in_outs,
byte_allocator, stream);
});
} break;
default:
ASSERT(false, "Invalid pooling mode");
}
return {out_feat, max_index};
}
}
}
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
ASSERT(num_nonzero.is_cuda(), "num_nonzero must be on CUDA");
ASSERT(grad_out_feat.dim() == 2,
"Invalid grad_out_feat.dim():", grad_out_feat.dim());
if (!grad_out_feat.is_contiguous())
grad_out_feat = grad_out_feat.contiguous();
ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
coordinate_map_key_type out_key = p_out_map_key->get_key();
ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
ASSERT(grad_out_feat.size(0) == p_map_manager->size(out_key),
"Invalid grad_out size", grad_out_feat.size(0),
"!=", p_map_manager->size(out_key));
ASSERT(in_feat.size(1) == grad_out_feat.size(1),
"Input feature size and kernel size mismatch");
ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
"Invalid pooling mode");
int64_t const batch_size = p_map_manager->size(out_key);
bool const use_avg =
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;
auto grad_in_feat = torch::empty_like(in_feat);
// TODO Default to specific pooling mode conversion.
// Regular case
// if (pooling_mode == 0)
// pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
LOG_DEBUG("GLOBAL_POOLING");
if (batch_size == 1) {
if (use_avg) {
LOG_DEBUG("Copying grad_out_feat. size:", in_feat.size(0));
grad_in_feat.copy_(grad_out_feat / in_feat.size(0));
} else
grad_in_feat.copy_(grad_out_feat);
} else {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
grad_in_feat.zero_();
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
NonzeroAvgPoolingBackwardKernelGPU<
scalar_t, default_types::index_type, TemplatedAllocator<char>>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0),
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_outs, use_avg, stream);
});
}
} else {
// MAX Pooling
grad_in_feat.zero_();
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
MaxPoolingBackwardKernelGPU<scalar_t>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.template data_ptr<int>(),
in_feat.size(1), stream);
});
}
return grad_in_feat;
}
// default allocator
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
// c10
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
template at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
| 541dc1b58d0d064a2d1279082f1d21834230ac67.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "global_pooling_cpu.cpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on GPU");
ASSERT(in_feat.dim() == 2, "Invalid in_feat.dim():", in_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));
ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
"Invalid pooling mode");
if (!p_out_map_key->is_key_set()) {
coordinate_map_key_type out_key = std::get<0>(p_map_manager->origin());
p_out_map_key->set_key(out_key);
}
int64_t const batch_size = p_map_manager->origin_map_size();
bool const use_avg =
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;
if (batch_size == 1) {
// Simple reduction
if (pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX) {
return in_feat.max(0);
} else {
auto out_feat = in_feat.sum(0, true);
auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
if (use_avg)
out_feat /= in_feat.size(0);
num_nonzero[0] = in_feat.size(0);
return {out_feat, num_nonzero};
}
} else {
// batch_size > 1
// TODO Default to specific pooling mode conversion.
// Regular case
// if (pooling_mode == 0)
// pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
// origin kernel map
if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
auto out_feat =
torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
auto num_nonzero = torch::zeros({batch_size}, in_feat.options());
// If the policy is GlobalPoolingMode.INDEX_SELECT
switch (pooling_mode) {
case PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX:
case PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX: {
std::vector<at::Tensor> const vec_maps =
p_map_manager->origin_map_th(p_in_map_key).second;
for (int b = 0; b < batch_size; ++b) {
if (use_avg)
out_feat[b] = in_feat.index_select(0, vec_maps[b]).mean(0);
else
out_feat[b] = in_feat.index_select(0, vec_maps[b]).sum(0);
num_nonzero[b] = vec_maps[b].numel();
}
} break;
case PoolingMode::GLOBAL_SUM_POOLING_KERNEL:
case PoolingMode::GLOBAL_AVG_POOLING_KERNEL: {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
cusparseSetStream(handle, stream);
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
NonzeroAvgPoolingForwardKernelGPU<scalar_t,
default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
out_feat.template data_ptr<scalar_t>(), batch_size,
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_outs, use_avg, byte_allocator, handle, stream);
});
} break;
}
return {out_feat, num_nonzero};
} else {
// Max pool
auto out_feat =
torch::zeros({batch_size, in_feat.size(1)}, in_feat.options());
at::Tensor max_index = torch::empty({batch_size, in_feat.size(1)},
torch::TensorOptions()
.device(in_feat.device())
.dtype(torch::kInt)
.requires_grad(false));
switch (pooling_mode) {
case PoolingMode::GLOBAL_MAX_POOLING_KERNEL:
// TODO
case PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX: {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_forward_gpu", [&] {
MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(),
out_feat.template data_ptr<scalar_t>(), batch_size,
max_index.data_ptr<int>(), in_feat.size(1), in_outs,
byte_allocator, stream);
});
} break;
default:
ASSERT(false, "Invalid pooling mode");
}
return {out_feat, max_index};
}
}
}
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
ASSERT(num_nonzero.is_cuda(), "num_nonzero must be on CUDA");
ASSERT(grad_out_feat.dim() == 2,
"Invalid grad_out_feat.dim():", grad_out_feat.dim());
if (!grad_out_feat.is_contiguous())
grad_out_feat = grad_out_feat.contiguous();
ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
coordinate_map_key_type out_key = p_out_map_key->get_key();
ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
ASSERT(grad_out_feat.size(0) == p_map_manager->size(out_key),
"Invalid grad_out size", grad_out_feat.size(0),
"!=", p_map_manager->size(out_key));
ASSERT(in_feat.size(1) == grad_out_feat.size(1),
"Input feature size and kernel size mismatch");
ASSERT(pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_MAX_POOLING_PYTORCH_INDEX,
"Invalid pooling mode");
int64_t const batch_size = p_map_manager->size(out_key);
bool const use_avg =
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX;
auto grad_in_feat = torch::empty_like(in_feat);
// TODO Default to specific pooling mode conversion.
// Regular case
// if (pooling_mode == 0)
// pooling_mode = in_feat.size(0) / batch_size > 100 ? 1 : 2;
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
if (pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_DEFAULT ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_KERNEL ||
pooling_mode == PoolingMode::GLOBAL_SUM_POOLING_PYTORCH_INDEX ||
pooling_mode == PoolingMode::GLOBAL_AVG_POOLING_PYTORCH_INDEX) {
LOG_DEBUG("GLOBAL_POOLING");
if (batch_size == 1) {
if (use_avg) {
LOG_DEBUG("Copying grad_out_feat. size:", in_feat.size(0));
grad_in_feat.copy_(grad_out_feat / in_feat.size(0));
} else
grad_in_feat.copy_(grad_out_feat);
} else {
const auto &in_outs = p_map_manager->origin_map(p_in_map_key);
grad_in_feat.zero_();
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
NonzeroAvgPoolingBackwardKernelGPU<
scalar_t, default_types::index_type, TemplatedAllocator<char>>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0),
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_outs, use_avg, stream);
});
}
} else {
// MAX Pooling
grad_in_feat.zero_();
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "global_pooling_backward_gpu", [&] {
MaxPoolingBackwardKernelGPU<scalar_t>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.template data_ptr<int>(),
in_feat.size(1), stream);
});
}
return grad_in_feat;
}
// default allocator
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
// c10
template std::tuple<at::Tensor, at::Tensor> GlobalPoolingForwardGPU(
at::Tensor const &in_feat,
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
template at::Tensor GlobalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor &grad_out_feat, //
at::Tensor const &num_nonzero, //
PoolingMode::Type const pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
|
35f5258c774c6272717f202398c4141e393ec6f7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include "metrics/adjustedRandIndex.h"
#include "metrics/contingencyMatrix.h"
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
    //calculating the sum of the number of pairwise points in each cell
    //and also reducing the contingency matrix along rows and columns
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
    //calculating the number of pairwise points in every row sum (a)
    //and in every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
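    // i.e. ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex), where
    //   Index         = sum_ij C(N_ij, 2)
    //   ExpectedIndex = [sum_i C(a_i, 2) * sum_j C(b_j, 2)] / C(n, 2)
    //   MaxIndex      = [sum_i C(a_i, 2) + sum_j C(b_j, 2)] / 2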
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
std::shared_ptr<deviceAllocator> allocator(new defaultDeviceAllocator);
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(hipFree(firstClusterArray));
CUDA_CHECK(hipFree(secondClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
hipStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001},
{2000000, 1, 1000, true, 0.000001},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 35f5258c774c6272717f202398c4141e393ec6f7.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <random>
#include "metrics/adjustedRandIndex.h"
#include "metrics/contingencyMatrix.h"
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
    //calculating the sum of the number of pairwise points in each cell
    //and also reducing the contingency matrix along rows and columns
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
    //calculating the number of pairwise points in every row sum (a)
    //and in every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
std::shared_ptr<deviceAllocator> allocator(new defaultDeviceAllocator);
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, lowerLabelRange,
upperLabelRange, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(cudaFree(firstClusterArray));
CUDA_CHECK(cudaFree(secondClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
cudaStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001},
{100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001},
{198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001},
{199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001},
{100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001},
{198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001},
{2000000, 1, 1000, true, 0.000001},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
bc5ef317400faa9c01419cd8df09b8c81182aff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mat_inverse_kernel.h"
#include "cuMat_config.h"
__device__ __forceinline__ float mat_inverse (float a){
return 1.0/(a+1e-8);
}
__global__ void mat_inverse_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = mat_inverse(src[row * n + col]);
}
}
void mat_inverse_kernel_exec(const float *src, float *dst, int m, int n){
/* specify block and grid sizes */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
/* launch the kernel */
hipLaunchKernelGGL(( mat_inverse_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n);
hipDeviceSynchronize();
}
| bc5ef317400faa9c01419cd8df09b8c81182aff8.cu | #include "mat_inverse_kernel.h"
#include "cuMat_config.h"
__device__ __forceinline__ float mat_inverse (float a){
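// the small 1e-8 term guards against division by zero for inputs at or near 0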
return 1.0/(a+1e-8);
}
__global__ void mat_inverse_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = mat_inverse(src[row * n + col]);
}
}
void mat_inverse_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
/* launch the kernel */
mat_inverse_kernel<<<grid, block>>>(src, dst, m, n);
cudaThreadSynchronize();
}
|
0d6f4493b4a67eec082645c5872d586c76552b78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ck.cuh"
surface<void, cudaSurfaceType2D> surfRef;
__global__ void makeImage(uchar4 *A, size_t pitch, const int w, const int h)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 *p = (uchar4 *)((char *)A + j * pitch) + i;
(*p).x = i * 255.0f / w;
(*p).y = j * 255.0f / h;
(*p).z = (*p).z + 1;
(*p).w = 255;
}
__global__ void cudaKernelReadWriteSurface_Kernel(hipArray_t s, int w, int h)
{
}
void cudaImageFunc(dim3 tpb, dim3 tb, uchar4 *devD, size_t pitch, int w, int h)
{
printf("cudaImageFunc called\n");
hipError_t err;
hipLaunchKernelGGL(( makeImage), dim3(tb), dim3(tpb), 0, 0, devD, pitch, w, h);
err = hipGetLastError();
if (err != hipSuccess)
{
printf("WARNING : %s\n", hipGetErrorString(err));
}
}
void cudaKernelReadWriteSurface(dim3 tpb, dim3 tb, hipArray_t s, int w, int h)
{
/*Create surface reference*/
const hipSurfaceReference *surfRefPtr;
hipGetSurfaceReference(&surfRefPtr, &surfRef);
hipChannelFormatDesc channelDesc;
hipGetChannelDesc(&channelDesc, s);
hipBindSurfaceToArray(surfRefPtr, s, &channelDesc);
} | 0d6f4493b4a67eec082645c5872d586c76552b78.cu | #include "ck.cuh"
surface<void, cudaSurfaceType2D> surfRef;
__global__ void makeImage(uchar4 *A, size_t pitch, const int w, const int h)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
uchar4 *p = (uchar4 *)((char *)A + j * pitch) + i;
(*p).x = i * 255.0f / w;
(*p).y = j * 255.0f / h;
(*p).z = (*p).z + 1;
(*p).w = 255;
}
__global__ void cudaKernelReadWriteSurface_Kernel(cudaArray_t s, int w, int h)
{
}
void cudaImageFunc(dim3 tpb, dim3 tb, uchar4 *devD, size_t pitch, int w, int h)
{
printf("cudaImageFunc called\n");
cudaError_t err;
makeImage<<<tb, tpb>>>(devD, pitch, w, h);
err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("WARNING : %s\n", cudaGetErrorString(err));
}
}
void cudaKernelReadWriteSurface(dim3 tpb, dim3 tb, cudaArray_t s, int w, int h)
{
/*Create surface reference*/
const surfaceReference *surfRefPtr;
cudaGetSurfaceReference(&surfRefPtr, &surfRef);
cudaChannelFormatDesc channelDesc;
cudaGetChannelDesc(&channelDesc, s);
cudaBindSurfaceToArray(surfRefPtr, s, &channelDesc);
} |
d83ff6d3af848fe3a79e03f57cb7a5f4e5c314bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
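// grid-stride loop: each thread handles elements index, index + stride, index + 2*stride, ...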
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
} | d83ff6d3af848fe3a79e03f57cb7a5f4e5c314bf.cu | #include <iostream>
#include <math.h>
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
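// grid-stride loop: each thread handles elements index, index + stride, index + 2*stride, ...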
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
e4864975b6ba4e815e2ef4a38da6161360166492.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _et_joint_histogram_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#include "_et_joint_histogram_gpu.h"
#include "_et_joint_histogram_gpu_kernels.cu"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
void et_joint_histogram_gpu(float **d_array_A, float **d_array_B, int **d_jont_hist, int array_size, int hist_size, float min_A, float max_A, float min_B, float max_B)
{
//CUDA_SAFE_CALL(hipMemcpyToSymbol(c_backprojection_size,&backprojection_size,sizeof(int3)));
const unsigned int grid = (unsigned int)ceil(array_size/(float)BLOCK);
dim3 B(BLOCK,1,1);
dim3 G(grid,1,1);
hipLaunchKernelGGL(( et_joint_histogram_gpu_kernel) , dim3(G),dim3(B), 0, 0, *d_array_A, *d_array_B, *d_jont_hist, array_size, hist_size, min_A, max_A, min_B, max_B);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
| e4864975b6ba4e815e2ef4a38da6161360166492.cu | /*
* _et_joint_histogram_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#include "_et_joint_histogram_gpu.h"
#include "_et_joint_histogram_gpu_kernels.cu"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
void et_joint_histogram_gpu(float **d_array_A, float **d_array_B, int **d_jont_hist, int array_size, int hist_size, float min_A, float max_A, float min_B, float max_B)
{
//CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_backprojection_size,&backprojection_size,sizeof(int3)));
const unsigned int grid = (unsigned int)ceil(array_size/(float)BLOCK);
dim3 B(BLOCK,1,1);
dim3 G(grid,1,1);
et_joint_histogram_gpu_kernel <<<G,B>>> (*d_array_A, *d_array_B, *d_jont_hist, array_size, hist_size, min_A, max_A, min_B, max_B);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
|
b84d8698f903c156e067a4259ed8d322cad73c00.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "unaccumulatedPartSizesKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *accumulatedSize = NULL;
hipMalloc(&accumulatedSize, XSIZE*YSIZE*sizeof(int));
int *sizes = NULL;
hipMalloc(&sizes, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
unaccumulatedPartSizesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,accumulatedSize,sizes);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
unaccumulatedPartSizesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,accumulatedSize,sizes);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
unaccumulatedPartSizesKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,accumulatedSize,sizes);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b84d8698f903c156e067a4259ed8d322cad73c00.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "unaccumulatedPartSizesKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
int *accumulatedSize = NULL;
cudaMalloc(&accumulatedSize, XSIZE*YSIZE*sizeof(int));
int *sizes = NULL;
cudaMalloc(&sizes, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
unaccumulatedPartSizesKernel<<<gridBlock,threadBlock>>>(size,accumulatedSize,sizes);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
unaccumulatedPartSizesKernel<<<gridBlock,threadBlock>>>(size,accumulatedSize,sizes);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
unaccumulatedPartSizesKernel<<<gridBlock,threadBlock>>>(size,accumulatedSize,sizes);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
cu_matrix_maths.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_matrix_maths.h"
__global__ void cu_plus(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fadd_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_plus(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fadd_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_plus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fadd_rd(A[tid], b);
tid += stride;
}
}
__global__ void cu_plus(const float *A, float *B, const float c, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fadd_rd(A[tid], c);
tid += stride;
}
}
__global__ void cu_minus(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_minus(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fsub_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_minus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], b);
tid += stride;
}
}
__global__ void cu_minus(const float *A, float *B, const float c, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fsub_rd(A[tid], c);
tid += stride;
}
}
__global__ void cu_square(const float *A, float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fmul_rd(A[tid], A[tid]);
tid += stride;
}
}
__global__ void cu_sqrt(const float *A, float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = sqrtf(A[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fmul_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(float *A, float B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fmul_rd(A[tid], B);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fmul_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(const float *A, const float B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fmul_rd(A[tid], B);
tid += stride;
}
}
__global__ void cu_setAll(float* A, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = val;
tid += stride;
}
}
__global__ void cu_exp(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __expf(src[tid]);
tid += stride;
}
}
__global__ void cu_log(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __logf(src[tid]);
tid += stride;
}
}
__global__ void cu_pow(const float* src, float* dst, const float power, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = powf(src[tid], power);
tid += stride;
}
}
__global__ void cu_divide(float *A, float B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fdividef(A[tid], B);
tid += stride;
}
}
__global__ void cu_divide(const float* src, float* dst, const float denominator, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == denominator) dst[tid] = 0.0;
else dst[tid] = __fdividef(src[tid], denominator);
tid += stride;
}
}
__global__ void cu_divide(const float numerator, const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == src[tid]) dst[tid] = 0.0;
else dst[tid] = __fdividef(numerator, src[tid]);
tid += stride;
}
}
__global__ void cu_divide(const float* numerator, const float* denominator, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == denominator[tid]) dst[tid] = 0.0;
else dst[tid] = __fdividef(numerator[tid], denominator[tid]);
tid += stride;
}
}
__global__ void cu_sum(const float* src, float* sum, const int n){
extern __shared__ float sdata[];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(tid < n){
x = src[tid];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){
if(threadIdx.x < offset){
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0){
sum[blockIdx.x] = sdata[0];
}
}
__global__ void cu_minMaxLoc(const float* src, float* minValue, float* maxValue, int* minLoc, int* maxLoc, const int n){
__shared__ float minValCache[threadsPerBlock];
__shared__ float maxValCache[threadsPerBlock];
__shared__ int minLocCache[threadsPerBlock];
__shared__ int maxLocCache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
float val = src[0];
int loc = 0;
if(tid < n){
val = src[tid];
loc = tid;
}
maxValCache[threadIdx.x] = val;
minValCache[threadIdx.x] = val;
maxLocCache[threadIdx.x] = loc;
minLocCache[threadIdx.x] = loc;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){
if(threadIdx.x < offset){
// fold the upper half's partial min/max into our own
if(maxValCache[threadIdx.x] >= maxValCache[threadIdx.x + offset]){
;
}else{
maxValCache[threadIdx.x] = maxValCache[threadIdx.x + offset];
maxLocCache[threadIdx.x] = maxLocCache[threadIdx.x + offset];
}
if(minValCache[threadIdx.x] <= minValCache[threadIdx.x + offset]){
;
}else{
minValCache[threadIdx.x] = minValCache[threadIdx.x + offset];
minLocCache[threadIdx.x] = minLocCache[threadIdx.x + offset];
}
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0){
minValue[blockIdx.x] = minValCache[0];
maxValue[blockIdx.x] = maxValCache[0];
minLoc[blockIdx.x] = minLocCache[0];
maxLoc[blockIdx.x] = maxLocCache[0];
}
}
__global__ void cu_greaterThan(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] > val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_greaterThanOrEqualTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] >= val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_lessThan(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] < val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_lessThanOrEqualTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] <= val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_equalTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] == val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_tanh(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = tanhf(src[tid]);
tid += stride;
}
}
__global__ void cu_fliplr(const float* src, float* dst, const int rows, const int cols, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c = tid % cols;
int r = tid / cols;
dst[tid] = src[(cols - c - 1) + r * cols];
tid += stride;
}
}
__global__ void cu_padding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int pad = (cols2 - cols1) / 2;
int c1 = tid % cols1;
int r1 = tid / cols1;
int r2 = r1 + pad;
int c2 = c1 + pad;
dst[r2 * cols2 + c2] = src[tid];
tid += stride;
}
}
__global__ void cu_depadding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int pad = (cols1 - cols2) / 2;
int c2 = tid % cols2;
int r2 = tid / cols2;
int r1 = r2 + pad;
int c1 = c2 + pad;
dst[tid] = src[r1 * cols1 + c1];
tid += stride;
}
}
__global__ void cu_repmat(const float *a, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c2 = tid % colsdst;
int r2 = tid / colsdst;
int ra = r2 % rowsa;
int ca = c2 % colsa;
dst[tid] = a[ra * colsa + ca];
tid += stride;
}
}
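// cu_kron: Kronecker product dst = kron(a, b); b's tile size is inferred from the
// output dimensions, and dst[r][c] = a[r / rowsb][c / colsb] * b[r % rowsb][c % colsb].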
__global__ void cu_kron(const float *a, const float* b, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsb = colsdst / colsa;
int rowsb = rowsdst / rowsa;
while(tid < n){
int c2 = tid % colsdst;
int r2 = tid / colsdst;
int rb = r2 % rowsb;
int cb = c2 % colsb;
int ra = r2 / rowsb;
int ca = c2 / colsb;
dst[tid] = a[ra * colsa + ca] * b[rb * colsb + cb];
tid += stride;
}
}
__global__ void cu_downSample(const float *src, float* dst, const int y_stride, const int x_stride, const int colssrc, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = colssrc / x_stride;
if(colssrc % x_stride > 0) ++colsdst;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * y_stride;
int csrc = cdst * x_stride;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
__global__ void cu_interpolation(const float* src, float* dst, const int colssrc, const int colsdst, const int _stride, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int csrc = tid % colssrc;
int rsrc = tid / colssrc;
int rdst = rsrc * _stride;
int cdst = csrc * _stride;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
__global__ void cu_getRange(const float *src, float* dst, const int xstart, const int xend, const int ystart, const int yend, const int colssrc, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = xend - xstart + 1;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst + ystart;
int csrc = cdst + xstart;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
__global__ void cu_copyMakeBorder(const float *src, float* dst, const int rowssrc, const int colssrc, const int up, const int down, const int left, const int right, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = colssrc + left + right;
while(tid < n){
int csrc = tid % colssrc;
int rsrc = tid / colssrc;
int rdst = up + rsrc;
int cdst = left + csrc;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
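// cu_pooling_max: non-overlapping max pooling over stridey x stridex windows (clipped at the
// borders); dst keeps the window maximum and loc records the flat source index of that maximum.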
__global__ void cu_pooling_max(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int stridex, const int stridey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * stridey;
int csrc = cdst * stridex;
int xend = (csrc + stridex - 1) > (colssrc - 1) ? (colssrc - 1) : (csrc + stridex - 1);
int yend = (rsrc + stridey - 1) > (rowssrc - 1) ? (rowssrc - 1) : (rsrc + stridey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
if(src[i * colssrc + j] > dst[tid]){
dst[tid] = src[i * colssrc + j];
loc[tid] = (float)(i * colssrc + j);
}
}
}
tid += stride;
}
}
__global__ void cu_pooling_mean(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int stridex, const int stridey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * stridey;
int csrc = cdst * stridex;
int xend = (csrc + stridex - 1) > (colssrc - 1) ? (colssrc - 1) : (csrc + stridex - 1);
int yend = (rsrc + stridey - 1) > (rowssrc - 1) ? (rowssrc - 1) : (rsrc + stridey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
dst[tid] += __fdividef(src[i * colssrc + j], __fmul_rd(yend - rsrc + 1, xend - csrc + 1));
}
}
tid += stride;
}
}
__global__ void cu_pooling_overlap_max(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int sizex, const int sizey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst;
int csrc = cdst;
int xend = (csrc + sizex - 1);
int yend = (rsrc + sizey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
if(src[i * colssrc + j] > dst[tid]){
dst[tid] = src[i * colssrc + j];
loc[tid] = (float)(i * colssrc + j);
}
}
}
tid += stride;
}
}
__global__ void cu_unpooling(const float* src, const float* loc, float* dst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = (int)(loc[tid]) % colsdst;
int rdst = (int)(loc[tid]) / colsdst;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
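// cu_multiply: tiled matrix multiplication C = A * B using 32x32 shared-memory tiles,
// one output element per thread.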
__global__ void cu_multiply(const float* A, const float* B, float * C,
int rowsa, int colsa,
int rowsb, int colsb,
int rowsc, int colsc){
__shared__ float sA[32][32]; // Tile size of 32x32
__shared__ float sB[32][32];
int Row = blockDim.y*blockIdx.y + threadIdx.y;
int Col = blockDim.x*blockIdx.x + threadIdx.x;
float Cvalue = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((colsa - 1)/ 32) + 1); k++){
if ( (Row < rowsa) && (threadIdx.x + (k*32)) < colsa){
sA[threadIdx.y][threadIdx.x] = A[(Row*colsa) + threadIdx.x + (k*32)];
}
else{
sA[threadIdx.y][threadIdx.x] = 0.0;
}
if ( Col < colsb && (threadIdx.y + k*32) < rowsb){
sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*32)*colsb + Col];
}
else{
sB[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads();
for (int j = 0; j < 32; ++j){
Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
}
}
if (Row < rowsc && Col < colsc){
C[Row*colsc + Col] = Cvalue;
}
}
__global__ void cu_transpose(const float* src, float* dst, int colssrc, int colsdst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = cdst;
int csrc = rdst;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
| cu_matrix_maths.cu | #include "cu_matrix_maths.h"
__global__ void cu_plus(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fadd_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_plus(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fadd_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_plus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fadd_rd(A[tid], b);
tid += stride;
}
}
__global__ void cu_plus(const float *A, float *B, const float c, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fadd_rd(A[tid], c);
tid += stride;
}
}
__global__ void cu_minus(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_minus(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fsub_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_minus(float *A, const float b, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fsub_rd(A[tid], b);
tid += stride;
}
}
__global__ void cu_minus(const float *A, float *B, const float c, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fsub_rd(A[tid], c);
tid += stride;
}
}
__global__ void cu_square(const float *A, float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = __fmul_rd(A[tid], A[tid]);
tid += stride;
}
}
__global__ void cu_sqrt(const float *A, float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
B[tid] = sqrtf(A[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(float *A, const float *B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fmul_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(float *A, float B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fmul_rd(A[tid], B);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(const float *A, const float *B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fmul_rd(A[tid], B[tid]);
tid += stride;
}
}
__global__ void cu_elementWiseMultiply(const float *A, const float B, float *C, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
C[tid] = __fmul_rd(A[tid], B);
tid += stride;
}
}
__global__ void cu_setAll(float* A, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = val;
tid += stride;
}
}
__global__ void cu_exp(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __expf(src[tid]);
tid += stride;
}
}
__global__ void cu_log(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = __logf(src[tid]);
tid += stride;
}
}
__global__ void cu_pow(const float* src, float* dst, const float power, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = powf(src[tid], power);
tid += stride;
}
}
__global__ void cu_divide(float *A, float B, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
A[tid] = __fdividef(A[tid], B);
tid += stride;
}
}
__global__ void cu_divide(const float* src, float* dst, const float denominator, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == denominator) dst[tid] = 0.0;
else dst[tid] = __fdividef(src[tid], denominator);
tid += stride;
}
}
__global__ void cu_divide(const float numerator, const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == src[tid]) dst[tid] = 0.0;
else dst[tid] = __fdividef(numerator, src[tid]);
tid += stride;
}
}
__global__ void cu_divide(const float* numerator, const float* denominator, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(0 == denominator[tid]) dst[tid] = 0.0;
else dst[tid] = __fdividef(numerator[tid], denominator[tid]);
tid += stride;
}
}
__global__ void cu_sum(const float* src, float* sum, const int n){
extern __shared__ float sdata[];
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(tid < n){
x = src[tid];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){
if(threadIdx.x < offset){
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0){
sum[blockIdx.x] = sdata[0];
}
}
__global__ void cu_minMaxLoc(const float* src, float* minValue, float* maxValue, int* minLoc, int* maxLoc, const int n){
__shared__ float minValCache[threadsPerBlock];
__shared__ float maxValCache[threadsPerBlock];
__shared__ int minLocCache[threadsPerBlock];
__shared__ int maxLocCache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
float val = src[0];
int loc = 0;
if(tid < n){
val = src[tid];
loc = tid;
}
maxValCache[threadIdx.x] = val;
minValCache[threadIdx.x] = val;
maxLocCache[threadIdx.x] = loc;
minLocCache[threadIdx.x] = loc;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1){
if(threadIdx.x < offset){
// fold the upper half's partial min/max into our own
if(maxValCache[threadIdx.x] >= maxValCache[threadIdx.x + offset]){
;
}else{
maxValCache[threadIdx.x] = maxValCache[threadIdx.x + offset];
maxLocCache[threadIdx.x] = maxLocCache[threadIdx.x + offset];
}
if(minValCache[threadIdx.x] <= minValCache[threadIdx.x + offset]){
;
}else{
minValCache[threadIdx.x] = minValCache[threadIdx.x + offset];
minLocCache[threadIdx.x] = minLocCache[threadIdx.x + offset];
}
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0){
minValue[blockIdx.x] = minValCache[0];
maxValue[blockIdx.x] = maxValCache[0];
minLoc[blockIdx.x] = minLocCache[0];
maxLoc[blockIdx.x] = maxLocCache[0];
}
}
__global__ void cu_greaterThan(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] > val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_greaterThanOrEqualTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] >= val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_lessThan(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] < val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_lessThanOrEqualTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] <= val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_equalTo(const float* src, float* dst, const float val, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
if(src[tid] == val) dst[tid] = 1.0;
else dst[tid] = 0.0;
tid += stride;
}
}
__global__ void cu_tanh(const float* src, float* dst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
dst[tid] = tanhf(src[tid]);
tid += stride;
}
}
__global__ void cu_fliplr(const float* src, float* dst, const int rows, const int cols, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c = tid % cols;
int r = tid / cols;
dst[tid] = src[(cols - c - 1) + r * cols];
tid += stride;
}
}
__global__ void cu_padding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int pad = (cols2 - cols1) / 2;
int c1 = tid % cols1;
int r1 = tid / cols1;
int r2 = r1 + pad;
int c2 = c1 + pad;
dst[r2 * cols2 + c2] = src[tid];
tid += stride;
}
}
__global__ void cu_depadding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int pad = (cols1 - cols2) / 2;
int c2 = tid % cols2;
int r2 = tid / cols2;
int r1 = r2 + pad;
int c1 = c2 + pad;
dst[tid] = src[r1 * cols1 + c1];
tid += stride;
}
}
__global__ void cu_repmat(const float *a, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c2 = tid % colsdst;
int r2 = tid / colsdst;
int ra = r2 % rowsa;
int ca = c2 % colsa;
dst[tid] = a[ra * colsa + ca];
tid += stride;
}
}
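// cu_kron: Kronecker product dst = kron(a, b); b's tile size is inferred from the
// output dimensions, and dst[r][c] = a[r / rowsb][c / colsb] * b[r % rowsb][c % colsb].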
__global__ void cu_kron(const float *a, const float* b, float* dst, const int rowsa, const int colsa, const int rowsdst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsb = colsdst / colsa;
int rowsb = rowsdst / rowsa;
while(tid < n){
int c2 = tid % colsdst;
int r2 = tid / colsdst;
int rb = r2 % rowsb;
int cb = c2 % colsb;
int ra = r2 / rowsb;
int ca = c2 / colsb;
dst[tid] = a[ra * colsa + ca] * b[rb * colsb + cb];
tid += stride;
}
}
__global__ void cu_downSample(const float *src, float* dst, const int y_stride, const int x_stride, const int colssrc, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = colssrc / x_stride;
if(colssrc % x_stride > 0) ++colsdst;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * y_stride;
int csrc = cdst * x_stride;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
__global__ void cu_interpolation(const float* src, float* dst, const int colssrc, const int colsdst, const int _stride, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int csrc = tid % colssrc;
int rsrc = tid / colssrc;
int rdst = rsrc * _stride;
int cdst = csrc * _stride;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
__global__ void cu_getRange(const float *src, float* dst, const int xstart, const int xend, const int ystart, const int yend, const int colssrc, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = xend - xstart + 1;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst + ystart;
int csrc = cdst + xstart;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
__global__ void cu_copyMakeBorder(const float *src, float* dst, const int rowssrc, const int colssrc, const int up, const int down, const int left, const int right, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int colsdst = colssrc + left + right;
while(tid < n){
int csrc = tid % colssrc;
int rsrc = tid / colssrc;
int rdst = up + rsrc;
int cdst = left + csrc;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
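// cu_pooling_max: non-overlapping max pooling over stridey x stridex windows (clipped at the
// borders); dst keeps the window maximum and loc records the flat source index of that maximum.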
__global__ void cu_pooling_max(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int stridex, const int stridey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * stridey;
int csrc = cdst * stridex;
int xend = (csrc + stridex - 1) > (colssrc - 1) ? (colssrc - 1) : (csrc + stridex - 1);
int yend = (rsrc + stridey - 1) > (rowssrc - 1) ? (rowssrc - 1) : (rsrc + stridey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
if(src[i * colssrc + j] > dst[tid]){
dst[tid] = src[i * colssrc + j];
loc[tid] = (float)(i * colssrc + j);
}
}
}
tid += stride;
}
}
__global__ void cu_pooling_mean(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int stridex, const int stridey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst * stridey;
int csrc = cdst * stridex;
int xend = (csrc + stridex - 1) > (colssrc - 1) ? (colssrc - 1) : (csrc + stridex - 1);
int yend = (rsrc + stridey - 1) > (rowssrc - 1) ? (rowssrc - 1) : (rsrc + stridey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
dst[tid] += __fdividef(src[i * colssrc + j], __fmul_rd(yend - rsrc + 1, xend - csrc + 1));
}
}
tid += stride;
}
}
__global__ void cu_pooling_overlap_max(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int sizex, const int sizey, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = rdst;
int csrc = cdst;
int xend = (csrc + sizex - 1);
int yend = (rsrc + sizey - 1);
loc[tid] = (float)(rsrc * colssrc + csrc);
for(int i = rsrc; i <= yend; ++i){
for(int j = csrc; j <= xend; ++j){
if(src[i * colssrc + j] > dst[tid]){
dst[tid] = src[i * colssrc + j];
loc[tid] = (float)(i * colssrc + j);
}
}
}
tid += stride;
}
}
__global__ void cu_unpooling(const float* src, const float* loc, float* dst, const int colsdst, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = (int)(loc[tid]) % colsdst;
int rdst = (int)(loc[tid]) / colsdst;
dst[rdst * colsdst + cdst] = src[tid];
tid += stride;
}
}
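// cu_multiply: tiled matrix multiplication C = A * B using 32x32 shared-memory tiles,
// one output element per thread.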
__global__ void cu_multiply(const float* A, const float* B, float * C,
int rowsa, int colsa,
int rowsb, int colsb,
int rowsc, int colsc){
__shared__ float sA[32][32]; // Tile size of 32x32
__shared__ float sB[32][32];
int Row = blockDim.y*blockIdx.y + threadIdx.y;
int Col = blockDim.x*blockIdx.x + threadIdx.x;
float Cvalue = 0.0;
sA[threadIdx.y][threadIdx.x] = 0.0;
sB[threadIdx.y][threadIdx.x] = 0.0;
for (int k = 0; k < (((colsa - 1)/ 32) + 1); k++){
if ( (Row < rowsa) && (threadIdx.x + (k*32)) < colsa){
sA[threadIdx.y][threadIdx.x] = A[(Row*colsa) + threadIdx.x + (k*32)];
}
else{
sA[threadIdx.y][threadIdx.x] = 0.0;
}
if ( Col < colsb && (threadIdx.y + k*32) < rowsb){
sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k*32)*colsb + Col];
}
else{
sB[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads();
for (int j = 0; j < 32; ++j){
Cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
}
}
if (Row < rowsc && Col < colsc){
C[Row*colsc + Col] = Cvalue;
}
}
__global__ void cu_transpose(const float* src, float* dst, int colssrc, int colsdst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int cdst = tid % colsdst;
int rdst = tid / colsdst;
int rsrc = cdst;
int csrc = rdst;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
|
c55220928da26af5114b3e403931ba8117b53bad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
lfcm.c
A literal FCM implementation.
$Id: lfcm.c,v 1.3 2002/07/12 20:48:48 eschrich Exp $
Steven Eschrich
Copyright (C) 2002 University of South Florida
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <math.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <limits.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include "utils.h"
#ifndef HANDLE_ERROR
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#endif
#define U(i,j) U[j][i]
#define DIM 256
float *U;
float *V;
float *X;
int C;
float m;
int S;
int N;
/*
__device__ float *U_d;
__device__ float *V_d;
__device__ float *X_d;
*/
/* Variables are defined statically here. These are reasonable
defaults, but can be changed via the parameter to lfcmCluster() */
float epsilon=0.25;
int number_of_iterations;
long seed;
int *max_value;
/* Public functions */
int lfcm(float** U_d,float** V_d,float* X_d);
/* Private functions */
int update_centroids();
__global__ void update_umatrix(float*,float*,float*,float*,int,int,int,float);
/* Utilities */
int init(float** U_d,float** V_d, float* X_d);
__device__ int is_example_centroid(float*,float*,int k,int,int);
__device__ float distance(float *,float *,int,int,int);
int output_centroids(char*);
int output_umatrix(char*);
int output_members(char*);
/* External functions */
int load_test_data(float **ds,float **ds_d, int *s, int *n);
int load_atr_data(char *filename, float **ds,float **ds_d ,int *s, int *n);
int load_mri_data(char *filename, float **ds,float **ds_d, int *s, int *n);
/* For testing purposes, we hard-code the desired number of clusters */
#define ATR_NUMBER_OF_CLUSTERS 5
#define MRI_NUMBER_OF_CLUSTERS 10
#define TEST_NUMBER_OF_CLUSTERS 2
#define TEST 1
#define ATR 2
#define MRI 3
/* Global variables */
int dataset_type=MRI;
int write_centroids=0;
int write_umatrix=0;
int write_members=0;
/* Variables that must be defined for called functions */
int vals[][3]={{DIM,DIM,DIM},{0,0,0},{DIM,DIM,DIM},{4096,4096,4096}};
/* Function prototypes */
float *timing_of(struct rusage,struct rusage); /* Calculate time in seconds */
int main(int argc, char **argv)
{
hipDeviceReset();
number_of_iterations=0;
struct rusage start_usage, end_usage;
int ch;
m = 2.0;
C=2;
S=2;
N=2;
float *perf_times;
char *filename;
float *U_d,*V_d,*X_d;
epsilon=0.225;
m=2.0;
seed=2000;
max_value=vals[dataset_type];
while ( (ch=getopt(argc, argv,"hw:d:s:")) != EOF ) {
switch (ch) {
case 'h':
fprintf(stderr,"Usage\n" \
"-d [a|t|m|s] Use dataset atr, mri, test, seawifs\n"\
"-w write cluster centers and memberships out\n"\
"-s seed Use seed as the random seed\n");
exit(1);
case 'w':
if ( !strcmp(optarg,"umatrix") ) write_umatrix=1;
if ( !strcmp(optarg,"centroids") ) write_centroids=1;
if ( !strcmp(optarg,"members") ) write_members=1;
if ( !strcmp(optarg,"all"))
write_umatrix=write_centroids=write_members=1;
break;
case 'd':
if ( *optarg == 'a' ) dataset_type=ATR;
if ( *optarg == 'm' ) dataset_type=MRI;
if ( *optarg == 't' ) dataset_type=TEST;
max_value=vals[dataset_type];
break;
case 's':
seed=atol(optarg);
break;
}
}
/* Print out main parameters for this run */
fprintf(stdout,"FCM Parameters\n clusterMethod=literal fcm\n");
filename=argv[optind];
fprintf(stdout," file=%s\n\n",filename);
/* Load the dataset, using one of a particular group of datasets. */
switch (dataset_type) {
case TEST:
load_test_data(&X,&X_d, &S, &N);
C=TEST_NUMBER_OF_CLUSTERS;
break;
case ATR:
load_atr_data(argv[optind],&X,&X_d, &S, &N);
C=ATR_NUMBER_OF_CLUSTERS;
break;
case MRI:
load_mri_data(argv[optind], &X,&X_d, &S, &N);
C=MRI_NUMBER_OF_CLUSTERS;
break;
}
fprintf(stdout, "Beginning to cluster here...\n");
/* Time the fcm algorithm */
//getrusage(RUSAGE_SELF, &start_usage);
lfcm(&U_d,&V_d,X_d);
//getrusage(RUSAGE_SELF, &end_usage);
/* Output whatever clustering results we need */
if ( write_centroids ) output_centroids(filename);
if ( write_umatrix ) output_umatrix(filename);
if ( write_members ) output_members(filename);
/* Output timing numbers */
//perf_times=timing_of(start_usage, end_usage);
///printf("Timing: %f user, %f system, %f total.\n",
//perf_times[0], perf_times[1], perf_times[0] +
//perf_times[1]);
printf("Clustering required %d iterations.\n", number_of_iterations);
return 0;
}
/* Main entry point into code. Cluster the dataset, given the details
in the parameter block. */
int lfcm(float** U_d,float** V_d,float* X_d)
{
float sqrerror[((N+DIM-1)/DIM)*(C/1)];
float *sqrerror_d;
float sqrerror_sum;
sqrerror_sum= 2 * epsilon;
/* Initialize code */
init(U_d,V_d,X_d);
hipDeviceSynchronize();
HANDLE_ERROR(hipMalloc(&sqrerror_d,((N+DIM-1)/DIM)*sizeof(float)));
printf("Beginning GPU side code\n");
/* Run the updates iteratively */
while (sqrerror_sum > epsilon ) {
number_of_iterations++;
HANDLE_ERROR(hipMemcpy(U,*U_d,N*C*sizeof(float),hipMemcpyDeviceToHost));
update_centroids();
HANDLE_ERROR(hipMemcpy(*V_d,V,C*S*sizeof(float),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( update_umatrix), dim3((N+DIM-1)/DIM),dim3(DIM), 0, 0, sqrerror_d,*U_d,*V_d,X_d,C,N,S,m);
HANDLE_ERROR(hipGetLastError());
HANDLE_ERROR(hipMemcpy(sqrerror,sqrerror_d,((N+DIM-1)/DIM)*sizeof(float),hipMemcpyDeviceToHost));
sqrerror_sum=0;
hipDeviceSynchronize();
for(int i=0; i<((N+DIM-1)/DIM); i++)
sqrerror_sum+=sqrerror[i];
}
/* We go ahead and update the centroids - presumably this will not
change much, since the overall square error in U is small */
update_centroids();
HANDLE_ERROR(hipMemcpy(U,*U_d,N*C*sizeof(float),hipMemcpyDeviceToHost));
HANDLE_ERROR(hipMemcpy(V,*V_d,C*S*sizeof(float),hipMemcpyDeviceToHost));
return 0;
}
/*
update_centroids()
Given a membership matrix U, recalculate the cluster centroids as the
"weighted" mean of each contributing example from the dataset. Each
example contributes by an amount proportional to the membership value.
*/
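/* In symbols: V[i][x] = sum_k U[k][i]^m * X[k][x] / sum_k U[k][i]^m, which is exactly
   what the numerator/denominator loops below accumulate. */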
int update_centroids()
{
int i,k,x;
float *numerator, *denominator;
numerator = (float *)malloc(S*sizeof(float));
denominator = (float *)malloc(S*sizeof(float));
/* For each cluster */
for (i=0; i < C; i++) {
/* Zero out the numerator and denominator accumulators */
for (x=0; x < S; x++) {
numerator[x]=0;
denominator[x]=0;
}
/* Calculate numerator */
for (k=0; k < N; k++) {
for (x=0; x < S; x++)
numerator[x] += powf(U[k*C+i], m) * X[k*S+x];
}
/* Calculate denominator */
for (k=0; k < N; k++) {
for (x=0; x < S; x++)
denominator[x] += powf(U[k*C+i], m);
}
/* Calculate V */
for (x=0; x < S; x++) {
V[i*S+x]= numerator[x] / denominator[x];
}
} /* endfor: C clusters */
return 0;
}
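/*
   update_umatrix()
   Recompute each membership as U[k][i] = 1 / sum_j (D_ki / D_kj)^(2/(m-1)), where D_ki is
   the distance from example k to centroid i; examples that coincide with a centroid get a
   hard membership of 1 for that cluster. Each block also reduces the squared change in U
   into sqrerror[blockIdx.x] through shared memory, and the host sums these partial values
   to test for convergence.
*/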
__global__ void update_umatrix(float *sqrerror,float* U_d, float* V_d, float* X_d,int C,int N,int S,float m)
{
int i,j,k;
int example_is_centroid;
float summation, D_ki, D_kj;
float newU;
__shared__ float tmp_sqrerror[DIM];
/* For each example in the dataset */
k = threadIdx.x + blockIdx.x*blockDim.x;
int local_offset = threadIdx.x;
tmp_sqrerror[local_offset]=0;
if(k<N)
{
/* Special case: If Example is equal to a Cluster Centroid,
then U=1.0 for that cluster and 0 for all others */
if ( (example_is_centroid=is_example_centroid(V_d,X_d,k,S,C)) != -1 ) {
for(int i=0; i<C; i++)
{
if ( i == example_is_centroid )
U_d[k*C+i]=1.0;
else
U_d[k*C+i]=0.0;
}
return;
}
/* For each class */
for(int i=0; i< C; i++)
{
summation=0;
/* Calculate summation */
for (j=0; j < C; j++) {
D_ki=distance(X_d, V_d,k*S,i*S,S);
D_kj=distance(X_d, V_d,k*S,j*S,S);
summation += powf( D_ki / D_kj , (2.0/ (m-1)));
}
/* Weight is 1/sum */
newU=1.0/summation;
/* Add to the squareDifference */
tmp_sqrerror[local_offset] += powf(U_d[k*C+i] - newU, 2);
U_d[k*C+i]=newU;
}
}
__syncthreads();
int t= blockDim.x/2;
while(t>0)
{
if(k+t < N && threadIdx.x<t)
tmp_sqrerror[local_offset] += tmp_sqrerror[local_offset+t];
t/=2;
__syncthreads();
}
if(threadIdx.x==0)
sqrerror[blockIdx.x] = tmp_sqrerror[0];
}
/*===================================================
Utilities
init()
checkIfExampleIsCentroid()
distance()
===================================================*/
/* Allocate storage for U and V dynamically. Also, copy over the
variables that may have been externally set into short names,
which are private and easier to access.
*/
int init(float** U_d, float** V_d, float* X_d)
{
int i,j;
/* Allocate necessary storage */
V=(float *)CALLOC(S*C, sizeof(float));
U=(float *)CALLOC(C*N,sizeof(float));
HANDLE_ERROR(hipMalloc(U_d,N*C*sizeof(float)));
HANDLE_ERROR(hipMalloc(V_d,C*S*sizeof(float)));
/* Place random values in V, then update U matrix based on it */
srand48(seed);
for (i=0; i < C; i++) {
for (j=0; j < S; j++) {
V[i*S+j]=drand48() * max_value[j];
}
}
float *dummy;
hipMalloc(&dummy,N*sizeof(float));
HANDLE_ERROR(hipMemcpy(*V_d,V,C*S*sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(X_d,X,N*S*sizeof(float),hipMemcpyHostToDevice));
/* Once values are populated in V, update the U Matrix for sane values */
hipLaunchKernelGGL(( update_umatrix), dim3((N+DIM-1)/DIM),dim3(DIM), 0, 0, dummy,*U_d,*V_d,X_d,C,N,S,m);
hipDeviceSynchronize();
HANDLE_ERROR(hipGetLastError());
fprintf(stdout,"Initialization completed.\n");
return 0;
}
/* If X[k] == V[i] for some i, then return that i. Otherwise, return -1 */
__device__ int is_example_centroid(float* V_d,float* X_d,int k,int S, int C)
{
int i,x;
for (i=0; i < C; i++) {
for (x=0; x < S; x++) {
if ( X_d[k*S+x] != V_d[i*S+x] ) break;
}
if ( x == S ) /* X==V */
return i;
}
return -1;
}
__device__ float distance(float *v1, float *v2,int startV1,int startV2,int S)
{
int x,i;
float sum=0;
for (x=startV1,i=startV2; x < startV1+S && i<startV2+S; x++, i++)
sum += (v1[x] - v2[i]) * (v1[x] - v2[i]);
return sqrtf(sum);
}
/*=====================================================
Public output utilities
output_centroids()
output_umatrix()
output_members()
=====================================================*/
int output_centroids(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j;
sprintf(buf,"%s.centroids", filestem);
fp=FOPEN(buf,"w");
for (i=0;i < C ;i++) {
for (j=0; j < S; j++)
fprintf(fp, "%f\t",V[i*S+j]);
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}
int output_umatrix(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j;
sprintf(buf,"%s.umatrix", filestem);
fp=FOPEN(buf,"w");
for (i=0; i < N; i++) {
for (j=0; j < C; j++)
fprintf(fp,"%f\t", U[i*C+j]);
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}
int output_members(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j,max;
sprintf(buf,"%s.members", filestem);
fp=FOPEN(buf,"w");
for (i=0; i < N; i++) {
for (max=j=0; j < C; j++)
if ( U[i*C+j] > U[i*C+max] ) max=j;
fprintf(fp,"%d\n",max);
}
fclose(fp);
return 0;
}
| c55220928da26af5114b3e403931ba8117b53bad.cu | /*
lfcm.c
A literal FCM implementation.
$Id: lfcm.c,v 1.3 2002/07/12 20:48:48 eschrich Exp $
Steven Eschrich
Copyright (C) 2002 University of South Florida
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <math.h>
#include <sys/times.h>
#include <sys/resource.h>
#include <limits.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include "utils.h"
#ifndef HANDLE_ERROR
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#endif
#define U(i,j) U[j][i]
#define DIM 256
float *U;
float *V;
float *X;
int C;
float m;
int S;
int N;
/*
__device__ float *U_d;
__device__ float *V_d;
__device__ float *X_d;
*/
/* Variables are defined statically here. These are reasonable
defaults, but can be changed via the parameter to lfcmCluster() */
float epsilon=0.25;
int number_of_iterations;
long seed;
int *max_value;
/* Public functions */
int lfcm(float** U_d,float** V_d,float* X_d);
/* Private functions */
int update_centroids();
__global__ void update_umatrix(float*,float*,float*,float*,int,int,int,float);
/* Utilities */
int init(float** U_d,float** V_d, float* X_d);
__device__ int is_example_centroid(float*,float*,int k,int,int);
__device__ float distance(float *,float *,int,int,int);
int output_centroids(char*);
int output_umatrix(char*);
int output_members(char*);
/* External functions */
int load_test_data(float **ds,float **ds_d, int *s, int *n);
int load_atr_data(char *filename, float **ds,float **ds_d ,int *s, int *n);
int load_mri_data(char *filename, float **ds,float **ds_d, int *s, int *n);
/* For testing purposes, we hard-code the desired number of clusters */
#define ATR_NUMBER_OF_CLUSTERS 5
#define MRI_NUMBER_OF_CLUSTERS 10
#define TEST_NUMBER_OF_CLUSTERS 2
#define TEST 1
#define ATR 2
#define MRI 3
/* Global variables */
int dataset_type=MRI;
int write_centroids=0;
int write_umatrix=0;
int write_members=0;
/* Variables that must be defined for called functions */
int vals[][3]={{DIM,DIM,DIM},{0,0,0},{DIM,DIM,DIM},{4096,4096,4096}};
/* Function prototypes */
float *timing_of(struct rusage,struct rusage); /* Calculate time in seconds */
int main(int argc, char **argv)
{
cudaDeviceReset();
number_of_iterations=0;
struct rusage start_usage, end_usage;
int ch;
m = 2.0;
C=2;
S=2;
N=2;
float *perf_times;
char *filename;
float *U_d,*V_d,*X_d;
epsilon=0.225;
m=2.0;
seed=2000;
max_value=vals[dataset_type];
while ( (ch=getopt(argc, argv,"hw:d:s:")) != EOF ) {
switch (ch) {
case 'h':
fprintf(stderr,"Usage\n" \
"-d [a|t|m|s] Use dataset atr, mri, test, seawifs\n"\
"-w write cluster centers and memberships out\n"\
"-s seed Use seed as the random seed\n");
exit(1);
case 'w':
if ( !strcmp(optarg,"umatrix") ) write_umatrix=1;
if ( !strcmp(optarg,"centroids") ) write_centroids=1;
if ( !strcmp(optarg,"members") ) write_members=1;
if ( !strcmp(optarg,"all"))
write_umatrix=write_centroids=write_members=1;
break;
case 'd':
if ( *optarg == 'a' ) dataset_type=ATR;
if ( *optarg == 'm' ) dataset_type=MRI;
if ( *optarg == 't' ) dataset_type=TEST;
max_value=vals[dataset_type];
break;
case 's':
seed=atol(optarg);
break;
}
}
/* Print out main parameters for this run */
fprintf(stdout,"FCM Parameters\n clusterMethod=literal fcm\n");
filename=argv[optind];
fprintf(stdout," file=%s\n\n",filename);
/* Load the dataset, using one of a particular group of datasets. */
switch (dataset_type) {
case TEST:
load_test_data(&X,&X_d, &S, &N);
C=TEST_NUMBER_OF_CLUSTERS;
break;
case ATR:
load_atr_data(argv[optind],&X,&X_d, &S, &N);
C=ATR_NUMBER_OF_CLUSTERS;
break;
case MRI:
load_mri_data(argv[optind], &X,&X_d, &S, &N);
C=MRI_NUMBER_OF_CLUSTERS;
break;
}
fprintf(stdout, "Beginning to cluster here...\n");
/* Time the fcm algorithm */
//getrusage(RUSAGE_SELF, &start_usage);
lfcm(&U_d,&V_d,X_d);
//getrusage(RUSAGE_SELF, &end_usage);
/* Output whatever clustering results we need */
if ( write_centroids ) output_centroids(filename);
if ( write_umatrix ) output_umatrix(filename);
if ( write_members ) output_members(filename);
/* Output timing numbers */
//perf_times=timing_of(start_usage, end_usage);
///printf("Timing: %f user, %f system, %f total.\n",
//perf_times[0], perf_times[1], perf_times[0] +
//perf_times[1]);
printf("Clustering required %d iterations.\n", number_of_iterations);
return 0;
}
/* Main entry point into code. Cluster the dataset, given the details
in the parameter block. */
int lfcm(float** U_d,float** V_d,float* X_d)
{
	float sqrerror[(N+DIM-1)/DIM];	/* one partial squared-error sum per thread block */
float *sqrerror_d;
float sqrerror_sum;
sqrerror_sum= 2 * epsilon;
/* Initialize code */
init(U_d,V_d,X_d);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaMalloc(&sqrerror_d,((N+DIM-1)/DIM)*sizeof(float)));
printf("Beginning GPU side code\n");
/* Run the updates iteratively */
while (sqrerror_sum > epsilon ) {
number_of_iterations++;
HANDLE_ERROR(cudaMemcpy(U,*U_d,N*C*sizeof(float),cudaMemcpyDeviceToHost));
update_centroids();
HANDLE_ERROR(cudaMemcpy(*V_d,V,C*S*sizeof(float),cudaMemcpyHostToDevice));
update_umatrix<<<(N+DIM-1)/DIM,DIM>>>(sqrerror_d,*U_d,*V_d,X_d,C,N,S,m);
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaMemcpy(sqrerror,sqrerror_d,((N+DIM-1)/DIM)*sizeof(float),cudaMemcpyDeviceToHost));
sqrerror_sum=0;
cudaDeviceSynchronize();
for(int i=0; i<((N+DIM-1)/DIM); i++)
sqrerror_sum+=sqrerror[i];
}
/* We go ahead and update the centroids - presumably this will not
change much, since the overall square error in U is small */
update_centroids();
HANDLE_ERROR(cudaMemcpy(U,*U_d,N*C*sizeof(float),cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaMemcpy(V,*V_d,C*S*sizeof(float),cudaMemcpyDeviceToHost));
return 0;
}
/*
update_centroids()
Given a membership matrix U, recalculate the cluster centroids as the
"weighted" mean of each contributing example from the dataset. Each
example contributes by an amount proportional to the membership value.
*/
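/*
   For every cluster i and feature x this computes

       V[i][x] = sum_k U[k][i]^m * X[k][x]  /  sum_k U[k][i]^m

   i.e. the membership-weighted mean of the examples.
*/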
int update_centroids()
{
int i,k,x;
float *numerator, *denominator;
numerator = (float *)malloc(S*sizeof(float));
denominator = (float *)malloc(S*sizeof(float));
/* For each cluster */
for (i=0; i < C; i++) {
/* Zero out numerator and denominator options */
for (x=0; x < S; x++) {
numerator[x]=0;
denominator[x]=0;
}
/* Calculate numerator */
for (k=0; k < N; k++) {
for (x=0; x < S; x++)
numerator[x] += powf(U[k*C+i], m) * X[k*S+x];
}
/* Calculate denominator */
for (k=0; k < N; k++) {
for (x=0; x < S; x++)
denominator[x] += powf(U[k*C+i], m);
}
/* Calculate V */
for (x=0; x < S; x++) {
V[i*S+x]= numerator[x] / denominator[x];
}
} /* endfor: C clusters */
return 0;
}
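/*
   update_umatrix()
   GPU kernel: recompute the fuzzy membership of every example k in every
   cluster i with the standard FCM update

       U[k][i] = 1 / sum_j ( D_ki / D_kj )^( 2/(m-1) )

   where D_ki is the distance from example k to centroid i. Each block also
   accumulates the squared change in U and writes one partial sum per block
   into sqrerror[], which the host sums to test convergence.
*/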
__global__ void update_umatrix(float *sqrerror,float* U_d, float* V_d, float* X_d,int C,int N,int S,float m)
{
int i,j,k;
int example_is_centroid;
float summation, D_ki, D_kj;
float newU;
__shared__ float tmp_sqrerror[DIM];
/* For each example in the dataset */
k = threadIdx.x + blockIdx.x*blockDim.x;
int local_offset = threadIdx.x;
tmp_sqrerror[local_offset]=0;
if(k<N)
{
/* Special case: If Example is equal to a Cluster Centroid,
then U=1.0 for that cluster and 0 for all others */
if ( (example_is_centroid=is_example_centroid(V_d,X_d,k,S,C)) != -1 ) {
for(int i=0; i<C; i++)
{
if ( i == example_is_centroid )
U_d[k*C+i]=1.0;
else
U_d[k*C+i]=0.0;
}
	}
	else {
	/* For each class */
for(int i=0; i< C; i++)
{
summation=0;
/* Calculate summation */
for (j=0; j < C; j++) {
D_ki=distance(X_d, V_d,k*S,i*S,S);
D_kj=distance(X_d, V_d,k*S,j*S,S);
summation += powf( D_ki / D_kj , (2.0/ (m-1)));
}
/* Weight is 1/sum */
newU=1.0/summation;
/* Add to the squareDifference */
tmp_sqrerror[local_offset] += powf(U_d[k*C+i] - newU, 2);
U_d[k*C+i]=newU;
		}
	}	/* end else: example is not a centroid */
}
__syncthreads();
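	/* Block-level tree reduction: fold the per-thread squared-error terms in
	   shared memory down into tmp_sqrerror[0]; thread 0 then publishes this
	   block's partial sum to sqrerror[blockIdx.x] for the host to accumulate. */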
int t= blockDim.x/2;
while(t>0)
{
if(k+t < N && threadIdx.x<t)
tmp_sqrerror[local_offset] += tmp_sqrerror[local_offset+t];
t/=2;
__syncthreads();
}
if(threadIdx.x==0)
sqrerror[blockIdx.x] = tmp_sqrerror[0];
}
/*===================================================
Utilities
init()
checkIfExampleIsCentroid()
distance()
===================================================*/
/* Allocate storage for U and V dynamically. Also, copy over the
variables that may have been externally set into short names,
which are private and easier to access.
*/
int init(float** U_d, float** V_d, float* X_d)
{
int i,j;
/* Allocate necessary storage */
V=(float *)CALLOC(S*C, sizeof(float));
U=(float *)CALLOC(C*N,sizeof(float));
HANDLE_ERROR(cudaMalloc(U_d,N*C*sizeof(float)));
HANDLE_ERROR(cudaMalloc(V_d,C*S*sizeof(float)));
/* Place random values in V, then update U matrix based on it */
srand48(seed);
for (i=0; i < C; i++) {
for (j=0; j < S; j++) {
V[i*S+j]=drand48() * max_value[j];
}
}
float *dummy;
cudaMalloc(&dummy,N*sizeof(float));
HANDLE_ERROR(cudaMemcpy(*V_d,V,C*S*sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(X_d,X,N*S*sizeof(float),cudaMemcpyHostToDevice));
/* Once values are populated in V, update the U Matrix for sane values */
update_umatrix<<<(N+DIM-1)/DIM,DIM>>>(dummy,*U_d,*V_d,X_d,C,N,S,m);
cudaDeviceSynchronize();
HANDLE_ERROR(cudaGetLastError());
fprintf(stdout,"Initialization completed.\n");
return 0;
}
/* If X[k] == V[i] for some i, then return that i. Otherwise, return -1 */
__device__ int is_example_centroid(float* V_d,float* X_d,int k,int S, int C)
{
int i,x;
for (i=0; i < C; i++) {
for (x=0; x < S; x++) {
if ( X_d[k*S+x] != V_d[i*S+x] ) break;
}
if ( x == S ) /* X==V */
return i;
}
return -1;
}
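/* Euclidean distance between the S-dimensional vectors stored flat at
   v1[startV1..startV1+S-1] and v2[startV2..startV2+S-1]. */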
__device__ float distance(float *v1, float *v2,int startV1,int startV2,int S)
{
int x,i;
float sum=0;
for (x=startV1,i=startV2; x < startV1+S && i<startV2+S; x++, i++)
sum += (v1[x] - v2[i]) * (v1[x] - v2[i]);
return sqrtf(sum);
}
/*=====================================================
Public output utilities
output_centroids()
output_umatrix()
output_members()
=====================================================*/
int output_centroids(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j;
sprintf(buf,"%s.centroids", filestem);
fp=FOPEN(buf,"w");
for (i=0;i < C ;i++) {
for (j=0; j < S; j++)
fprintf(fp, "%f\t",V[i*S+j]);
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}
int output_umatrix(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j;
sprintf(buf,"%s.umatrix", filestem);
fp=FOPEN(buf,"w");
for (i=0; i < N; i++) {
for (j=0; j < C; j++)
fprintf(fp,"%f\t", U[i*C+j]);
fprintf(fp,"\n");
}
fclose(fp);
return 0;
}
int output_members(char *filestem)
{
FILE *fp;
char buf[DIM];
int i,j,max;
sprintf(buf,"%s.members", filestem);
fp=FOPEN(buf,"w");
for (i=0; i < N; i++) {
for (max=j=0; j < C; j++)
if ( U[i*C+j] > U[i*C+max] ) max=j;
fprintf(fp,"%d\n",max);
}
fclose(fp);
return 0;
}
|
3acff720438dc0ed259932344369902de624350e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
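/*
 * Illustrative example (values not taken from the benchmark inputs): with
 * cutoff = 12 Angstroms and BIN_LENGTH = 4 Angstroms, c = ceil(12/4) = 3,
 * so the bin grid is padded by 3 bins on every side and binZeroAddr sits
 * 3 bins inside binBaseAddr in each dimension.
 */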
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
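    /* Each thread accumulates the potential at 4 lattice points that share
       the same x and z but are spaced 2*h apart in y (energy0..energy3), so
       the 8x2x8 thread block covers the full 8x8x8 region. */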
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
, int loop)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (ener_t *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
hipMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
hipMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
hipMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
hipMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
hipMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
  hipMemcpyToSymbol(HIP_SYMBOL(NbrListLen), &nbrlistlen, sizeof(int), 0);
CUERR;
  hipMemcpyToSymbol(HIP_SYMBOL(NbrList), nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
hipStream_t cutoffstream;
hipStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
for(int i=0;i<loop;i++){
// printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
// printf(" computing plane %d\r", zRegionIndex);
// fflush(stdout);
hipLaunchKernelGGL(( cuda_cutoff_potential_lattice6overlap), dim3(gridDim), dim3(blockDim), 0, 0, binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
}
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
hipStreamSynchronize(cutoffstream);
CUERR;
hipDeviceSynchronize();
hipStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
hipMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
hipMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
hipFree(regionZeroCuda);
hipFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
| 3acff720438dc0ed259932344369902de624350e.cu | /***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
/* Normally, we're summing electrostatic potential. However, for
* profiling we may want to appropriate this storage to count the
* number of nearby atoms, instead.
*/
#undef NEIGHBOR_COUNT
#ifndef NEIGHBOR_COUNT
typedef float ener_t;
#else
typedef int ener_t;
#endif
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_SHIFT 5 /* # of bits to shift for mul/div by BIN_SIZE */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
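/*
 * Illustrative example (values not taken from the benchmark inputs): with
 * cutoff = 12 Angstroms and BIN_LENGTH = 4 Angstroms, c = ceil(12/4) = 3,
 * so the bin grid is padded by 3 bins on every side and binZeroAddr sits
 * 3 bins inside binBaseAddr in each dimension.
 */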
__global__ static void cuda_cutoff_potential_lattice6overlap(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
ener_t *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ ener_t *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x
+ threadIdx.x;
/* blockDim.x == 8, blockDim.y == 2, blockDim.z == 8 */
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
#ifndef NEIGHBOR_COUNT
ener_t energy0 = 0.f;
ener_t energy1 = 0.f;
ener_t energy2 = 0.f;
ener_t energy3 = 0.f;
#else
ener_t energy0 = 0, energy1 = 0, energy2 = 0, energy3 = 0;
#endif
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
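    /* Each thread accumulates the potential at 4 lattice points that share
       the same x and z but are spaced 2*h apart in y (energy0..energy3), so
       the 8x2x8 thread block covers the full 8x8x8 region. */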
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
int i;
for (i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
#else
energy0 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
#else
energy1 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
#else
energy2 += (r2 < cutoff2);
#endif
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
#ifndef NEIGHBOR_COUNT
if (r2 < cutoff2)
{
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
#else
energy3 += (r2 < cutoff2);
#endif
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
extern "C" int gpu_compute_cutoff_potential_lattice6overlap(
Lattice *lattice,
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
, int loop)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
ener_t *regionZeroAddr, *thisRegion;
ener_t *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
#ifdef NEIGHBOR_COUNT
double neighbor_count = 0; /* used to profile the number of atoms near a
* lattice point */
#endif
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (ener_t *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 2;
blockDim.z = 8;
/* allocate and initialize memory on CUDA device */
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
cudaMalloc((void **) ®ionZeroCuda, lnall * sizeof(ener_t));
CUERR;
cudaMemset(regionZeroCuda, 0, lnall * sizeof(ener_t));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
cudaMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
cudaMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
cudaMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
cudaMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
cudaMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
cudaStream_t cutoffstream;
cudaStreamCreate(&cutoffstream);
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
for(int i=0;i<loop;i++){
// printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
// printf(" computing plane %d\r", zRegionIndex);
// fflush(stdout);
cuda_cutoff_potential_lattice6overlap<<<gridDim, blockDim, 0>>>(binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
}
}
/*
* handle extra atoms on the CPU, concurrently with the GPU calculations
*/
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
cudaStreamSynchronize(cutoffstream);
CUERR;
  cudaDeviceSynchronize();
cudaStreamDestroy(cutoffstream);
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
cudaMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(ener_t),
cudaMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
cudaFree(regionZeroCuda);
cudaFree(binBaseCuda);
/*
* transpose on CPU, updating, producing the final lattice
*/
/* transpose regions back into lattice */
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
#ifndef NEIGHBOR_COUNT
lattice->lattice[index] += thisRegion[indexRegion];
#else
neighbor_count += thisRegion[indexRegion];
#endif
}
}
}
#ifdef NEIGHBOR_COUNT
printf("Neighbor count: %f\n", (float)neighbor_count);
#endif
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
|
a198e1a94dc382b2928099443e8f9c8136311ec3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
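/* The OPS_ACCn macros flatten a relative (x,y,z) stencil offset into a 1-D
   index using the x and y extents of dat n, which are cached in __constant__
   memory and refreshed by the host stub whenever the dat sizes change. */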
// user function
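// Back (low-z) halo update: each selected field's halo cell takes the
// negated flux value of the interior cell 4 layers further in along z.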
__device__
inline void
update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_4_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
    hipMemcpyToSymbol(HIP_SYMBOL(xdim0_update_halo_kernel5_minus_4_back), &xdim0,
                      sizeof(int));
    xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
    hipMemcpyToSymbol(HIP_SYMBOL(ydim0_update_halo_kernel5_minus_4_back), &ydim0,
                      sizeof(int));
    ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
    hipMemcpyToSymbol(HIP_SYMBOL(xdim1_update_halo_kernel5_minus_4_back), &xdim1,
                      sizeof(int));
    xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
    hipMemcpyToSymbol(HIP_SYMBOL(ydim1_update_halo_kernel5_minus_4_back), &ydim1,
                      sizeof(int));
    ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_4_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| a198e1a94dc382b2928099443e8f9c8136311ec3.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_4_back;
int xdim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_4_back;
int ydim0_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_4_back;
int xdim1_update_halo_kernel5_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_4_back;
int ydim1_update_halo_kernel5_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_4_back * (y) + \
xdim0_update_halo_kernel5_minus_4_back * \
ydim0_update_halo_kernel5_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_4_back * (y) + \
xdim1_update_halo_kernel5_minus_4_back * \
ydim1_update_halo_kernel5_minus_4_back * (z))
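/* The OPS_ACCn macros flatten a relative (x,y,z) stencil offset into a 1-D
   index using the x and y extents of dat n, which are cached in __constant__
   memory and refreshed by the host stub whenever the dat sizes change. */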
// user function
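// Back (low-z) halo update: each selected field's halo cell takes the
// negated flux value of the interior cell 4 layers further in along z.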
__device__
inline void
update_halo_kernel5_minus_4_back_gpu(double *vol_flux_z,
double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_4_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_4_back *
ydim0_update_halo_kernel5_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_4_back *
ydim1_update_halo_kernel5_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_4_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_4_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
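// clip the global iteration range to the portion owned by this MPI rank's sub-block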
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_4_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_4_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_4_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_4_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_4_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
755b8909211c5557bb7c2405978afb78caab7ec1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -triple x86_64-unknown-gnu-linux -aux-triple amdgcn-amd-amdhsa \
// RUN: -emit-llvm -o - -x hip %s | FileCheck -check-prefixes=CHECK,LNX %s
// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -aux-triple amdgcn-amd-amdhsa \
// RUN: -emit-llvm -o - -x hip %s | FileCheck -check-prefixes=CHECK,MSVC %s
#include "Inputs/cuda.h"
namespace X {
__global__ void kern1(int *x);
__device__ int var1;
}
// CHECK: @[[STR1:.*]] = {{.*}} c"_ZN1X5kern1EPi\00"
// CHECK: @[[STR2:.*]] = {{.*}} c"_ZN1X4var1E\00"
// LNX-LABEL: define {{.*}}@_Z4fun1v()
// MSVC-LABEL: define {{.*}} @"?fun1@@YAPEBDXZ"()
// CHECK: ret ptr @[[STR1]]
const char *fun1() {
return __builtin_get_device_side_mangled_name(X::kern1);
}
// LNX-LABEL: define {{.*}}@_Z4fun2v()
// MSVC-LABEL: define {{.*}}@"?fun2@@YAPEBDXZ"()
// CHECK: ret ptr @[[STR2]]
__host__ __device__ const char *fun2() {
return __builtin_get_device_side_mangled_name(X::var1);
}
| 755b8909211c5557bb7c2405978afb78caab7ec1.cu | // RUN: %clang_cc1 -triple x86_64-unknown-gnu-linux -aux-triple amdgcn-amd-amdhsa \
// RUN: -emit-llvm -o - -x hip %s | FileCheck -check-prefixes=CHECK,LNX %s
// RUN: %clang_cc1 -triple x86_64-unknown-windows-msvc -aux-triple amdgcn-amd-amdhsa \
// RUN: -emit-llvm -o - -x hip %s | FileCheck -check-prefixes=CHECK,MSVC %s
#include "Inputs/cuda.h"
namespace X {
__global__ void kern1(int *x);
__device__ int var1;
}
// CHECK: @[[STR1:.*]] = {{.*}} c"_ZN1X5kern1EPi\00"
// CHECK: @[[STR2:.*]] = {{.*}} c"_ZN1X4var1E\00"
// LNX-LABEL: define {{.*}}@_Z4fun1v()
// MSVC-LABEL: define {{.*}} @"?fun1@@YAPEBDXZ"()
// CHECK: ret ptr @[[STR1]]
const char *fun1() {
return __builtin_get_device_side_mangled_name(X::kern1);
}
// LNX-LABEL: define {{.*}}@_Z4fun2v()
// MSVC-LABEL: define {{.*}}@"?fun2@@YAPEBDXZ"()
// CHECK: ret ptr @[[STR2]]
__host__ __device__ const char *fun2() {
return __builtin_get_device_side_mangled_name(X::var1);
}
|
5397b4c3370cb29c267a7915199cc3ac663b6c01.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "npp.h"
#include "nppdefs.h"
#include "nppcore.h"
#include "nppi.h"
#include "npps.h"
#include <iostream>
#include <unordered_map>
#include <string>
#include <windows.h>
#include "base_op.cuh"
#include <thread>
#include "graph_active_threads.cuh"
#include "test_tool.h"
#include <ctime>
#include "constant_class.cuh"
#include "weigh_class.cuh"
#include "x_op.cuh"
#include "exp_op.cuh"
#include "sum_op.cuh"
#include "div_op.cuh"
#include "mul_op.cuh"
#include "sin_op.cuh"
#include "cos_op.cuh"
#include "reduce_sum_op.cuh"
#include "tf.cuh"
#include "image.h"
#include "cuda_rand.cuh"
using namespace std;
using namespace cv;
void addr(float& b) {
float* c = &b;
cout<<c<<endl;
}
int main()
{
clock_t startTime, endTime;
startTime = clock(); // start timing
//int dimfold[4] = {-1, 1, 1, 1};
//int dimfold2[4] = { 1, 1, 1, 1 };
//int dim[4] = { 1,1,1,1 };
//int dim1[4] = { 1,1,1,3};
//float src[3] = {5.0,6.0,7.0};
//float a[1] = { 2.0 };
//float one_v[1] = { 1.0 };
//tf<float> tf;
//tf.graph_init();
//base_op<float>* two_con = tf.constantPlaceholder_o("2",1,4,dimfold);
//base_op<float>* one_con=tf.constant_o("1", 1, 4, dim, one_v);
//base_op<float>* X =tf.variable_o(true, "X", 1, 4, dim1, src);
//base_op<float>* exp = tf.exp(X);
//base_op<float>* y1=(*((*one_con) - X))*((*two_con)*exp);//2*exp(x)*(1-x)
//base_op<float>* y2 =*(*X + two_con) + *(*one_con + X)*exp;
//base_op<float>* last = *(tf.exp(*(*y1 + tf.sin(X)) / (*y2*tf.cos(X))))+tf.sin(X);
//int dim_c[4] = { 1, 1, 1, 1 };
//base_op<float>* reduce = tf.cos(tf.reduce_avg(tf.a_power_x(last,2.0),dim_c));
//
////set sess
//graph_active<float>* sess=tf.session();
////init placeholder
//vector<constant<float>*>* v = new vector<constant<float>*>;
//constant<float>* aa = constant<float>::getObject("2", 1, 4, dim,a);
//v->push_back(aa);
//sess->Placeholder_assgin(*v);
//aa->clear();
////run sess
//sess->ward_start(1,0);
//sess->ward_start(0,1);
//endTime = clock();
//
////output
//cout << "The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
//cout <<"forward::"<<(reduce->y)->x[0] << endl;
//cout << "forward::" << (last->y)->x[1] << endl;
//cout << "forward::" << (last->y)->x[2] << endl;
//cout<<"----------------------------------------- "<<endl;
//vector<variable<float>*>* list_dw=base_op<float>::global_dw_trainable->getallvalue();
//for(const auto& e : *list_dw)
// {
// for (int i = 0; i < ((variable<float>*) e)->length; i++)
// {
// cout <<e->var_name<<":" <<((variable<float>*)e)->x[i] << endl;;
// }
// }
//tensor_reduce_test();
image<float>* im=new image<float>;
im->readImage("C:/Users/Administrator/Desktop/lena.jpg");
cout<<im->width<<endl;
cout<<im->height<<endl;
cout<<im->size<<endl;
tf<float> tf;
tf.graph_init();
int dim[4] = {1,3,512,512};
base_op<float>* image_input = tf.constant_o("image", 1, 4, dim, im->imgData_h);
int filter[4] = {1000,3,5,5};
tf.conv(image_input,1000,filter);
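// builds a convolution over the 1x3x512x512 input with 1000 filters; the filter dims {1000,3,5,5} are presumably {count, channels, height, width}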
graph_active<float>* sess=tf.session();
sess->ward_start(0, 0);
cout<<"--------forward over-----------"<<endl;
//sess->ward_start(0, 1);
endTime = clock();
//output
cout << "The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
return 0;
}
| 5397b4c3370cb29c267a7915199cc3ac663b6c01.cu | #pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "npp.h"
#include "nppdefs.h"
#include "nppcore.h"
#include "nppi.h"
#include "npps.h"
#include <iostream>
#include <unordered_map>
#include <string>
#include <windows.h>
#include "base_op.cuh"
#include <thread>
#include "graph_active_threads.cuh"
#include "test_tool.h"
#include <ctime>
#include "constant_class.cuh"
#include "weigh_class.cuh"
#include "x_op.cuh"
#include "exp_op.cuh"
#include "sum_op.cuh"
#include "div_op.cuh"
#include "mul_op.cuh"
#include "sin_op.cuh"
#include "cos_op.cuh"
#include "reduce_sum_op.cuh"
#include "tf.cuh"
#include "image.h"
#include "cuda_rand.cuh"
using namespace std;
using namespace cv;
void addr(float& b) {
float* c = &b;
cout<<c<<endl;
}
int main()
{
clock_t startTime, endTime;
startTime = clock(); // start timing
//int dimfold[4] = {-1, 1, 1, 1};
//int dimfold2[4] = { 1, 1, 1, 1 };
//int dim[4] = { 1,1,1,1 };
//int dim1[4] = { 1,1,1,3};
//float src[3] = {5.0,6.0,7.0};
//float a[1] = { 2.0 };
//float one_v[1] = { 1.0 };
//tf<float> tf;
//tf.graph_init();
//base_op<float>* two_con = tf.constantPlaceholder_o("2",1,4,dimfold);
//base_op<float>* one_con=tf.constant_o("1", 1, 4, dim, one_v);
//base_op<float>* X =tf.variable_o(true, "X", 1, 4, dim1, src);
//base_op<float>* exp = tf.exp(X);
//base_op<float>* y1=(*((*one_con) - X))*((*two_con)*exp);//2*exp(x)*(1-x)
//base_op<float>* y2 =*(*X + two_con) + *(*one_con + X)*exp;
//base_op<float>* last = *(tf.exp(*(*y1 + tf.sin(X)) / (*y2*tf.cos(X))))+tf.sin(X);
//int dim_c[4] = { 1, 1, 1, 1 };
//base_op<float>* reduce = tf.cos(tf.reduce_avg(tf.a_power_x(last,2.0),dim_c));
//
////set sess
//graph_active<float>* sess=tf.session();
////init placeholder
//vector<constant<float>*>* v = new vector<constant<float>*>;
//constant<float>* aa = constant<float>::getObject("2", 1, 4, dim,a);
//v->push_back(aa);
//sess->Placeholder_assgin(*v);
//aa->clear();
////run sess
//sess->ward_start(1,0);
//sess->ward_start(0,1);
//endTime = clock();
//
////output
//cout << "The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
//cout <<"forward::"<<(reduce->y)->x[0] << endl;
//cout << "forward::" << (last->y)->x[1] << endl;
//cout << "forward::" << (last->y)->x[2] << endl;
//cout<<"----------------------------------------- "<<endl;
//vector<variable<float>*>* list_dw=base_op<float>::global_dw_trainable->getallvalue();
//for(const auto& e : *list_dw)
// {
// for (int i = 0; i < ((variable<float>*) e)->length; i++)
// {
// cout <<e->var_name<<":" <<((variable<float>*)e)->x[i] << endl;;
// }
// }
//tensor_reduce_test();
image<float>* im=new image<float>;
im->readImage("C:/Users/Administrator/Desktop/lena.jpg");
cout<<im->width<<endl;
cout<<im->height<<endl;
cout<<im->size<<endl;
tf<float> tf;
tf.graph_init();
int dim[4] = {1,3,512,512};
base_op<float>* image_input = tf.constant_o("image", 1, 4, dim, im->imgData_h);
int filter[4] = {1000,3,5,5};
tf.conv(image_input,1000,filter);
graph_active<float>* sess=tf.session();
sess->ward_start(0, 0);
cout<<"--------forward over-----------"<<endl;
//sess->ward_start(0, 1);
endTime = clock();
//output
cout << "The run time is: " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << endl;
return 0;
}
|
2aa5311911fce001090410490968d84558186437.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void setupKernel(hiprandState_t *state, unsigned long long seed) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
hiprand_init(seed, idx, 0, &state[idx]);
} | 2aa5311911fce001090410490968d84558186437.cu | #include "includes.h"
__global__ void setupKernel(curandState *state, unsigned long long seed) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
curand_init(seed, idx, 0, &state[idx]);
} |
2392da7e0710b4bf7ecc5b478cf1a793865142a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ void timeTest1(int *a){
int t_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (t_index < SIZE) {
*a +=5;
}
}
__global__ void timeTest() {
int t_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (t_index < SIZE) {
int a = 0;
for(int i = 0; i < 10000000; i++){
timeTest1(&a);
}
}
} | 2392da7e0710b4bf7ecc5b478cf1a793865142a7.cu | #include "includes.h"
__device__ void timeTest1(int *a){
int t_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (t_index < SIZE) {
*a +=5;
}
}
__global__ void timeTest() {
int t_index = threadIdx.x + (blockIdx.x * blockDim.x);
if (t_index < SIZE) {
int a = 0;
for(int i = 0; i < 10000000; i++){
timeTest1(&a);
}
}
} |
40f5e99553e4d20165f4fc4f7cabb54bc478039c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand_kernel.h>
#include "../include/pagerank.h"
#define H2D (hipMemcpyHostToDevice)
#define D2H (hipMemcpyDeviceToHost)
#define WARP_SIZE 32
int MONTE_CARLO = 1;
__global__ void setup(const int nodes, float* value, hiprandStateMRG32k3a_t *state)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < nodes) {
value[tid] = 0;
}
if (threadIdx.x < WARP_SIZE) {
int rid = threadIdx.x + blockIdx.x * WARP_SIZE;
hiprand_init(0, rid, 0, &state[rid]);
}
}
__global__ void random_walk(const int nodes, float* value, const int* rowptr, const int* col, hiprandStateMRG32k3a_t *state)
{
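// Each thread runs a random walk of `length` steps from its own start node:
// with probability alpha it jumps to a uniformly chosen out-neighbour (dangling
// nodes stay put), otherwise it restarts at the start node; visit counts are
// accumulated into `value` via atomicAdd and normalised by `length` afterwards.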
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int rid = (threadIdx.x % WARP_SIZE) + blockIdx.x * WARP_SIZE;
if (tid < nodes) {
int cur = tid;
for (int i = 0; i < length; i++) {
int deg = rowptr[cur + 1] - rowptr[cur];
if (hiprand_uniform(&state[rid]) < alpha)
cur = deg == 0? cur: col[ rowptr[cur] + (int)(hiprand_uniform(&state[rid]) * deg) ];
else
cur = tid;
atomicAdd(&value[cur], 1);
}
}
}
__global__ void normalize(const int nodes, float* value)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < nodes) {
value[tid] /= length;
}
}
void pagerank(const int nodes, const int edges, float* value, const int* rowdeg, const int* rowptr, const int* row, const int* col) {
float *d_value;
int *d_rowptr, *d_col;
hiprandStateMRG32k3a_t *state;
const int threads_per_block = 512;
hipMalloc(&state, sizeof(hiprandStateMRG32k3a_t) * (nodes / threads_per_block + 1) * WARP_SIZE);
hipMalloc(&d_value, sizeof(float) * nodes);
hipMalloc(&d_rowptr, sizeof(int) * (nodes + 1));
hipMalloc(&d_col, sizeof(int) * edges);
hipMemcpy(d_rowptr, rowptr, sizeof(int) * (nodes + 1), H2D);
hipMemcpy(d_col, col, sizeof(int) * edges, H2D);
hipLaunchKernelGGL(( setup), dim3(nodes/threads_per_block+1), dim3(threads_per_block), 0, 0, nodes, d_value, state);
hipLaunchKernelGGL(( random_walk), dim3(nodes/threads_per_block+1), dim3(threads_per_block), 0, 0, nodes, d_value, d_rowptr, d_col, state);
hipLaunchKernelGGL(( normalize), dim3(nodes/threads_per_block+1), dim3(threads_per_block), 0, 0, nodes, d_value);
hipMemcpy(value, d_value, sizeof(float) * nodes, D2H);
hipFree(state);
hipFree(d_value);
hipFree(d_rowptr);
hipFree(d_col);
}
| 40f5e99553e4d20165f4fc4f7cabb54bc478039c.cu | #include <curand_kernel.h>
#include "../include/pagerank.h"
#define H2D (cudaMemcpyHostToDevice)
#define D2H (cudaMemcpyDeviceToHost)
#define WARP_SIZE 32
int MONTE_CARLO = 1;
__global__ void setup(const int nodes, float* value, curandStateMRG32k3a *state)
{
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < nodes) {
value[tid] = 0;
}
if (threadIdx.x < WARP_SIZE) {
int rid = threadIdx.x + blockIdx.x * WARP_SIZE;
curand_init(0, rid, 0, &state[rid]);
}
}
__global__ void random_walk(const int nodes, float* value, const int* rowptr, const int* col, curandStateMRG32k3a *state)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int rid = (threadIdx.x % WARP_SIZE) + blockIdx.x * WARP_SIZE;
if (tid < nodes) {
int cur = tid;
for (int i = 0; i < length; i++) {
int deg = rowptr[cur + 1] - rowptr[cur];
if (curand_uniform(&state[rid]) < alpha)
cur = deg == 0? cur: col[ rowptr[cur] + (int)(curand_uniform(&state[rid]) * deg) ];
else
cur = tid;
atomicAdd(&value[cur], 1);
}
}
}
__global__ void normalize(const int nodes, float* value)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < nodes) {
value[tid] /= length;
}
}
void pagerank(const int nodes, const int edges, float* value, const int* rowdeg, const int* rowptr, const int* row, const int* col) {
float *d_value;
int *d_rowptr, *d_col;
curandStateMRG32k3a *state;
const int threads_per_block = 512;
cudaMalloc(&state, sizeof(curandStateMRG32k3a) * (nodes / threads_per_block + 1) * WARP_SIZE);
cudaMalloc(&d_value, sizeof(float) * nodes);
cudaMalloc(&d_rowptr, sizeof(int) * (nodes + 1));
cudaMalloc(&d_col, sizeof(int) * edges);
cudaMemcpy(d_rowptr, rowptr, sizeof(int) * (nodes + 1), H2D);
cudaMemcpy(d_col, col, sizeof(int) * edges, H2D);
setup<<<nodes/threads_per_block+1, threads_per_block>>>(nodes, d_value, state);
random_walk<<<nodes/threads_per_block+1, threads_per_block>>>(nodes, d_value, d_rowptr, d_col, state);
normalize<<<nodes/threads_per_block+1, threads_per_block>>>(nodes, d_value);
cudaMemcpy(value, d_value, sizeof(float) * nodes, D2H);
cudaFree(state);
cudaFree(d_value);
cudaFree(d_rowptr);
cudaFree(d_col);
}
|
7b2f73081e85f80d1d0b4955f69285f57f25cce2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "constantMemoryKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *array = NULL;
hipMalloc(&array, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(constantMemoryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, array, size);
hipDeviceSynchronize();
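// 10 untimed warm-up launches, then 1000 launches timed with steady_clock below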
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(constantMemoryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, array, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(constantMemoryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, array, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7b2f73081e85f80d1d0b4955f69285f57f25cce2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "constantMemoryKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *array = NULL;
cudaMalloc(&array, XSIZE*YSIZE);
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
constantMemoryKernel<<<gridBlock,threadBlock>>>(array,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
constantMemoryKernel<<<gridBlock,threadBlock>>>(array,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
constantMemoryKernel<<<gridBlock,threadBlock>>>(array,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9a9969cf4d40329e84f91f5cc8b5e6bf91db978d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
template<typename T> __global__ void compute_wx_wy(T* Ux, T* Uy, T* bx, T* by, T* Wx, T* Wy, int rows, int cols, double tau);
template<typename T> __global__ void compute_rhs_DxtU_DytU_column_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau);
template<typename T> __global__ void compute_rhs_DxtU_DytU_row_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau);
template<typename T> __global__ void compute_Ux_Uy_column_major_order(T* U, T* Ux, T* Uy, int rows, int cols);
template<typename T> __global__ void compute_Ux_Uy_row_major_order(T* U, T* Ux, T* Uy, int rows, int cols);
template<typename T> __global__ void bregman_update(T* b, T* U, T* W, int rows, int cols, T gamma);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
/* Declare all variables.*/
mxGPUArray *U;
mxGPUArray *Ux, *Uy;
double *d_U;
double *d_Ux, *d_Uy;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
if ((nrhs < 2) || !(mxIsGPUArray(prhs[0]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
U = mxGPUCopyFromMxArray(prhs[0]);
int N = mxGetScalar(prhs[1]);
/*
* Verify that A really is a double array before extracting the pointer.
*/
if (mxGPUGetClassID(U) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
/*
* Now that we have verified the data type, extract a pointer to the input
* data on the device.
*/
d_U = (double *)(mxGPUGetData(U));
/* Create a GPUArray to hold the result and get its underlying pointer. */
Ux = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(U),
mxGPUGetDimensions(U),
mxGPUGetClassID(U),
mxGPUGetComplexity(U),
MX_GPU_DO_NOT_INITIALIZE);
d_Ux = (double *)(mxGPUGetData(Ux));
Uy = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(U),
mxGPUGetDimensions(U),
mxGPUGetClassID(U),
mxGPUGetComplexity(U),
MX_GPU_DO_NOT_INITIALIZE);
d_Uy = (double *)(mxGPUGetData(Uy));
// YOUR CODE HERE
hipError_t status;
int threads = N;
int blocks = N;
// compute_Ux_Uy_column_major_order<<<threads, blocks>>>(d_U, d_Ux, d_Uy, N, N);
hipLaunchKernelGGL(( compute_Ux_Uy_row_major_order), dim3(threads), dim3(blocks), 0, 0, d_U, d_Ux, d_Uy, N, N);
hipDeviceSynchronize();
status = hipGetLastError();
if(status != hipSuccess){
mexErrMsgIdAndTxt(errId, "cuda error code %d\n", status);
}
// END OF YOUR CODE
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(Ux);
plhs[1] = mxGPUCreateMxArrayOnGPU(Uy);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(U);
mxGPUDestroyGPUArray(Ux);
mxGPUDestroyGPUArray(Uy);
}
//version for real numbers
/*
Compute_Wx_Wy
it's very simple
xr = Ux[i] + bx[i]
yr = Uy[i] + by[i]
Vr = sqrt(xr*xr + yr*yr)
if Vr <= tau : Wx[i] = 0, Wy[i] = 0
else         : Wx[i] = xr*(Vr - tau)/Vr, Wy[i] = yr*(Vr - tau)/Vr   (isotropic shrinkage)
*/
template<typename T>
__global__ void compute_wx_wy(T* Ux, T* Uy, T* bx, T* by, T* Wx, T* Wy, int rows, int cols, double tau){
// TODO: detect the kernel launch configuration here, i.e. the grid of blocks / threads and the block dimensions
if (blockDim.x == 1){
}
else if(blockDim.y == 1){
}
else{
}
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int index = x + blockIdx.x * cols;
if (x < cols && y < rows){
T xr = Ux[index] + bx[index];
T yr = Uy[index] + by[index];
T vr = sqrt(xr*xr + yr*yr);
if (vr <= tau)
{
Wx[index] = 0; Wy[index] = 0;
}
else
{
vr = (vr - tau) / vr;
Wx[index] = xr*vr; Wy[index] = yr*vr;
}
}
}
//version for real numbers
template<typename T>
__global__ void compute_rhs_DxtU_DytU_column_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau){
/*
it is assumed that data in every matrix (bx, by, Wx, Wy, RHS) are in column major order, which is typical for Matlab
kernel configuration: blocks are spanned to cover columns, i.e. every block is one-dimensional and may be visualized as a column of the matrix
*/
int index = threadIdx.x+blockIdx.x*rows;
int colt = rows*(cols-1);
int rowt = rows-1;
// predicates depend on block configuration
// predicate1 - whether a thread belongs to the first column (0-indexed)
// predicate2 - whether a thread (cell) is the first thread in a column (0-indexed)
int index1 = index + (colt * (blockIdx.x == 0)) - (rows*(blockIdx.x != 0));
int index2 = index + (rowt * (threadIdx.x == 0)) - (1*(threadIdx.x != 0));
if (blockIdx.x < cols && threadIdx.x < rows){
RHS[index] = tau*(bx[index] - bx[index1] - Wx[index] + Wx[index1] + by[index] - by[index2] - Wy[index] + Wy[index2]);
}
}
template<typename T>
__global__ void compute_rhs_DxtU_DytU_row_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau){
/*
it is assumed that data in every matrix (bx, by, Wx, Wy, RHS) are in row major order, which is NOT typical for Matlab
*/
int index = threadIdx.x+blockIdx.x*cols;
int colt = rows*(cols-1);
int rowt = rows-1;
// predicates depend on block configuration
int index1 = index + (colt * (threadIdx.x == 0)) - (rows*(threadIdx.x != 0));
int index2 = index + (rowt * (blockIdx.x == 0)) - (1*(blockIdx.x != 0));
if (threadIdx.x < cols && blockIdx.x < rows){
RHS[index] = tau*(bx[index] - bx[index1] - Wx[index] + Wx[index1] + by[index] - by[index2] - Wy[index] + Wy[index2]);
}
}
template<typename T>
__global__ void compute_Ux_Uy_column_major_order(T* U, T* Ux, T* Uy, int rows, int cols){
// <<< threads = rows, blocks = cols >>>
// shuffle instructions ??
int index = threadIdx.x+blockIdx.x*rows;
int ux_index = (index + rows) % (rows*cols);
int uy_index = index + 1 - (rows*(threadIdx.x == (cols-1)));
if (threadIdx.x < cols && blockIdx.x < rows){
T u = U[index];
Ux[index] = U[ux_index] -u;
Uy[index] = U[uy_index] -u;
}
}
template<typename T>
__global__ void compute_Ux_Uy_row_major_order(T* U, T* Ux, T* Uy, int rows, int cols){
// this function could be split into two kernels, one computing Ux and one computing Uy; that would allow coalesced memory access or shuffle instructions
//shuffle instructions ??
int index = threadIdx.x+blockIdx.x*cols;
// TODO: solution copied from above; should rows and cols be swapped between the column and row versions? probably not!
int ux_index = (threadIdx.x + 1) % cols + blockIdx.x*cols;
int uy_index = (index + cols) % (rows*cols);
if (threadIdx.x < rows && blockIdx.x < cols){
T u = U[index];
Ux[index] = U[ux_index] -u;
Uy[index] = U[uy_index] -u;
}
}
template<typename T>
__global__ void bregman_update(T* b, T* U, T* W, int rows, int cols, T gamma){
int index = threadIdx.x + blockIdx.x*blockDim.x + threadIdx.y*cols;
if (index < (cols*rows)){
b[index] = gamma*(U[index]-W[index]);
}
}
/*
Compute_Ux_Uy
matrices Ux and Uy are computed independently
IMPORTANT !!: these computational procedures are explained assuming that data are stored in column major order (specific to Matlab)
computations for Ux: Ux[i] = U[i+rows] - U[i]; basically U[i+rows] is the neighboring element from the next COLUMN (same row), e.g. for Aij it is Ai(j+1),
except for the last column, in which case we take the first column
computations for Uy: Uy[i] = U[i+1] - U[i]; taking Matlab's storage order into account, U[i+1] is the next element in the same column,
e.g. for Aij it is A(i+1)j, for the last element of the column, the first element is taken
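worked example (2x2 matrix, column-major storage U = [u00 u10 u01 u11]):
Ux(0,0) = u01 - u00, and wrapping at the last column Ux(0,1) = u00 - u01;
Uy(0,0) = u10 - u00, and wrapping at the bottom of the column Uy(1,0) = u00 - u10.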
*/ | 9a9969cf4d40329e84f91f5cc8b5e6bf91db978d.cu |
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
template<typename T> __global__ void compute_wx_wy(T* Ux, T* Uy, T* bx, T* by, T* Wx, T* Wy, int rows, int cols, double tau);
template<typename T> __global__ void compute_rhs_DxtU_DytU_column_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau);
template<typename T> __global__ void compute_rhs_DxtU_DytU_row_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau);
template<typename T> __global__ void compute_Ux_Uy_column_major_order(T* U, T* Ux, T* Uy, int rows, int cols);
template<typename T> __global__ void compute_Ux_Uy_row_major_order(T* U, T* Ux, T* Uy, int rows, int cols);
template<typename T> __global__ void bregman_update(T* b, T* U, T* W, int rows, int cols, T gamma);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
/* Declare all variables.*/
mxGPUArray *U;
mxGPUArray *Ux, *Uy;
double *d_U;
double *d_Ux, *d_Uy;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
if ((nrhs < 2) || !(mxIsGPUArray(prhs[0]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
U = mxGPUCopyFromMxArray(prhs[0]);
int N = mxGetScalar(prhs[1]);
/*
* Verify that A really is a double array before extracting the pointer.
*/
if (mxGPUGetClassID(U) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
/*
* Now that we have verified the data type, extract a pointer to the input
* data on the device.
*/
d_U = (double *)(mxGPUGetData(U));
/* Create a GPUArray to hold the result and get its underlying pointer. */
Ux = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(U),
mxGPUGetDimensions(U),
mxGPUGetClassID(U),
mxGPUGetComplexity(U),
MX_GPU_DO_NOT_INITIALIZE);
d_Ux = (double *)(mxGPUGetData(Ux));
Uy = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(U),
mxGPUGetDimensions(U),
mxGPUGetClassID(U),
mxGPUGetComplexity(U),
MX_GPU_DO_NOT_INITIALIZE);
d_Uy = (double *)(mxGPUGetData(Uy));
// YOUR CODE HERE
cudaError_t status;
int threads = N;
int blocks = N;
// compute_Ux_Uy_column_major_order<<<threads, blocks>>>(d_U, d_Ux, d_Uy, N, N);
compute_Ux_Uy_row_major_order<<<threads, blocks>>>(d_U, d_Ux, d_Uy, N, N);
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status != cudaSuccess){
mexErrMsgIdAndTxt(errId, "cuda error code %d\n", status);
}
// END OF YOUR CODE
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(Ux);
plhs[1] = mxGPUCreateMxArrayOnGPU(Uy);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(U);
mxGPUDestroyGPUArray(Ux);
mxGPUDestroyGPUArray(Uy);
}
//version for real numbers
/*
Compute_Wx_Wy
it's very simple
xr = Ux[i] + bx[i]
yr = Uy[i] + by[i]
Vr = sqrt(xr*xr + yr*yr)
if Vr <= tau : Wx[i] = 0, Wy[i] = 0
else         : Wx[i] = xr*(Vr - tau)/Vr, Wy[i] = yr*(Vr - tau)/Vr   (isotropic shrinkage)
*/
template<typename T>
__global__ void compute_wx_wy(T* Ux, T* Uy, T* bx, T* by, T* Wx, T* Wy, int rows, int cols, double tau){
// TODO: detect the kernel launch configuration here, i.e. the grid of blocks / threads and the block dimensions
if (blockDim.x == 1){
}
else if(blockDim.y == 1){
}
else{
}
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int index = x + blockIdx.x * cols;
if (x < cols && y < rows){
T xr = Ux[index] + bx[index];
T yr = Uy[index] + by[index];
T vr = sqrt(xr*xr + yr*yr);
if (vr <= tau)
{
Wx[index] = 0; Wy[index] = 0;
}
else
{
vr = (vr - tau) / vr;
Wx[index] = xr*vr; Wy[index] = yr*vr;
}
}
}
//version for real numbers
template<typename T>
__global__ void compute_rhs_DxtU_DytU_column_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau){
/*
it is assumed that data in every matrix (bx, by, Wx, Wy, RHS) are in column major order, which is typical for Matlab
kernel configuration: blocks are spanned to cover columns, i.e. every block is one-dimensional and may be visualized as a column of the matrix
*/
int index = threadIdx.x+blockIdx.x*rows;
int colt = rows*(cols-1);
int rowt = rows-1;
// predicates depend on block configuration
// predicate1 - whether a thread belongs to the first column (0-indexed)
// predicate2 - whether a thread (cell) is the first thread in a column (0-indexed)
int index1 = index + (colt * (blockIdx.x == 0)) - (rows*(blockIdx.x != 0));
int index2 = index + (rowt * (threadIdx.x == 0)) - (1*(threadIdx.x != 0));
if (blockIdx.x < cols && threadIdx.x < rows){
RHS[index] = tau*(bx[index] - bx[index1] - Wx[index] + Wx[index1] + by[index] - by[index2] - Wy[index] + Wy[index2]);
}
}
template<typename T>
__global__ void compute_rhs_DxtU_DytU_row_mayor_order(T* bx, T* by, T* Wx, T* Wy, T* RHS, int rows, int cols, double tau){
/*
it is assumed that data in every matrix (bx, by, Wx, Wy, RHS) are in row major order, which is NOT typical for Matlab
*/
int index = threadIdx.x+blockIdx.x*cols;
int colt = rows*(cols-1);
int rowt = rows-1;
// predicates depend on block configuration
int index1 = index + (colt * (threadIdx.x == 0)) - (rows*(threadIdx.x != 0));
int index2 = index + (rowt * (blockIdx.x == 0)) - (1*(blockIdx.x != 0));
if (threadIdx.x < cols && blockIdx.x < rows){
RHS[index] = tau*(bx[index] - bx[index1] - Wx[index] + Wx[index1] + by[index] - by[index2] - Wy[index] + Wy[index2]);
}
}
template<typename T>
__global__ void compute_Ux_Uy_column_major_order(T* U, T* Ux, T* Uy, int rows, int cols){
// <<< threads = rows, blocks = cols >>>
// shuffle instructions ??
int index = threadIdx.x+blockIdx.x*rows;
int ux_index = (index + rows) % (rows*cols);
int uy_index = index + 1 - (rows*(threadIdx.x == (cols-1)));
if (threadIdx.x < cols && blockIdx.x < rows){
T u = U[index];
Ux[index] = U[ux_index] -u;
Uy[index] = U[uy_index] -u;
}
}
template<typename T>
__global__ void compute_Ux_Uy_row_major_order(T* U, T* Ux, T* Uy, int rows, int cols){
// this function could be split into two kernels, one computing Ux and one computing Uy; that would allow coalesced memory access or shuffle instructions
//shuffle instructions ??
int index = threadIdx.x+blockIdx.x*cols;
// TODO: solution copied from above; should rows and cols be swapped between the column and row versions? probably not!
int ux_index = (threadIdx.x + 1) % cols + blockIdx.x*cols;
int uy_index = (index + cols) % (rows*cols);
if (threadIdx.x < rows && blockIdx.x < cols){
T u = U[index];
Ux[index] = U[ux_index] -u;
Uy[index] = U[uy_index] -u;
}
}
template<typename T>
__global__ void bregman_update(T* b, T* U, T* W, int rows, int cols, T gamma){
int index = threadIdx.x + blockIdx.x*blockDim.x + threadIdx.y*cols;
if (index < (cols*rows)){
b[index] = gamma*(U[index]-W[index]);
}
}
/*
Compute_Ux_Uy
matrices Ux and Uy are computed independently
IMPORTANT !!: these computational procedures are explained assuming that data are stored in column major order (specific to Matlab)
computations for Ux: Ux[i] = U[i+rows] - U[i]; basically U[i+rows] is the neighboring element from the next COLUMN (same row), e.g. for Aij it is Ai(j+1),
except for the last column, in which case we take the first column
computations for Uy: Uy[i] = U[i+1] - U[i]; taking Matlab's storage order into account, U[i+1] is the next element in the same column,
e.g. for Aij it is A(i+1)j, for the last element of the column, the first element is taken
*/ |
920828d1df4615070192b8c45b7806100617fbdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_test5_init(char* _ptr, char* end_ptr)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr) {
return;
}
unsigned int p1 = 1;
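// walking-ones test pattern: each group of 16 words repeats p1,p1,~p1,~p1; p1 shifts left one bit per group and wraps back to 1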
for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i+=16){
unsigned int p2 = ~p1;
ptr[i] = p1;
ptr[i+1] = p1;
ptr[i+2] = p2;
ptr[i+3] = p2;
ptr[i+4] = p1;
ptr[i+5] = p1;
ptr[i+6] = p2;
ptr[i+7] = p2;
ptr[i+8] = p1;
ptr[i+9] = p1;
ptr[i+10] = p2;
ptr[i+11] = p2;
ptr[i+12] = p1;
ptr[i+13] = p1;
ptr[i+14] = p2;
ptr[i+15] = p2;
p1 = p1<<1;
if (p1 == 0){
p1 = 1;
}
}
return;
} | 920828d1df4615070192b8c45b7806100617fbdb.cu | #include "includes.h"
__global__ void kernel_test5_init(char* _ptr, char* end_ptr)
{
unsigned int i;
unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (ptr >= (unsigned int*) end_ptr) {
return;
}
unsigned int p1 = 1;
for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i+=16){
unsigned int p2 = ~p1;
ptr[i] = p1;
ptr[i+1] = p1;
ptr[i+2] = p2;
ptr[i+3] = p2;
ptr[i+4] = p1;
ptr[i+5] = p1;
ptr[i+6] = p2;
ptr[i+7] = p2;
ptr[i+8] = p1;
ptr[i+9] = p1;
ptr[i+10] = p2;
ptr[i+11] = p2;
ptr[i+12] = p1;
ptr[i+13] = p1;
ptr[i+14] = p2;
ptr[i+15] = p2;
p1 = p1<<1;
if (p1 == 0){
p1 = 1;
}
}
return;
} |
3c41b4c4d98101e419e09a552bae671fed5ce670.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_pml_wave_2d_kernel_math.cu"
__device__ __forceinline__ Number_t w_get_pos(int j, int nn, Number_t vmin, Number_t vmax, Number_t dd){
return ((j*vmax + (nn - j)*vmin)/nn) + dd/2;
}
__constant__ Number_t kernel_constants[12];
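// layout as read below: [0]=c, [1]=dt, [2]=dx, [3]=dy, [4]=xmin, [5]=xmax, [6]=ymin, [7]=ymax, [8]=pml_strength, [9]=pml_width, [10]=density ([11] unused in these kernels)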
__global__ void cuda_pml_wave_2d_velocity_kernel(Number_t * u,
const int nx, const int ny){
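// u packs each cell as [pressure, vx, vy]; one thread updates the staggered velocity components of its cell from pressure differences, damped by the PML profile near the domain boundary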
//__shared__ Number_t cache[BDIMX + 2][BDIMY + 2];
Number_t local[2][2];
Number_t dt = kernel_constants[1];
Number_t idt = 1/dt;
Number_t dx = kernel_constants[2];
Number_t dy = kernel_constants[3];
Number_t xmin = kernel_constants[4];
Number_t xmax = kernel_constants[5];
Number_t ymin = kernel_constants[6];
Number_t ymax = kernel_constants[7];
Number_t pml_strength = kernel_constants[8];
Number_t pml_width = kernel_constants[9];
Number_t density = kernel_constants[10];
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if(j < nx && i < ny){
int idx = 3*(j + nx*i);
//Set the position
Number_t bx = w_get_pos(j, nx, xmin, xmax, dx);
Number_t by = w_get_pos(i, ny, ymin, ymax, dy);
local[0][0] = u[idx];
Number_t oldVx = u[idx+1];
Number_t oldVy = u[idx+2];
//Update velocities
{
Number_t newVx = 0;
Number_t newVy = 0;
Number_t absortion;
Number_t update;
Number_t gradient;
//X
if(j != nx-1){
local[1][0] = u[idx+3];
absortion = pml_wave_2d_absortion(bx+dx/2, xmin, xmax, pml_strength, pml_width);
update = pml_wave_2d_vel_update(idt, absortion);
gradient = pml_wave_2d_gradient(idt, absortion, dx, density);
newVx = oldVx*update + gradient*(local[1][0]-local[0][0]);
u[idx+1] = newVx;
}
//Y
if(i != ny-1){
local[0][1] = u[idx + 3*nx];
absortion = pml_wave_2d_absortion(by+dy/2, ymin, ymax, pml_strength, pml_width);
update = pml_wave_2d_vel_update(idt, absortion);
gradient = pml_wave_2d_gradient(idt, absortion, dy, density);
newVy = oldVy*update + gradient*(local[0][1]-local[0][0]);
u[idx+2] = newVy;
}
}
}
}
__global__ void cuda_pml_wave_2d_pressure_kernel(Number_t * u,
const int nx, const int ny){
//__shared__ Number_t cache[BDIMX + 2][BDIMY + 2];
Number_t local[2][2];
Number_t c = kernel_constants[0];
Number_t dt = kernel_constants[1];
Number_t idt = 1/dt;
Number_t dx = kernel_constants[2];
Number_t dy = kernel_constants[3];
Number_t xmin = kernel_constants[4];
Number_t xmax = kernel_constants[5];
Number_t ymin = kernel_constants[6];
Number_t ymax = kernel_constants[7];
Number_t pml_strength = kernel_constants[8];
Number_t pml_width = kernel_constants[9];
Number_t density = kernel_constants[10];
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if(j < nx && i < ny){
int idx = 3*(j + nx*i);
Number_t update = 0;
Number_t temp = 0;
Number_t oldU = u[idx];
local[0][1] = u[idx+1];
local[1][1] = u[idx+2];
if(j != 0){
local[0][0] = u[idx-3+1];
} else{
local[0][0] = 0;
}
//Set the position
Number_t abs_d;
Number_t dir_d;
Number_t upd_d;
Number_t div_d;
//Update pressure
{
Number_t bx = w_get_pos(j, nx, xmin, xmax, dx);
abs_d = pml_wave_2d_absortion(bx+dx/2, xmin, xmax, pml_strength, pml_width);
dir_d = pml_wave_2d_directional(idt, abs_d);
upd_d = pml_wave_2d_pre_update(idt, abs_d, dir_d);
div_d = pml_wave_2d_pre_divergence(density, c, dir_d, dx);
update += upd_d/2;
temp += div_d*(local[0][1] - local[0][0]);
}
if(i != 0){
local[1][0] = u[idx - 3*nx + 2];
} else{
local[1][0] = 0;
}
{
Number_t by = w_get_pos(i, ny, ymin, ymax, dy);
abs_d = pml_wave_2d_absortion(by+dy/2, ymin, ymax, pml_strength, pml_width);
dir_d = pml_wave_2d_directional(idt, abs_d);
upd_d = pml_wave_2d_pre_update(idt, abs_d, dir_d);
div_d = pml_wave_2d_pre_divergence(density, c, dir_d, dy);
update += upd_d/2;
temp += div_d*(local[1][1] - local[1][0]);
}
//Write back to the global memory
// u[idx] = threadIdx.x;
Number_t newU = oldU*update + temp;
u[ idx ] = newU;
}
} | 3c41b4c4d98101e419e09a552bae671fed5ce670.cu | #include "cuda_pml_wave_2d_kernel_math.cu"
__device__ __forceinline__ Number_t w_get_pos(int j, int nn, Number_t vmin, Number_t vmax, Number_t dd){
return ((j*vmax + (nn - j)*vmin)/nn) + dd/2;
}
__constant__ Number_t kernel_constants[12];
__global__ void cuda_pml_wave_2d_velocity_kernel(Number_t * u,
const int nx, const int ny){
//__shared__ Number_t cache[BDIMX + 2][BDIMY + 2];
Number_t local[2][2];
Number_t dt = kernel_constants[1];
Number_t idt = 1/dt;
Number_t dx = kernel_constants[2];
Number_t dy = kernel_constants[3];
Number_t xmin = kernel_constants[4];
Number_t xmax = kernel_constants[5];
Number_t ymin = kernel_constants[6];
Number_t ymax = kernel_constants[7];
Number_t pml_strength = kernel_constants[8];
Number_t pml_width = kernel_constants[9];
Number_t density = kernel_constants[10];
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if(j < nx && i < ny){
int idx = 3*(j + nx*i);
//Set the position
Number_t bx = w_get_pos(j, nx, xmin, xmax, dx);
Number_t by = w_get_pos(i, ny, ymin, ymax, dy);
local[0][0] = u[idx];
Number_t oldVx = u[idx+1];
Number_t oldVy = u[idx+2];
//Update velocities
{
Number_t newVx = 0;
Number_t newVy = 0;
Number_t absortion;
Number_t update;
Number_t gradient;
//X
if(j != nx-1){
local[1][0] = u[idx+3];
absortion = pml_wave_2d_absortion(bx+dx/2, xmin, xmax, pml_strength, pml_width);
update = pml_wave_2d_vel_update(idt, absortion);
gradient = pml_wave_2d_gradient(idt, absortion, dx, density);
newVx = oldVx*update + gradient*(local[1][0]-local[0][0]);
u[idx+1] = newVx;
}
//Y
if(i != ny-1){
local[0][1] = u[idx + 3*nx];
absortion = pml_wave_2d_absortion(by+dy/2, ymin, ymax, pml_strength, pml_width);
update = pml_wave_2d_vel_update(idt, absortion);
gradient = pml_wave_2d_gradient(idt, absortion, dy, density);
newVy = oldVy*update + gradient*(local[0][1]-local[0][0]);
u[idx+2] = newVy;
}
}
}
}
__global__ void cuda_pml_wave_2d_pressure_kernel(Number_t * u,
const int nx, const int ny){
//__shared__ Number_t cache[BDIMX + 2][BDIMY + 2];
Number_t local[2][2];
Number_t c = kernel_constants[0];
Number_t dt = kernel_constants[1];
Number_t idt = 1/dt;
Number_t dx = kernel_constants[2];
Number_t dy = kernel_constants[3];
Number_t xmin = kernel_constants[4];
Number_t xmax = kernel_constants[5];
Number_t ymin = kernel_constants[6];
Number_t ymax = kernel_constants[7];
Number_t pml_strength = kernel_constants[8];
Number_t pml_width = kernel_constants[9];
Number_t density = kernel_constants[10];
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if(j < nx && i < ny){
int idx = 3*(j + nx*i);
Number_t update = 0;
Number_t temp = 0;
Number_t oldU = u[idx];
local[0][1] = u[idx+1];
local[1][1] = u[idx+2];
if(j != 0){
local[0][0] = u[idx-3+1];
} else{
local[0][0] = 0;
}
//Set the position
Number_t abs_d;
Number_t dir_d;
Number_t upd_d;
Number_t div_d;
//Update pressure
{
Number_t bx = w_get_pos(j, nx, xmin, xmax, dx);
abs_d = pml_wave_2d_absortion(bx+dx/2, xmin, xmax, pml_strength, pml_width);
dir_d = pml_wave_2d_directional(idt, abs_d);
upd_d = pml_wave_2d_pre_update(idt, abs_d, dir_d);
div_d = pml_wave_2d_pre_divergence(density, c, dir_d, dx);
update += upd_d/2;
temp += div_d*(local[0][1] - local[0][0]);
}
if(i != 0){
local[1][0] = u[idx - 3*nx + 2];
} else{
local[1][0] = 0;
}
{
Number_t by = w_get_pos(i, ny, ymin, ymax, dy);
abs_d = pml_wave_2d_absortion(by+dy/2, ymin, ymax, pml_strength, pml_width);
dir_d = pml_wave_2d_directional(idt, abs_d);
upd_d = pml_wave_2d_pre_update(idt, abs_d, dir_d);
div_d = pml_wave_2d_pre_divergence(density, c, dir_d, dy);
update += upd_d/2;
temp += div_d*(local[1][1] - local[1][0]);
}
//Write back to the global memory
// u[idx] = threadIdx.x;
Number_t newU = oldU*update + temp;
u[ idx ] = newU;
}
} |
cc78cf023d5db5d0baafbc6a5421b5b7177fa482.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/numeric_limits.cuh"
#include "chainerx/cuda/reduce.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/math.h"
#include "chainerx/kernels/sorting.h"
#include "chainerx/macro.h"
#include "chainerx/numeric_limits.h"
#include "chainerx/reduction_kernel_arg.h"
#include "chainerx/routines/math.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArgMaxImpl {
using CudaType = cuda_internal::DataType<T>;
struct MaxAndArgMax {
CudaType max;
int64_t argmax;
};
__device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; }
__device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) {
accum = next;
}
}
__device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; }
};
class CudaArgMaxKernel : public ArgMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel);
template <typename In, typename Out>
struct SumImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ OutCudaType Identity() { return OutCudaType{0}; }
__device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); }
__device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; }
__device__ OutCudaType MapOut(OutCudaType accum) { return accum; }
};
class CudaSumKernel : public SumKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) {
using In = typename decltype(in_pt)::type;
using Out = typename decltype(out_pt)::type;
Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{});
};
VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); });
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel);
template <typename T>
struct AMaxImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ CudaType Identity() { return cuda::NumericLimits<CudaType>::LowestOrInf(); }
__device__ CudaType MapIn(CudaType in, int64_t /*index*/) { return in; }
__device__ void Reduce(CudaType next, CudaType& accum) {
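// NaN wins: a NaN element replaces the accumulator, and a NaN accumulator is never replaced by a non-NaN value (the comparison below is then false)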
if (cuda::IsNan(next) || accum < next) {
accum = next;
}
}
__device__ CudaType MapOut(CudaType accum) { return accum; }
};
class CudaAMaxKernel : public AMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, T>(a, axis, out, AMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AMaxKernel, CudaAMaxKernel);
template <typename T>
struct AMinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ CudaType Identity() { return cuda::NumericLimits<CudaType>::MaxOrInf(); }
__device__ CudaType MapIn(CudaType in, int64_t /*index*/) { return in; }
__device__ void Reduce(CudaType next, CudaType& accum) {
if (cuda::IsNan(next) || accum > next) {
accum = next;
}
}
__device__ CudaType MapOut(CudaType accum) { return accum; }
};
class CudaAMinKernel : public AMinKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, T>(a, axis, out, AMinImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AMinKernel, CudaAMinKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
| cc78cf023d5db5d0baafbc6a5421b5b7177fa482.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/axes.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/data_type.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/cuda/numeric_limits.cuh"
#include "chainerx/cuda/reduce.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/math.h"
#include "chainerx/kernels/sorting.h"
#include "chainerx/macro.h"
#include "chainerx/numeric_limits.h"
#include "chainerx/reduction_kernel_arg.h"
#include "chainerx/routines/math.h"
#include "chainerx/shape.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename T>
struct ArgMaxImpl {
using CudaType = cuda_internal::DataType<T>;
struct MaxAndArgMax {
CudaType max;
int64_t argmax;
};
__device__ MaxAndArgMax Identity() { return {CudaType{}, -1}; }
__device__ MaxAndArgMax MapIn(CudaType in, int64_t index) { return {in, index}; }
__device__ void Reduce(MaxAndArgMax next, MaxAndArgMax& accum) {
// Note that `next` can be the return value of `Identity()` in which case `accum` should not be updated.
if (next.argmax != -1 && (accum.argmax == -1 || accum.max < next.max)) {
accum = next;
}
}
__device__ int64_t MapOut(MaxAndArgMax accum) { return accum.argmax; }
};
class CudaArgMaxKernel : public ArgMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(a.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, int64_t>(a, axis, out, ArgMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ArgMaxKernel, CudaArgMaxKernel);
template <typename In, typename Out>
struct SumImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ OutCudaType Identity() { return OutCudaType{0}; }
__device__ OutCudaType MapIn(InCudaType in, int64_t /*index*/) { return static_cast<OutCudaType>(in); }
__device__ void Reduce(OutCudaType next, OutCudaType& accum) { accum += next; }
__device__ OutCudaType MapOut(OutCudaType accum) { return accum; }
};
class CudaSumKernel : public SumKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
auto do_sum = [&a, &axis, &out](auto in_pt, auto out_pt) {
using In = typename decltype(in_pt)::type;
using Out = typename decltype(out_pt)::type;
Reduce<In, Out>(a, axis, out, SumImpl<In, Out>{});
};
VisitDtype(out.dtype(), [a_dtype = a.dtype(), &do_sum](auto out_pt) { VisitDtype(a_dtype, do_sum, out_pt); });
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SumKernel, CudaSumKernel);
template <typename T>
struct AMaxImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ CudaType Identity() { return cuda::NumericLimits<CudaType>::LowestOrInf(); }
__device__ CudaType MapIn(CudaType in, int64_t /*index*/) { return in; }
__device__ void Reduce(CudaType next, CudaType& accum) {
if (cuda::IsNan(next) || accum < next) {
accum = next;
}
}
__device__ CudaType MapOut(CudaType accum) { return accum; }
};
class CudaAMaxKernel : public AMaxKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, T>(a, axis, out, AMaxImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AMaxKernel, CudaAMaxKernel);
template <typename T>
struct AMinImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ CudaType Identity() { return cuda::NumericLimits<CudaType>::MaxOrInf(); }
__device__ CudaType MapIn(CudaType in, int64_t /*index*/) { return in; }
__device__ void Reduce(CudaType next, CudaType& accum) {
if (cuda::IsNan(next) || accum > next) {
accum = next;
}
}
__device__ CudaType MapOut(CudaType accum) { return accum; }
};
class CudaAMinKernel : public AMinKernel {
public:
void Call(const Array& a, const Axes& axis, const Array& out) override {
Device& device = a.device();
CHAINERX_ASSERT(internal::IsValidReductionShape(a.shape(), axis, out.shape(), true));
device.CheckDevicesCompatible(a, out);
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
Reduce<T, T>(a, axis, out, AMinImpl<T>{});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AMinKernel, CudaAMinKernel);
} // namespace
} // namespace cuda
} // namespace chainerx
|
da2865ed0ad940ea11e459f27d5149e7a430e4af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 2018.11.10 // very slow average 200s
// every thread check for a Queen. not a pair[]
// 2018.12.29
//copy form CheckOne_Datafile_compare/gpu_1_Queen
#include "Kernel_p.h"
#define DEBUG
#define BLOCK_SIZE 512
//----------------------------Kernel----------------------------------------
__global__ void Ker_Warm(){
// empty body, just warmup GPU;
if(threadIdx.x == 0 )
printf("GPU is OK!\n");
}
__global__ void Ker_Check_Combination3 (
unsigned int *d_combination, //
unsigned int combination_size, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No.
d_result[0] =0;
if( tid == 0 ) {
for(int i =0 ;i<combination_size ; i++){
printf("%4d ", d_combination[i]);
d_result[0] += d_combination[i];
}
printf("\nsum = %8d \n ", d_result[0]);
}
}
__global__ void Ker_Check_Combination (
unsigned int *d_combination, //
unsigned int combination_size, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No.
//printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid);
if(tid >= combination_size)
return;
if(tid==0)
d_result[0]=0;
int curX=tid;
int curY=d_combination[tid];
//check every queen after cur;
for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){
int iY=d_combination[iX];
if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers.
//printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid);
atomicAdd ((unsigned int *)&d_result[0],1);
// break; // get all conflicts
}
}
}// end of Kernel
//----------------------------CPU Interface----------------------------------------
void setDevice(int i)
{
checkCudaErrors( hipSetDevice( i ) );
}
int getDevice()
{
int id=-1;
checkCudaErrors( hipGetDevice( &id ) );
return id;
}
void warmGPU() // warm a single GPU
{
hipError_t cuda_err;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float cuda_time=0;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1), 0, 0, );
cuda_err= hipSuccess;
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "Worm launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
}
void warmGPU0_1() // warm GPU 0 and 1 in diffirent streams// wrong??
{
hipError_t cuda_err;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float cuda_time=0;
hipEventRecord(start, 0);
hipStream_t stream0;
hipStream_t stream1;
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipStreamCreate(&stream0));
hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1),0 , stream0, );
checkCudaErrors(hipSetDevice(1));
checkCudaErrors(hipStreamCreate(&stream1));
hipLaunchKernelGGL(( Ker_Warm) , dim3(1),dim3(1),0 , stream1, );
cuda_err= hipSuccess;
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "Worm launch successed! ( code= %s)!\n", hipGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( hipDeviceSynchronize() );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipStreamDestroy(stream0));
checkCudaErrors(hipSetDevice(1));
checkCudaErrors(hipStreamDestroy(stream1));
}
unsigned int get_conflicts(unsigned int * combination, unsigned int combination_size)
{
/* printf("\n----get_conflicts() begin! \n"); */
/* unsigned int *h_combination0= 0; */
/* unsigned int *h_combination1= 0; */
	unsigned int *h_combination= combination ; // not pinned (page-locked) memory; this is the array passed in directly by the main program
unsigned int *d_combination0= 0;
unsigned int *d_combination1= 0;
unsigned int * h_result0 = 0;
unsigned int * h_result1 = 0;
unsigned int * d_result0 = 0;
unsigned int * d_result1 = 0;
hipError_t cuda_err;
hipStream_t stream0;
hipStream_t stream1;
//--------------cpu----------------------------------
int GPU_N;
checkCudaErrors(hipGetDeviceCount(&GPU_N));
cuda_err = hipSuccess;
//---------------GPU0-----------------------------
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipStreamCreate(&stream0));
checkCudaErrors(hipMalloc((void **)&d_combination0, combination_size * sizeof( unsigned int)));
checkCudaErrors(hipMalloc((void **)&d_result0 , 1 * sizeof( unsigned int)));
checkCudaErrors(hipHostMalloc((void **)&h_result0, 1 * sizeof(unsigned))); // hipHostMalloc , not malloc()
/* memcpy( h_combination0, combination , combination_size * sizeof(unsigned)); */
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "GPU 0 alloc d_combination error! (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
//---------------GPU1-----------------------------
checkCudaErrors(hipSetDevice(1));
checkCudaErrors(hipStreamCreate(&stream1));
checkCudaErrors(hipMalloc((void **)&d_combination1, combination_size * sizeof( unsigned int)));
checkCudaErrors(hipMalloc((void **)&d_result1 , 1 * sizeof( unsigned int)));
checkCudaErrors(hipHostMalloc((void **)&h_result1, 1 * sizeof(unsigned))); // hipHostMalloc , not malloc()
/* memcpy( h_combination1, combination , combination_size * sizeof(unsigned)); */
cuda_err = hipGetLastError();
if (cuda_err != hipSuccess)
{
fprintf(stderr, "GPU1 alloc d_combination error! (error code= %s)!\n", hipGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
// -----------------GPU0----------------------------------------------------------------------------
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipMemcpyAsync(d_combination0, h_combination, combination_size * sizeof( unsigned int), hipMemcpyHostToDevice, stream0));
int GRID_SIZE = ( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE ;
/* printf("gridsize = %d, blocksize =%d, queens = %d\n", GRID_SIZE, BLOCK_SIZE, combination_size ); */
checkCudaErrors(hipSetDevice(0));
/* printf( "Switch to device : %d\n", getDevice()); */
hipLaunchKernelGGL(( Ker_Check_Combination), dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, stream0 ,
d_combination0 ,combination_size , d_result0);
getLastCudaError("Kernel() in divece 0 execution failed.\n");
checkCudaErrors(hipMemcpyAsync(h_result0, d_result0, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost,stream0));
// -----------------GPU1----------------------------------------------------------------------------
checkCudaErrors(hipSetDevice(1));
checkCudaErrors(hipMemcpyAsync(d_combination1, h_combination, combination_size * sizeof( unsigned int), hipMemcpyHostToDevice, stream1));
/* printf( "Switch to device : %d\n", getDevice()); */
cuda_err= hipSuccess;
hipLaunchKernelGGL(( Ker_Check_Combination), dim3(( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE) , dim3(BLOCK_SIZE) , 0, stream1 ,
d_combination1 ,combination_size , d_result1);
getLastCudaError("Kernel() in divece 1 execution failed.\n");
checkCudaErrors(hipMemcpyAsync(h_result1, d_result1, 1 * sizeof(unsigned int), hipMemcpyDeviceToHost,stream1));
	//------------------GPU0 synchronize-----------------------------------------------------------
checkCudaErrors(hipSetDevice(0));
hipStreamSynchronize(stream0);
//------------------GPU1 -----------------------------------------------------------
checkCudaErrors(hipSetDevice(1));
hipStreamSynchronize(stream1);
// ----------------------CPU ---------------------------------
unsigned int conflicts =0 ;
/* printf("%d\t%d\t device0 == device1:%d\n", h_result0[0], h_result1[0], h_result0[0]== h_result1[0] ); */
	if( h_result0[0] == h_result1[0])
conflicts = h_result1[0];
//************************************************************************************************************
//--------GPU0---------------------------
checkCudaErrors(hipSetDevice(0));
checkCudaErrors( hipFree(d_combination0) );
checkCudaErrors( hipFree(d_result0) );
/* checkCudaErrors(hipHostFree(h_combination0)); */
checkCudaErrors(hipHostFree(h_result0));
checkCudaErrors(hipStreamDestroy(stream0));
	//--------GPU1---------------------------
checkCudaErrors(hipSetDevice(1));
checkCudaErrors( hipFree(d_combination1) );
checkCudaErrors( hipFree(d_result1) );
/* checkCudaErrors(hipHostFree(h_combination1)); */
checkCudaErrors(hipHostFree(h_result1));
checkCudaErrors(hipStreamDestroy(stream1));
/* printf(" get_conflicts() run OK !\n"); */
return conflicts;
}
| da2865ed0ad940ea11e459f27d5149e7a430e4af.cu | // 2018.11.10 // very slow average 200s
// every thread check for a Queen. not a pair[]
// 2018.12.29
//copy form CheckOne_Datafile_compare/gpu_1_Queen
#include "Kernel_p.h"
#define DEBUG
#define BLOCK_SIZE 512
//----------------------------Kernel----------------------------------------
__global__ void Ker_Warm(){
// empty body, just warmup GPU;
if(threadIdx.x == 0 )
printf("GPU is OK!\n");
}
__global__ void Ker_Check_Combination3 (
unsigned int *d_combination, //
unsigned int combination_size, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No.
d_result[0] =0;
if( tid == 0 ) {
for(int i =0 ;i<combination_size ; i++){
printf("%4d ", d_combination[i]);
d_result[0] += d_combination[i];
}
printf("\nsum = %8d \n ", d_result[0]);
}
}
__global__ void Ker_Check_Combination (
unsigned int *d_combination, //
unsigned int combination_size, // length of combinations =queen number =N
unsigned int *d_result // return conflicts count.
)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x; // use shared mem,so must be in a Block, need not global thread No.
//printf("block =%d, thread=%d, tid=%d\n",blockIdx.x, threadIdx.x ,tid);
if(tid >= combination_size)
return;
if(tid==0)
d_result[0]=0;
int curX=tid;
int curY=d_combination[tid];
//check every queen after cur;
for(int iX=tid+1 ; iX <= combination_size-1 ; iX++){
int iY=d_combination[iX];
if(iY == curY || iX+iY == curX+ curY || iY -iX == curY - curX) { // not a Permutations, it is random numbers.
//printf("-------->>> (%5d,%5d) (%5d,%5d) thread:%5d \n ", curX,curY,iX,iY,tid);
atomicAdd ((unsigned int *)&d_result[0],1);
// break; // get all conflicts
}
}
}// end of Kernel
//----------------------------CPU Interface----------------------------------------
void setDevice(int i)
{
checkCudaErrors( cudaSetDevice( i ) );
}
int getDevice()
{
int id=-1;
checkCudaErrors( cudaGetDevice( &id ) );
return id;
}
void warmGPU() // warm a single GPU
{
cudaError_t cuda_err;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float cuda_time=0;
cudaEventRecord(start, 0);
Ker_Warm <<<1,1>>> ();
cuda_err= cudaSuccess;
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "Worm launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
}
void warmGPU0_1() // warm GPU 0 and 1 in diffirent streams// wrong??
{
cudaError_t cuda_err;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float cuda_time=0;
cudaEventRecord(start, 0);
cudaStream_t stream0;
cudaStream_t stream1;
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaStreamCreate(&stream0));
Ker_Warm <<<1,1,0 , stream0>>> ();
checkCudaErrors(cudaSetDevice(1));
checkCudaErrors(cudaStreamCreate(&stream1));
Ker_Warm <<<1,1,0 , stream1>>> ();
cuda_err= cudaSuccess;
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "Failed to launch (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
else
{
#ifdef DEBUG
fprintf(stderr, "Worm launch successed! ( code= %s)!\n", cudaGetErrorString(cuda_err));
#endif
}
//checkCudaErrors( cudaDeviceSynchronize() );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&cuda_time, start, stop);
#ifdef DEBUG
printf("%-40s %f ms \n","warmup() run time=",cuda_time);
#endif
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaStreamDestroy(stream0));
checkCudaErrors(cudaSetDevice(1));
checkCudaErrors(cudaStreamDestroy(stream1));
}
unsigned int get_conflicts(unsigned int * combination, unsigned int combination_size)
{
/* printf("\n----get_conflicts() begin! \n"); */
/* unsigned int *h_combination0= 0; */
/* unsigned int *h_combination1= 0; */
	unsigned int *h_combination= combination ; // not pinned (page-locked) memory; this is the array passed in directly by the main program
unsigned int *d_combination0= 0;
unsigned int *d_combination1= 0;
unsigned int * h_result0 = 0;
unsigned int * h_result1 = 0;
unsigned int * d_result0 = 0;
unsigned int * d_result1 = 0;
cudaError_t cuda_err;
cudaStream_t stream0;
cudaStream_t stream1;
//--------------cpu----------------------------------
int GPU_N;
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
cuda_err = cudaSuccess;
//---------------GPU0-----------------------------
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaStreamCreate(&stream0));
checkCudaErrors(cudaMalloc((void **)&d_combination0, combination_size * sizeof( unsigned int)));
checkCudaErrors(cudaMalloc((void **)&d_result0 , 1 * sizeof( unsigned int)));
checkCudaErrors(cudaMallocHost((void **)&h_result0, 1 * sizeof(unsigned))); // cudaMallocHost , not malloc()
/* memcpy( h_combination0, combination , combination_size * sizeof(unsigned)); */
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "GPU 0 alloc d_combination error! (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
//---------------GPU1-----------------------------
checkCudaErrors(cudaSetDevice(1));
checkCudaErrors(cudaStreamCreate(&stream1));
checkCudaErrors(cudaMalloc((void **)&d_combination1, combination_size * sizeof( unsigned int)));
checkCudaErrors(cudaMalloc((void **)&d_result1 , 1 * sizeof( unsigned int)));
checkCudaErrors(cudaMallocHost((void **)&h_result1, 1 * sizeof(unsigned))); // cudaMallocHost , not malloc()
/* memcpy( h_combination1, combination , combination_size * sizeof(unsigned)); */
cuda_err = cudaGetLastError();
if (cuda_err != cudaSuccess)
{
fprintf(stderr, "GPU1 alloc d_combination error! (error code= %s)!\n", cudaGetErrorString(cuda_err));
exit(EXIT_FAILURE);
}
// -----------------GPU0----------------------------------------------------------------------------
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaMemcpyAsync(d_combination0, h_combination, combination_size * sizeof( unsigned int), cudaMemcpyHostToDevice, stream0));
int GRID_SIZE = ( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE ;
/* printf("gridsize = %d, blocksize =%d, queens = %d\n", GRID_SIZE, BLOCK_SIZE, combination_size ); */
checkCudaErrors(cudaSetDevice(0));
/* printf( "Switch to device : %d\n", getDevice()); */
Ker_Check_Combination<<< GRID_SIZE, BLOCK_SIZE , 0, stream0 >>>
(d_combination0 ,combination_size , d_result0);
getLastCudaError("Kernel() in divece 0 execution failed.\n");
checkCudaErrors(cudaMemcpyAsync(h_result0, d_result0, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost,stream0));
// -----------------GPU1----------------------------------------------------------------------------
checkCudaErrors(cudaSetDevice(1));
checkCudaErrors(cudaMemcpyAsync(d_combination1, h_combination, combination_size * sizeof( unsigned int), cudaMemcpyHostToDevice, stream1));
/* printf( "Switch to device : %d\n", getDevice()); */
cuda_err= cudaSuccess;
Ker_Check_Combination<<< ( combination_size + BLOCK_SIZE-1)/BLOCK_SIZE , BLOCK_SIZE , 0, stream1 >>>
(d_combination1 ,combination_size , d_result1);
getLastCudaError("Kernel() in divece 1 execution failed.\n");
checkCudaErrors(cudaMemcpyAsync(h_result1, d_result1, 1 * sizeof(unsigned int), cudaMemcpyDeviceToHost,stream1));
	//------------------GPU0 synchronize-----------------------------------------------------------
checkCudaErrors(cudaSetDevice(0));
cudaStreamSynchronize(stream0);
//------------------GPU1 同步-----------------------------------------------------------
checkCudaErrors(cudaSetDevice(1));
cudaStreamSynchronize(stream1);
// ----------------------CPU ---------------------------------
unsigned int conflicts =0 ;
/* printf("%d\t%d\t device0 == device1:%d\n", h_result0[0], h_result1[0], h_result0[0]== h_result1[0] ); */
	if( h_result0[0] == h_result1[0])
conflicts = h_result1[0];
//************************************************************************************************************
//--------GPU0---------------------------
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors( cudaFree(d_combination0) );
checkCudaErrors( cudaFree(d_result0) );
/* checkCudaErrors(cudaFreeHost(h_combination0)); */
checkCudaErrors(cudaFreeHost(h_result0));
checkCudaErrors(cudaStreamDestroy(stream0));
	//--------GPU1---------------------------
checkCudaErrors(cudaSetDevice(1));
checkCudaErrors( cudaFree(d_combination1) );
checkCudaErrors( cudaFree(d_result1) );
/* checkCudaErrors(cudaFreeHost(h_combination1)); */
checkCudaErrors(cudaFreeHost(h_result1));
checkCudaErrors(cudaStreamDestroy(stream1));
/* printf(" get_conflicts() run OK !\n"); */
return conflicts;
}
|
88f828c532a7075362cedfb54642bf15ea256cdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../bm_conv.hpp"
void bm_cuda_tensor_block_conv_halo(benchmark::State& state) {
pointi<2> grid_dim;
fill(grid_dim, state.range(0));
typedef dim<16, 16> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
auto shape = block_dim * grid_dim;
cuda::tensor<float, 2> ts_kernel(pointi<2>{3, 3});
auto padding = ts_kernel.shape() / 2;
cuda::tensor<float, 2> ts_src_pad(shape + ts_kernel.shape() - 1);
auto ts_src_view = view::slice(ts_src_pad, padding, shape);
cuda::tensor<float, 2> ts_dst(ts_src_view.shape());
cuda::execution_policy policy;
while (state.KeepRunning()) {
hipEvent_t events[2];
hipEventCreate(&events[0]);
hipEventCreate(&events[1]);
hipEventRecord(events[0], nullptr);
cuda::block_for_index<
BLOCK_DIM>(policy, grid_dim, [=] __device__(cuda::block_index<BLOCK_DIM> block_idx) {
            // use shared memory to get better speed
__shared__ local_tensor<float, BLOCK_DIM> sh_ts_block;
            // fill invalid regions with 0
sh_ts_block(block_idx.local) = static_cast<float>(ts_src_view(block_idx.global));
cuda::syncthreads();
if (inside_rect(block_idx.local, padding,
block_idx.block_dim - ts_kernel.shape() + 1) &&
inside_rect(block_idx.global, pointi<2>{0, 0}, ts_src_view.shape())) {
auto re = 0.0f;
re += ts_kernel(pointi<2>{0, 0}) * sh_ts_block(block_idx.local + pointi<2>{-1, -1});
re += ts_kernel(pointi<2>{1, 0}) * sh_ts_block(block_idx.local + pointi<2>{0, -1});
re += ts_kernel(pointi<2>{2, 0}) * sh_ts_block(block_idx.local + pointi<2>{1, -1});
re += ts_kernel(pointi<2>{0, 1}) * sh_ts_block(block_idx.local + pointi<2>{-1, 0});
re += ts_kernel(pointi<2>{1, 1}) * sh_ts_block(block_idx.local + pointi<2>{0, 0});
re += ts_kernel(pointi<2>{2, 1}) * sh_ts_block(block_idx.local + pointi<2>{1, 0});
re += ts_kernel(pointi<2>{0, 2}) * sh_ts_block(block_idx.local + pointi<2>{-1, 1});
re += ts_kernel(pointi<2>{1, 2}) * sh_ts_block(block_idx.local + pointi<2>{0, 1});
re += ts_kernel(pointi<2>{2, 2}) * sh_ts_block(block_idx.local + pointi<2>{1, 1});
ts_dst(block_idx.global) = re;
}
});
hipEventRecord(events[1], nullptr);
policy.synchronize();
float avg_ms;
hipEventElapsedTime(&avg_ms, events[0], events[1]);
state.SetIterationTime(avg_ms / 1000.0f);
hipEventDestroy(events[0]);
hipEventDestroy(events[1]);
}
state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src_pad.size()) *
sizeof(ts_dst[0]));
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_dst.size()) *
ts_kernel.size() * 2);
}
void bm_cuda_tensor_block_conv_overlap(benchmark::State& state) {
pointi<2> grid_dim;
fill(grid_dim, state.range(0));
typedef dim<16, 16> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
cuda::tensor<float, 2> ts_kernel(pointi<2>{3, 3});
auto padding = ts_kernel.shape() / 2;
auto valid_block_dim = block_dim - ts_kernel.shape() + 1;
auto shape = valid_block_dim * grid_dim;
cuda::tensor<float, 2> ts_src_pad(shape + ts_kernel.shape() - 1);
auto ts_src_view = view::slice(ts_src_pad, padding, shape);
cuda::tensor<float, 2> ts_dst(ts_src_view.shape());
while (state.KeepRunning()) {
cuda::block_for_index<BLOCK_DIM>(grid_dim, [=] __device__(
cuda::block_index<BLOCK_DIM> block_idx) {
auto valid_global_idx = valid_block_dim * block_idx.block + block_idx.local - padding;
__shared__ local_tensor<float, BLOCK_DIM> sh_ts_block;
if (inside_rect(valid_global_idx, pointi<2>{0, 0}, ts_src_view.shape())) {
sh_ts_block(block_idx.local) = ts_src_view(valid_global_idx);
} else {
sh_ts_block(block_idx.local) = 0.0f;
}
cuda::syncthreads();
if (inside_rect(block_idx.local, padding,
block_idx.block_dim - ts_kernel.shape() + pointi<2>{1, 1}) &&
inside_rect(valid_global_idx, zero<pointi<2>>::value(), ts_src_view.shape())) {
auto re = 0.0f;
re += ts_kernel(pointi<2>{0, 0}) * sh_ts_block(block_idx.local + pointi<2>{-1, -1});
re += ts_kernel(pointi<2>{1, 0}) * sh_ts_block(block_idx.local + pointi<2>{0, -1});
re += ts_kernel(pointi<2>{2, 0}) * sh_ts_block(block_idx.local + pointi<2>{1, -1});
re += ts_kernel(pointi<2>{0, 1}) * sh_ts_block(block_idx.local + pointi<2>{-1, 0});
re += ts_kernel(pointi<2>{1, 1}) * sh_ts_block(block_idx.local + pointi<2>{0, 0});
re += ts_kernel(pointi<2>{2, 1}) * sh_ts_block(block_idx.local + pointi<2>{1, 0});
re += ts_kernel(pointi<2>{0, 2}) * sh_ts_block(block_idx.local + pointi<2>{-1, 1});
re += ts_kernel(pointi<2>{1, 2}) * sh_ts_block(block_idx.local + pointi<2>{0, 1});
re += ts_kernel(pointi<2>{2, 2}) * sh_ts_block(block_idx.local + pointi<2>{1, 1});
ts_dst(valid_global_idx) = re;
}
});
}
state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src_view.size()) *
sizeof(ts_dst[0]));
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_dst.size()) *
ts_kernel.size() * 2);
}
auto bm_cuda_tensor2f_general_roll_conv = bm_tensor2f_general_roll_conv<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2f_general_roll_conv)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor2f_general_unroll_conv = bm_tensor2f_general_unroll_conv<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2f_general_unroll_conv)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor2_view_conv_local_tensor3x3 =
bm_tensor2_view_conv_local_tensor3x3<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2_view_conv_local_tensor3x3)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor_view_conv_tensor3x3 = bm_tensor_view_conv_tensor3x3<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor_view_conv_tensor3x3)->Arg(128)->Arg(10_K);
BENCHMARK(bm_cuda_tensor_block_conv_halo)->Arg(512)->Arg(1_K)->Arg(2_K);
BENCHMARK(bm_cuda_tensor_block_conv_overlap)->Arg(512)->Arg(1_K)->Arg(2_K);
| 88f828c532a7075362cedfb54642bf15ea256cdc.cu | #include "../bm_conv.hpp"
void bm_cuda_tensor_block_conv_halo(benchmark::State& state) {
pointi<2> grid_dim;
fill(grid_dim, state.range(0));
typedef dim<16, 16> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
auto shape = block_dim * grid_dim;
cuda::tensor<float, 2> ts_kernel(pointi<2>{3, 3});
auto padding = ts_kernel.shape() / 2;
cuda::tensor<float, 2> ts_src_pad(shape + ts_kernel.shape() - 1);
auto ts_src_view = view::slice(ts_src_pad, padding, shape);
cuda::tensor<float, 2> ts_dst(ts_src_view.shape());
cuda::execution_policy policy;
while (state.KeepRunning()) {
cudaEvent_t events[2];
cudaEventCreate(&events[0]);
cudaEventCreate(&events[1]);
cudaEventRecord(events[0], nullptr);
cuda::block_for_index<
BLOCK_DIM>(policy, grid_dim, [=] __device__(cuda::block_index<BLOCK_DIM> block_idx) {
            // use shared memory to get better speed
__shared__ local_tensor<float, BLOCK_DIM> sh_ts_block;
            // fill invalid regions with 0
sh_ts_block(block_idx.local) = static_cast<float>(ts_src_view(block_idx.global));
cuda::syncthreads();
if (inside_rect(block_idx.local, padding,
block_idx.block_dim - ts_kernel.shape() + 1) &&
inside_rect(block_idx.global, pointi<2>{0, 0}, ts_src_view.shape())) {
auto re = 0.0f;
re += ts_kernel(pointi<2>{0, 0}) * sh_ts_block(block_idx.local + pointi<2>{-1, -1});
re += ts_kernel(pointi<2>{1, 0}) * sh_ts_block(block_idx.local + pointi<2>{0, -1});
re += ts_kernel(pointi<2>{2, 0}) * sh_ts_block(block_idx.local + pointi<2>{1, -1});
re += ts_kernel(pointi<2>{0, 1}) * sh_ts_block(block_idx.local + pointi<2>{-1, 0});
re += ts_kernel(pointi<2>{1, 1}) * sh_ts_block(block_idx.local + pointi<2>{0, 0});
re += ts_kernel(pointi<2>{2, 1}) * sh_ts_block(block_idx.local + pointi<2>{1, 0});
re += ts_kernel(pointi<2>{0, 2}) * sh_ts_block(block_idx.local + pointi<2>{-1, 1});
re += ts_kernel(pointi<2>{1, 2}) * sh_ts_block(block_idx.local + pointi<2>{0, 1});
re += ts_kernel(pointi<2>{2, 2}) * sh_ts_block(block_idx.local + pointi<2>{1, 1});
ts_dst(block_idx.global) = re;
}
});
cudaEventRecord(events[1], nullptr);
policy.synchronize();
float avg_ms;
cudaEventElapsedTime(&avg_ms, events[0], events[1]);
state.SetIterationTime(avg_ms / 1000.0f);
cudaEventDestroy(events[0]);
cudaEventDestroy(events[1]);
}
state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src_pad.size()) *
sizeof(ts_dst[0]));
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_dst.size()) *
ts_kernel.size() * 2);
}
void bm_cuda_tensor_block_conv_overlap(benchmark::State& state) {
pointi<2> grid_dim;
fill(grid_dim, state.range(0));
typedef dim<16, 16> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
cuda::tensor<float, 2> ts_kernel(pointi<2>{3, 3});
auto padding = ts_kernel.shape() / 2;
auto valid_block_dim = block_dim - ts_kernel.shape() + 1;
auto shape = valid_block_dim * grid_dim;
cuda::tensor<float, 2> ts_src_pad(shape + ts_kernel.shape() - 1);
auto ts_src_view = view::slice(ts_src_pad, padding, shape);
cuda::tensor<float, 2> ts_dst(ts_src_view.shape());
while (state.KeepRunning()) {
cuda::block_for_index<BLOCK_DIM>(grid_dim, [=] __device__(
cuda::block_index<BLOCK_DIM> block_idx) {
auto valid_global_idx = valid_block_dim * block_idx.block + block_idx.local - padding;
__shared__ local_tensor<float, BLOCK_DIM> sh_ts_block;
if (inside_rect(valid_global_idx, pointi<2>{0, 0}, ts_src_view.shape())) {
sh_ts_block(block_idx.local) = ts_src_view(valid_global_idx);
} else {
sh_ts_block(block_idx.local) = 0.0f;
}
cuda::syncthreads();
if (inside_rect(block_idx.local, padding,
block_idx.block_dim - ts_kernel.shape() + pointi<2>{1, 1}) &&
inside_rect(valid_global_idx, zero<pointi<2>>::value(), ts_src_view.shape())) {
auto re = 0.0f;
re += ts_kernel(pointi<2>{0, 0}) * sh_ts_block(block_idx.local + pointi<2>{-1, -1});
re += ts_kernel(pointi<2>{1, 0}) * sh_ts_block(block_idx.local + pointi<2>{0, -1});
re += ts_kernel(pointi<2>{2, 0}) * sh_ts_block(block_idx.local + pointi<2>{1, -1});
re += ts_kernel(pointi<2>{0, 1}) * sh_ts_block(block_idx.local + pointi<2>{-1, 0});
re += ts_kernel(pointi<2>{1, 1}) * sh_ts_block(block_idx.local + pointi<2>{0, 0});
re += ts_kernel(pointi<2>{2, 1}) * sh_ts_block(block_idx.local + pointi<2>{1, 0});
re += ts_kernel(pointi<2>{0, 2}) * sh_ts_block(block_idx.local + pointi<2>{-1, 1});
re += ts_kernel(pointi<2>{1, 2}) * sh_ts_block(block_idx.local + pointi<2>{0, 1});
re += ts_kernel(pointi<2>{2, 2}) * sh_ts_block(block_idx.local + pointi<2>{1, 1});
ts_dst(valid_global_idx) = re;
}
});
}
state.SetBytesProcessed(state.iterations() * static_cast<size_t>(ts_src_view.size()) *
sizeof(ts_dst[0]));
state.SetItemsProcessed(state.iterations() * static_cast<size_t>(ts_dst.size()) *
ts_kernel.size() * 2);
}
auto bm_cuda_tensor2f_general_roll_conv = bm_tensor2f_general_roll_conv<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2f_general_roll_conv)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor2f_general_unroll_conv = bm_tensor2f_general_unroll_conv<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2f_general_unroll_conv)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor2_view_conv_local_tensor3x3 =
bm_tensor2_view_conv_local_tensor3x3<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor2_view_conv_local_tensor3x3)->Arg(512)->Arg(10_K);
auto bm_cuda_tensor_view_conv_tensor3x3 = bm_tensor_view_conv_tensor3x3<cuda::tensor<float, 2>>;
BENCHMARK(bm_cuda_tensor_view_conv_tensor3x3)->Arg(128)->Arg(10_K);
BENCHMARK(bm_cuda_tensor_block_conv_halo)->Arg(512)->Arg(1_K)->Arg(2_K);
BENCHMARK(bm_cuda_tensor_block_conv_overlap)->Arg(512)->Arg(1_K)->Arg(2_K);
|
26b32d13f0507d03438588f3713a3dd09dbcbbcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_code_executor.h"
#include "lpu_parts_tracking.h"
#include "../runtime/structure.h"
#include "../gpu-utils/gpu_utils.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <vector>
/*******************************************************************************************************************************************
Note that *_cuda.cu files have the code that needs the NVCC compiler for compilation
*******************************************************************************************************************************************/
//--------------------------------------------------------- GPU Code Executor -------------------------------------------------------------/
void GpuCodeExecutor::execute() {
struct timeval tv;
gettimeofday(&tv, NULL);
long startTime = tv.tv_sec * 1000000 + tv.tv_usec;
lpuBatchController->submitCurrentBatchToGpu();
gettimeofday(&tv, NULL);
long endTime = tv.tv_sec * 1000000 + tv.tv_usec;
double timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addStagingInTime(timeTaken);
startTime = endTime;
offloadFunction();
hipDeviceSynchronize();
check_error(hipGetLastError(), *logFile);
gettimeofday(&tv, NULL);
endTime = tv.tv_sec * 1000000 + tv.tv_usec;
timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addExecutionTime(timeTaken);
startTime = endTime;
lpuBatchController->updateBatchDataPartsFromGpuResults();
lpuBatchController->resetController();
gettimeofday(&tv, NULL);
endTime = tv.tv_sec * 1000000 + tv.tv_usec;
timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addStagingOutTime(timeTaken);
offloadStats->increaseExecutionCount();
resetCurrentBatchLpuRanges();
}
void GpuCodeExecutor::cleanup() {
hipDeviceReset();
delete lpuCountVector;
delete lpuBatchRangeVector;
}
| 26b32d13f0507d03438588f3713a3dd09dbcbbcf.cu | #include "gpu_code_executor.h"
#include "lpu_parts_tracking.h"
#include "../runtime/structure.h"
#include "../gpu-utils/gpu_utils.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <vector>
/*******************************************************************************************************************************************
Note that *_cuda.cu files have the code that needs the NVCC compiler for compilation
*******************************************************************************************************************************************/
//--------------------------------------------------------- GPU Code Executor -------------------------------------------------------------/
void GpuCodeExecutor::execute() {
struct timeval tv;
gettimeofday(&tv, NULL);
long startTime = tv.tv_sec * 1000000 + tv.tv_usec;
lpuBatchController->submitCurrentBatchToGpu();
gettimeofday(&tv, NULL);
long endTime = tv.tv_sec * 1000000 + tv.tv_usec;
double timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addStagingInTime(timeTaken);
startTime = endTime;
offloadFunction();
cudaThreadSynchronize();
check_error(cudaGetLastError(), *logFile);
gettimeofday(&tv, NULL);
endTime = tv.tv_sec * 1000000 + tv.tv_usec;
timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addExecutionTime(timeTaken);
startTime = endTime;
lpuBatchController->updateBatchDataPartsFromGpuResults();
lpuBatchController->resetController();
gettimeofday(&tv, NULL);
endTime = tv.tv_sec * 1000000 + tv.tv_usec;
timeTaken = ((endTime - startTime) * 1.0) / (1000 * 1000);
offloadStats->addStagingOutTime(timeTaken);
offloadStats->increaseExecutionCount();
resetCurrentBatchLpuRanges();
}
void GpuCodeExecutor::cleanup() {
cudaDeviceReset();
delete lpuCountVector;
delete lpuBatchRangeVector;
}
|
360baf0866d5dcff5463a7f450c80a49410d3094.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuseful.h>
#include<kendall.h>
#define NUMTHREADS 16
#define THREADWORK 32
__global__ void gpuKendall(const float * a, size_t na,
const float * b, size_t nb, size_t sampleSize, double * results)
{
size_t
i, j, tests,
tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y,
rowa = bx * sampleSize, rowb = by * sampleSize;
float
discordant, concordant = 0.f,
numer, denom;
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
for(i = tx; i < sampleSize; i += NUMTHREADS) {
for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
+ ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
+ ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
concordant = concordant + (float)tests;
}
}
threadSums[tx*NUMTHREADS+ty] = concordant;
__syncthreads();
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if(ty < i)
threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
__syncthreads();
}
// if(ty == 0) {
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if((tx < i) && (ty == 0))
threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
__syncthreads();
}
// }
if((tx == 0) && (ty == 0)) {
concordant = threadSums[0];
denom = (float)sampleSize;
		denom = (denom * (denom - 1.f)) / 2.f;
		discordant = denom - concordant;
numer = concordant - discordant;
results[by*na+bx] = ((double)numer)/((double)denom);
}
}
__host__ void masterKendall(const float * x, size_t nx,
const float * y, size_t ny, size_t sampleSize, double * results)
{
size_t
outputLength = nx * ny, outputBytes = outputLength*sizeof(double),
xBytes = nx*sampleSize*sizeof(float),
yBytes = ny*sampleSize*sizeof(float);
float
* gpux, * gpuy;
double
* gpuResults;
dim3
initGrid(nx, ny), initBlock(NUMTHREADS, NUMTHREADS);
checkDoubleCapable("Your device doesn't support double precision arithmetic, so the Kendall functionality is disabled. Sorry for any inconvenience.");
hipMalloc((void **)&gpux, xBytes);
hipMalloc((void **)&gpuy, yBytes);
checkCudaError("input vector space allocation");
hipMemcpy(gpux, x, xBytes, hipMemcpyHostToDevice);
hipMemcpy(gpuy, y, yBytes, hipMemcpyHostToDevice);
checkCudaError("copying input vectors to gpu");
hipMalloc((void **)&gpuResults, outputBytes);
checkCudaError("allocation of space for result matrix");
hipLaunchKernelGGL(( gpuKendall), dim3(initGrid), dim3(initBlock), 0, 0, gpux, nx, gpuy, ny, sampleSize,
gpuResults);
checkCudaError("executing gpu kernel");
hipFree(gpux); hipFree(gpuy);
hipMemcpy(results, gpuResults, outputBytes, hipMemcpyDeviceToHost);
hipFree(gpuResults);
checkCudaError("copying results from gpu and cleaning up");
}
| 360baf0866d5dcff5463a7f450c80a49410d3094.cu | #include<cuseful.h>
#include<kendall.h>
#define NUMTHREADS 16
#define THREADWORK 32
__global__ void gpuKendall(const float * a, size_t na,
const float * b, size_t nb, size_t sampleSize, double * results)
{
size_t
i, j, tests,
tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y,
rowa = bx * sampleSize, rowb = by * sampleSize;
float
discordant, concordant = 0.f,
numer, denom;
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
for(i = tx; i < sampleSize; i += NUMTHREADS) {
for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
+ ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
+ ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
concordant = concordant + (float)tests;
}
}
threadSums[tx*NUMTHREADS+ty] = concordant;
__syncthreads();
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if(ty < i)
threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
__syncthreads();
}
// if(ty == 0) {
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if((tx < i) && (ty == 0))
threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
__syncthreads();
}
// }
if((tx == 0) && (ty == 0)) {
concordant = threadSums[0];
denom = (float)sampleSize;
		denom = (denom * (denom - 1.f)) / 2.f;
		discordant = denom - concordant;
numer = concordant - discordant;
results[by*na+bx] = ((double)numer)/((double)denom);
}
}
__host__ void masterKendall(const float * x, size_t nx,
const float * y, size_t ny, size_t sampleSize, double * results)
{
size_t
outputLength = nx * ny, outputBytes = outputLength*sizeof(double),
xBytes = nx*sampleSize*sizeof(float),
yBytes = ny*sampleSize*sizeof(float);
float
* gpux, * gpuy;
double
* gpuResults;
dim3
initGrid(nx, ny), initBlock(NUMTHREADS, NUMTHREADS);
checkDoubleCapable("Your device doesn't support double precision arithmetic, so the Kendall functionality is disabled. Sorry for any inconvenience.");
cudaMalloc((void **)&gpux, xBytes);
cudaMalloc((void **)&gpuy, yBytes);
checkCudaError("input vector space allocation");
cudaMemcpy(gpux, x, xBytes, cudaMemcpyHostToDevice);
cudaMemcpy(gpuy, y, yBytes, cudaMemcpyHostToDevice);
checkCudaError("copying input vectors to gpu");
cudaMalloc((void **)&gpuResults, outputBytes);
checkCudaError("allocation of space for result matrix");
gpuKendall<<<initGrid, initBlock>>>(gpux, nx, gpuy, ny, sampleSize,
gpuResults);
checkCudaError("executing gpu kernel");
cudaFree(gpux); cudaFree(gpuy);
cudaMemcpy(results, gpuResults, outputBytes, cudaMemcpyDeviceToHost);
cudaFree(gpuResults);
checkCudaError("copying results from gpu and cleaning up");
}
|
4155e619f51fe3b6c13fcb5bd4eb5974398e0848.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// connected components tests
// Author: Andrei Schaffer [email protected]
#include <utilities/base_fixture.hpp>
#include <utilities/high_res_clock.h>
#include <utilities/test_utilities.hpp>
#include <hip/hip_runtime_api.h>
#include <rmm/device_vector.hpp>
#include <converters/COOtoCSR.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <rmm/device_vector.hpp>
#include <algorithm>
#include <iterator>
namespace { // un-named
struct Usecase {
explicit Usecase(const std::string& a)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
const std::string& get_matrix_file(void) const { return matrix_file; }
private:
std::string matrix_file;
};
} // namespace
struct Tests_Weakly_CC : ::testing::TestWithParam<Usecase> {
Tests_Weakly_CC() {}
static void SetupTestCase() {}
static void TearDownTestCase()
{
if (cugraph::test::g_perf) {
for (unsigned int i = 0; i < weakly_cc_time.size(); ++i) {
std::cout << weakly_cc_time[i] << std::endl;
}
}
}
virtual void SetUp() {}
virtual void TearDown() {}
static std::vector<double> weakly_cc_time;
void run_current_test(const Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.get_matrix_file()) + std::string("_") +
ss.str().c_str();
int m, k, nnz; //
MM_typecode mc;
HighResClock hr_clock;
double time_tmp;
FILE* fpin = fopen(param.get_matrix_file().c_str(), "r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.get_matrix_file() << ") failure.";
ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0)
<< "could not read Matrix Market file properties"
<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_TRUE(mm_is_symmetric(mc)); // weakly cc only works w/ undirected graphs, for now;
#ifdef _DEBUG_WEAK_CC
std::cout << "matrix nrows: " << m << "\n";
std::cout << "matrix nnz: " << nnz << "\n";
#endif
// Allocate memory on host
std::vector<int> cooRowInd(nnz);
std::vector<int> cooColInd(nnz);
std::vector<int> labels(m); // for G(V, E), m := |V|
std::vector<int> verts(m);
// Read: COO Format
//
ASSERT_EQ((cugraph::test::mm_to_coo<int, int>(
fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], nullptr, nullptr)),
0)
<< "could not read matrix data"
<< "\n";
ASSERT_EQ(fclose(fpin), 0);
cugraph::legacy::GraphCOOView<int, int, float> G_coo(
&cooRowInd[0], &cooColInd[0], nullptr, m, nnz);
auto G_unique = cugraph::coo_to_csr(G_coo);
cugraph::legacy::GraphCSRView<int, int, float> G = G_unique->view();
rmm::device_vector<int> d_labels(m);
if (cugraph::test::g_perf) {
hr_clock.start();
cugraph::connected_components<int, int, float>(
G, cugraph::cugraph_cc_t::CUGRAPH_WEAK, d_labels.data().get());
hipDeviceSynchronize();
hr_clock.stop(&time_tmp);
weakly_cc_time.push_back(time_tmp);
} else {
hipProfilerStart();
cugraph::connected_components<int, int, float>(
G, cugraph::cugraph_cc_t::CUGRAPH_WEAK, d_labels.data().get());
hipProfilerStop();
hipDeviceSynchronize();
}
}
};
std::vector<double> Tests_Weakly_CC::weakly_cc_time;
TEST_P(Tests_Weakly_CC, Weakly_CC) { run_current_test(GetParam()); }
// --gtest_filter=*simple_test*
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_Weakly_CC,
::testing::Values(Usecase("test/datasets/dolphins.mtx"),
Usecase("test/datasets/coPapersDBLP.mtx"),
Usecase("test/datasets/coPapersCiteseer.mtx"),
Usecase("test/datasets/hollywood.mtx")));
CUGRAPH_TEST_PROGRAM_MAIN()
| 4155e619f51fe3b6c13fcb5bd4eb5974398e0848.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
*/
// connected components tests
// Author: Andrei Schaffer [email protected]
#include <utilities/base_fixture.hpp>
#include <utilities/high_res_clock.h>
#include <utilities/test_utilities.hpp>
#include <cuda_profiler_api.h>
#include <rmm/device_vector.hpp>
#include <converters/COOtoCSR.cuh>
#include <cugraph/algorithms.hpp>
#include <cugraph/legacy/graph.hpp>
#include <rmm/device_vector.hpp>
#include <algorithm>
#include <iterator>
namespace { // un-named
struct Usecase {
explicit Usecase(const std::string& a)
{
// assume relative paths are relative to RAPIDS_DATASET_ROOT_DIR
const std::string& rapidsDatasetRootDir = cugraph::test::get_rapids_dataset_root_dir();
if ((a != "") && (a[0] != '/')) {
matrix_file = rapidsDatasetRootDir + "/" + a;
} else {
matrix_file = a;
}
}
const std::string& get_matrix_file(void) const { return matrix_file; }
private:
std::string matrix_file;
};
} // namespace
struct Tests_Weakly_CC : ::testing::TestWithParam<Usecase> {
Tests_Weakly_CC() {}
static void SetupTestCase() {}
static void TearDownTestCase()
{
if (cugraph::test::g_perf) {
for (unsigned int i = 0; i < weakly_cc_time.size(); ++i) {
std::cout << weakly_cc_time[i] << std::endl;
}
}
}
virtual void SetUp() {}
virtual void TearDown() {}
static std::vector<double> weakly_cc_time;
void run_current_test(const Usecase& param)
{
const ::testing::TestInfo* const test_info =
::testing::UnitTest::GetInstance()->current_test_info();
std::stringstream ss;
std::string test_id = std::string(test_info->test_case_name()) + std::string(".") +
std::string(test_info->name()) + std::string("_") +
cugraph::test::getFileName(param.get_matrix_file()) + std::string("_") +
ss.str().c_str();
int m, k, nnz; //
MM_typecode mc;
HighResClock hr_clock;
double time_tmp;
FILE* fpin = fopen(param.get_matrix_file().c_str(), "r");
ASSERT_NE(fpin, nullptr) << "fopen (" << param.get_matrix_file() << ") failure.";
ASSERT_EQ(cugraph::test::mm_properties<int>(fpin, 1, &mc, &m, &k, &nnz), 0)
<< "could not read Matrix Market file properties"
<< "\n";
ASSERT_TRUE(mm_is_matrix(mc));
ASSERT_TRUE(mm_is_coordinate(mc));
ASSERT_TRUE(mm_is_symmetric(mc)); // weakly cc only works w/ undirected graphs, for now;
#ifdef _DEBUG_WEAK_CC
std::cout << "matrix nrows: " << m << "\n";
std::cout << "matrix nnz: " << nnz << "\n";
#endif
// Allocate memory on host
std::vector<int> cooRowInd(nnz);
std::vector<int> cooColInd(nnz);
std::vector<int> labels(m); // for G(V, E), m := |V|
std::vector<int> verts(m);
// Read: COO Format
//
ASSERT_EQ((cugraph::test::mm_to_coo<int, int>(
fpin, 1, nnz, &cooRowInd[0], &cooColInd[0], nullptr, nullptr)),
0)
<< "could not read matrix data"
<< "\n";
ASSERT_EQ(fclose(fpin), 0);
cugraph::legacy::GraphCOOView<int, int, float> G_coo(
&cooRowInd[0], &cooColInd[0], nullptr, m, nnz);
auto G_unique = cugraph::coo_to_csr(G_coo);
cugraph::legacy::GraphCSRView<int, int, float> G = G_unique->view();
rmm::device_vector<int> d_labels(m);
if (cugraph::test::g_perf) {
hr_clock.start();
cugraph::connected_components<int, int, float>(
G, cugraph::cugraph_cc_t::CUGRAPH_WEAK, d_labels.data().get());
cudaDeviceSynchronize();
hr_clock.stop(&time_tmp);
weakly_cc_time.push_back(time_tmp);
} else {
cudaProfilerStart();
cugraph::connected_components<int, int, float>(
G, cugraph::cugraph_cc_t::CUGRAPH_WEAK, d_labels.data().get());
cudaProfilerStop();
cudaDeviceSynchronize();
}
}
};
std::vector<double> Tests_Weakly_CC::weakly_cc_time;
TEST_P(Tests_Weakly_CC, Weakly_CC) { run_current_test(GetParam()); }
// --gtest_filter=*simple_test*
INSTANTIATE_TEST_SUITE_P(simple_test,
Tests_Weakly_CC,
::testing::Values(Usecase("test/datasets/dolphins.mtx"),
Usecase("test/datasets/coPapersDBLP.mtx"),
Usecase("test/datasets/coPapersCiteseer.mtx"),
Usecase("test/datasets/hollywood.mtx")));
CUGRAPH_TEST_PROGRAM_MAIN()
|
cdd0eab3be6787819a840d40a2f29edacea32529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void
zvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix c,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zvjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobi_diagscal(
magma_int_t num_rows,
magma_z_matrix d,
magma_z_matrix b,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
hipLaunchKernelGGL(( zjacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobiupdate_kernel( int num_rows,
int num_cols,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_z_matrix
t = A*x
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobiupdate(
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( zjacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate_bw(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
//hipLaunchKernelGGL(( zjacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
hipLaunchKernelGGL(( zjacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x,
magmaDoubleComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaDoubleComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[in]
tmp magma_z_matrix
workspace
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix tmp,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
//magma_z_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
hipLaunchKernelGGL(( zjacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
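/*
    Illustrative sketch of building the indices array for overlapping domains
    (hypothetical host buffer h_idx; k and ovl are assumed domain sizes):

        // std::vector<magma_index_t> h_idx;
        // for (magma_index_t r = 0; r < k; ++r)       h_idx.push_back(r);   // own rows
        // for (magma_index_t r = k - ovl; r < k; ++r) h_idx.push_back(r);   // overlap rows, listed twice
        // hipMemcpy( indices, h_idx.data(),
        //            h_idx.size() * sizeof(magma_index_t), hipMemcpyHostToDevice );
        // magma_zjacobispmvupdateselect( maxiter, (magma_int_t) h_idx.size(), indices,
        //                                A, t, b, d, tmp, &x, queue );

    Rows listed more than once are simply relaxed more than once per sweep.
*/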
__global__ void
zftjacobicontractions_kernel(
int num_rows,
magmaDoubleComplex * xkm2val,
magmaDoubleComplex * xkm1val,
magmaDoubleComplex * xkval,
magmaDoubleComplex * zval,
magmaDoubleComplex * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_Z_MAKE( MAGMA_Z_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_Z_MAKE(
MAGMA_Z_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_Z_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_z_matrix
vector x^{k-2}
@param[in]
xkm1 magma_z_matrix
                vector x^{k-1}
@param[in]
xk magma_z_matrix
                vector x^{k}
@param[out]
z magma_z_matrix*
ratio
@param[out]
c magma_z_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zftjacobicontractions(
magma_z_matrix xkm2,
magma_z_matrix xkm1,
magma_z_matrix xk,
magma_z_matrix *z,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
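/*
    Worked example for the coefficients computed above (illustrative numbers): if a
    component changed by |x_i^{k-1} - x_i^{k-2}| = 1e-2 in the previous sweep and by
    |x_i^{k} - x_i^{k-1}| = 2.5e-3 in the current sweep, then z_i = 2.5e-3 and
    c_i = 1e-2 / 2.5e-3 = 4.0, i.e. that component contracted by a factor of four.
    c_i > 1 indicates local convergence; c_i <= 1 indicates stagnation or divergence.
*/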
__global__ void
zftjacobiupdatecheck_kernel(
int num_rows,
double delta,
magmaDoubleComplex * xold,
magmaDoubleComplex * xnew,
magmaDoubleComplex * zprev,
magmaDoubleComplex * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
double t1 = delta * MAGMA_Z_ABS(cval[idx]);
double vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
magmaDoubleComplex xold_l = xold[idx];
magmaDoubleComplex xnew_l = xnew[idx];
magmaDoubleComplex znew = MAGMA_Z_MAKE(
max( MAGMA_Z_ABS( xold_l - xnew_l), 1e-15), 0.0 );
magmaDoubleComplex znr = zprev[idx] / znew;
double t2 = MAGMA_Z_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_Z_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
    Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta double
threshold
@param[in,out]
xold magma_z_matrix*
vector xold
@param[in,out]
xnew magma_z_matrix*
vector xnew
@param[in,out]
zprev magma_z_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_z_matrix
contraction coefficients
@param[in,out]
    flag_t      magma_int_t*
threshold condition
@param[in,out]
    flag_fp     magma_int_t*
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zftjacobiupdatecheck(
double delta,
magma_z_matrix *xold,
magma_z_matrix *xnew,
magma_z_matrix *zprev,
magma_z_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(),
xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
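/*
    Illustrative call sequence for the fault-tolerant Jacobi pieces above (a sketch;
    xkm2/xkm1/xk, z, c, delta and the flag arrays are assumed to be set up by the caller):

        // magma_zftjacobicontractions( xkm2, xkm1, xk, &z, &c, queue );  // z_i and c_i
        // magma_zftjacobiupdatecheck( delta, &xkm1, &xk, &z, c,
        //                             flag_t, flag_fp, queue );          // accept or roll back

    Components whose update violates the contraction-based bound are rolled back
    (xnew[i] = xold[i]) and marked in flag_t; flag_fp counts the sweeps since the
    false-positive condition last triggered for that component.
*/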
| cdd0eab3be6787819a840d40a2f29edacea32529.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_z
__global__ void
zvjacobisetup_gpu( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ ){
c[row+i*num_rows] = b[row+i*num_rows] / d[row];
x[row+i*num_rows] = c[row+i*num_rows];
}
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zjacobisetup_vector_gpu(
magma_int_t num_rows,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix c,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) );
int num_vecs = b.num_rows / num_rows;
magma_int_t threads = BLOCK_SIZE;
zvjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>>
( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobidiagscal_kernel( int num_rows,
int num_vecs,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *c)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++)
c[row+i*num_rows] = b[row+i*num_rows] * d[row];
}
}
/**
Purpose
-------
Prepares the Jacobi Iteration according to
x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k
x^(k+1) = c - M * x^k.
Returns the vector c. It calls a GPU kernel
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
c magma_z_matrix*
c = D^(-1) * b
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobi_diagscal(
magma_int_t num_rows,
magma_z_matrix d,
magma_z_matrix b,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( num_rows, 512 ));
int num_vecs = b.num_rows*b.num_cols/num_rows;
magma_int_t threads = 512;
zjacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val );
return MAGMA_SUCCESS;
}
__global__ void
zjacobiupdate_kernel( int num_rows,
int num_cols,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ){
for( int i=0; i<num_cols; i++)
x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row];
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-t)
where d is the diagonal of the system matrix A and t=Ax.
Arguments
---------
@param[in]
t magma_z_matrix
t = A*x
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobiupdate(
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
return MAGMA_SUCCESS;
}
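/*
    Usage note (illustrative): this routine expects t = A*x to be computed beforehand by a
    separate SpMV, after which x <- x + d .* (b - t) is applied; the routines further below
    fuse both steps into a single kernel at the cost of an asynchronous, in-place update.

        // sketch of the split variant (c_one/c_zero denote the usual MAGMA scalar constants):
        // magma_z_spmv( c_one, A, x, c_zero, t, queue );   // t = A * x
        // magma_zjacobiupdate( t, b, d, &x, queue );       // x = x + d .* (b - t)
*/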
__global__ void
zjacobispmvupdate_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
zjacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdate_bw_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x )
{
int row_tmp = blockDim.x * blockIdx.x + threadIdx.x;
int row = num_rows-1 - row_tmp;
int j;
if( row>-1 ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row];
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel processes the thread blocks in reversed order.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdate_bw(
magma_int_t maxiter,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO;
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
for( magma_int_t i=0; i<maxiter; i++ ) {
// distinct routines imply synchronization
// magma_z_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x
// zjacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval );
// merged in one implies asynchronous update
zjacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval );
}
return MAGMA_SUCCESS;
}
__global__ void
zjacobispmvupdateselect_kernel(
int num_rows,
int num_cols,
int num_updates,
magma_index_t * indices,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex *t,
magmaDoubleComplex *b,
magmaDoubleComplex *d,
magmaDoubleComplex *x,
magmaDoubleComplex *y )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int j;
if( idx<num_updates){
int row = indices[ idx ];
printf(" ");
//if( row < num_rows ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( int i=0; i<num_cols; i++){
for( j=start; j<end; j++){
dot += dval[ j ] * x[ dcolind[j]+i*num_rows ];
}
x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row];
//magmaDoubleComplex add = (b[row+i*num_rows]-dot) * d[row];
//#if defined(PRECISION_s) //|| defined(PRECISION_d)
// atomicAdd( x + row + i*num_rows, add );
//#endif
// ( unsigned int* address, unsigned int val);
//}
}
}
}
/**
Purpose
-------
Updates the iteration vector x for the Jacobi iteration
according to
x=x+d.*(b-Ax)
This kernel allows for overlapping domains: the indices-array contains
the locations that are updated. Locations may be repeated to simulate
overlapping domains.
Arguments
---------
@param[in]
maxiter magma_int_t
number of Jacobi iterations
@param[in]
num_updates magma_int_t
number of updates - length of the indices array
@param[in]
indices magma_index_t*
indices, which entries of x to update
@param[in]
A magma_z_matrix
system matrix
@param[in]
t magma_z_matrix
workspace
@param[in]
b magma_z_matrix
RHS b
@param[in]
d magma_z_matrix
vector with diagonal entries
@param[in]
tmp magma_z_matrix
workspace
@param[out]
x magma_z_matrix*
iteration vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zjacobispmvupdateselect(
magma_int_t maxiter,
magma_int_t num_updates,
magma_index_t *indices,
magma_z_matrix A,
magma_z_matrix t,
magma_z_matrix b,
magma_z_matrix d,
magma_z_matrix tmp,
magma_z_matrix *x,
magma_queue_t queue )
{
// local variables
//magmaDoubleComplex c_zero = MAGMA_Z_ZERO
//magmaDoubleComplex c_one = MAGMA_Z_ONE;
//magma_z_matrix swp;
dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) );
for( magma_int_t i=0; i<maxiter; i++ ) {
zjacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval );
magma_device_sync();
//swp.dval = x->dval;
//x->dval = tmp.dval;
//tmp.dval = swp.dval;
}
return MAGMA_SUCCESS;
}
__global__ void
zftjacobicontractions_kernel(
int num_rows,
magmaDoubleComplex * xkm2val,
magmaDoubleComplex * xkm1val,
magmaDoubleComplex * xkval,
magmaDoubleComplex * zval,
magmaDoubleComplex * cval )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
zval[idx] = MAGMA_Z_MAKE( MAGMA_Z_ABS( xkm1val[idx] - xkval[idx] ), 0.0);
cval[ idx ] = MAGMA_Z_MAKE(
MAGMA_Z_ABS( xkm2val[idx] - xkm1val[idx] )
/ MAGMA_Z_ABS( xkm1val[idx] - xkval[idx] )
,0.0 );
}
}
/**
Purpose
-------
Computes the contraction coefficients c_i:
c_i = z_i^{k-1} / z_i^{k}
= | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} |
Arguments
---------
@param[in]
xkm2 magma_z_matrix
vector x^{k-2}
@param[in]
xkm1 magma_z_matrix
                vector x^{k-1}
@param[in]
xk magma_z_matrix
                vector x^{k}
@param[out]
z magma_z_matrix*
ratio
@param[out]
c magma_z_matrix*
contraction coefficients
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zftjacobicontractions(
magma_z_matrix xkm2,
magma_z_matrix xkm1,
magma_z_matrix xk,
magma_z_matrix *z,
magma_z_matrix *c,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
zftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval );
return MAGMA_SUCCESS;
}
__global__ void
zftjacobiupdatecheck_kernel(
int num_rows,
double delta,
magmaDoubleComplex * xold,
magmaDoubleComplex * xnew,
magmaDoubleComplex * zprev,
magmaDoubleComplex * cval,
magma_int_t *flag_t,
magma_int_t *flag_fp )
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if( idx<num_rows ){
double t1 = delta * MAGMA_Z_ABS(cval[idx]);
double vkv = 1.0;
for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){
vkv = vkv*2;
}
magmaDoubleComplex xold_l = xold[idx];
magmaDoubleComplex xnew_l = xnew[idx];
magmaDoubleComplex znew = MAGMA_Z_MAKE(
max( MAGMA_Z_ABS( xold_l - xnew_l), 1e-15), 0.0 );
magmaDoubleComplex znr = zprev[idx] / znew;
double t2 = MAGMA_Z_ABS( znr - cval[idx] );
//% evaluate fp-cond
magma_int_t fpcond = 0;
if( MAGMA_Z_ABS(znr)>vkv ){
fpcond = 1;
}
// % combine t-cond and fp-cond + flag_t == 1
magma_int_t cond = 0;
if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){
cond = 1;
}
flag_fp[idx] = flag_fp[idx]+1;
if( fpcond>0 ){
flag_fp[idx] = 0;
}
if( cond > 0 ){
flag_t[idx] = 0;
zprev[idx] = znew;
xold[idx] = xnew_l;
} else {
flag_t[idx] = 1;
xnew[idx] = xold_l;
}
}
}
/**
Purpose
-------
    Checks the Jacobi updates according to the condition in the ScaLA'15 paper.
Arguments
---------
@param[in]
delta double
threshold
@param[in,out]
xold magma_z_matrix*
vector xold
@param[in,out]
xnew magma_z_matrix*
vector xnew
@param[in,out]
zprev magma_z_matrix*
vector z = | x_k-1 - x_k |
@param[in]
c magma_z_matrix
contraction coefficients
@param[in,out]
    flag_t      magma_int_t*
threshold condition
@param[in,out]
    flag_fp     magma_int_t*
false positive condition
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_z
********************************************************************/
extern "C" magma_int_t
magma_zftjacobiupdatecheck(
double delta,
magma_z_matrix *xold,
magma_z_matrix *xnew,
magma_z_matrix *zprev,
magma_z_matrix c,
magma_int_t *flag_t,
magma_int_t *flag_fp,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE ));
magma_int_t threads = BLOCK_SIZE;
zftjacobiupdatecheck_kernel<<< grid, threads, 0, queue->cuda_stream()>>>
( xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval,
flag_t, flag_fp );
return MAGMA_SUCCESS;
}
|
1df0d5603838fd06d7a7441e0a68b3b7fcd98b74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addInc(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt, unsigned int* deviceInc)
{
/*
__shared__ int inc;
if (threadIdx.x == 0)
{
inc = deviceInc[blockIdx.x];
}
__syncthreads();
*/
int inc = deviceInc[blockIdx.x];
int cntInB = blockDim.x * 2;
int idxInG = blockIdx.x * cntInB + threadIdx.x;
if (idxInG < eleCnt)
{
deviceOutput[idxInG] = deviceInput[idxInG] + inc;
}
if (idxInG + blockDim.x < eleCnt)
{
deviceOutput[idxInG + blockDim.x] = deviceInput[idxInG + blockDim.x] + inc;
}
} | 1df0d5603838fd06d7a7441e0a68b3b7fcd98b74.cu | #include "includes.h"
__global__ void addInc(unsigned int* deviceInput, unsigned int* deviceOutput, int eleCnt, unsigned int* deviceInc)
{
/*
__shared__ int inc;
if (threadIdx.x == 0)
{
inc = deviceInc[blockIdx.x];
}
__syncthreads();
*/
int inc = deviceInc[blockIdx.x];
int cntInB = blockDim.x * 2;
int idxInG = blockIdx.x * cntInB + threadIdx.x;
if (idxInG < eleCnt)
{
deviceOutput[idxInG] = deviceInput[idxInG] + inc;
}
if (idxInG + blockDim.x < eleCnt)
{
deviceOutput[idxInG + blockDim.x] = deviceInput[idxInG + blockDim.x] + inc;
}
} |
b62323e2ae7871acf55ccbbccb5b68856b0ac107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_2)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail to launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM + NDIM)
#define NVAR (VAR_DENSITY_ENERGY + 1)
/*
* Generic functions
*/
template <typename T>
T *alloc(int N)
{
T *t;
checkCudaErrors(hipMalloc((void **)&t, sizeof(T) * N));
return t;
}
template <typename T>
void dealloc(T *array)
{
checkCudaErrors(hipFree((void *)array));
}
template <typename T>
void copy(T *dst, T *src, int N)
{
checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyDeviceToDevice));
}
template <typename T>
void upload(T *dst, T *src, int N)
{
checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyHostToDevice));
}
template <typename T>
void download(T *dst, T *src, int N)
{
checkCudaErrors(hipMemcpy((void *)dst, (void *)src, N * sizeof(T), hipMemcpyDeviceToHost));
}
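/*
    Illustrative usage of the helpers above (a sketch; N and the host buffer h_buf are
    assumed to exist on the caller's side):

        // float *d_buf = alloc<float>(N);      // device allocation via hipMalloc
        // upload<float>(d_buf, h_buf, N);      // host -> device
        // ... launch kernels operating on d_buf ...
        // download<float>(h_buf, d_buf, N);    // device -> host
        // dealloc<float>(d_buf);

    dump() below uses the same helpers to copy the solution back to the host and write
    the density, momentum and density_energy fields to plain-text files.
*/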
void dump(float *variables, int nel, int nelr)
{
float *h_variables = new float[nelr * NVAR];
download(h_variables, variables, nelr * NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
file << h_variables[i + VAR_DENSITY * nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
{
for (int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM + j) * nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
file << h_variables[i + VAR_DENSITY_ENERGY * nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, float *variables)
{
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
for (int j = 0; j < NVAR; j++)
variables[i + j * nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float *variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables);
getLastCudaError("initialize_variables failed");
}
__device__ __host__ inline void compute_flux_contribution(float &density, float3 &momentum, float &density_energy, float &pressure, float3 &velocity, float3 &fc_momentum_x, float3 &fc_momentum_y, float3 &fc_momentum_z, float3 &fc_density_energy)
{
fc_momentum_x.x = velocity.x * momentum.x + pressure;
fc_momentum_x.y = velocity.x * momentum.y;
fc_momentum_x.z = velocity.x * momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y * momentum.y + pressure;
fc_momentum_y.z = velocity.y * momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z * momentum.z + pressure;
float de_p = density_energy + pressure;
fc_density_energy.x = velocity.x * de_p;
fc_density_energy.y = velocity.y * de_p;
fc_density_energy.z = velocity.z * de_p;
}
__device__ inline void compute_velocity(float &density, float3 &momentum, float3 &velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3 &velocity)
{
return velocity.x * velocity.x + velocity.y * velocity.y + velocity.z * velocity.z;
}
__device__ inline float compute_pressure(float &density, float &density_energy, float &speed_sqd)
{
return (float(GAMMA) - float(1.0f)) * (density_energy - float(0.5f) * density * speed_sqd);
}
__device__ inline float compute_speed_of_sound(float &density, float &pressure)
{
return sqrtf(float(GAMMA) * pressure / density);
}
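/*
    The helpers above implement the standard compressible-flow relations used throughout the
    solver: velocity v = momentum / density, pressure p = (GAMMA - 1) * (E - 0.5 * rho * |v|^2),
    and speed of sound c = sqrt(GAMMA * p / rho).

    The kernels below are templated on first_round: in the very first sweep every cell still
    holds the far-field state, so the <true> specialization reads the constant ff_variable[]
    array instead of the per-cell variables[] array; all later sweeps use the <false>
    specialization, which reads variables[] as usual.
*/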
template<bool first_round>
__global__ void cuda_compute_step_factor(int nelr, float *variables, float *areas, float *step_factors)
{
if (first_round) {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int index = i / nelr + VAR_MOMENTUM;
float density = ff_variable[i/nelr + VAR_DENSITY];
float3 momentum;
momentum.x = ff_variable[index];
momentum.y = ff_variable[index + 1];
momentum.z = ff_variable[index + 2];
// float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr];
float density_energy = ff_variable[i/nelr + VAR_DENSITY_ENERGY];
float3 velocity;
compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
} else {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY * nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
momentum.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
momentum.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr];
float3 velocity;
compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
}
void compute_step_factor(int nelr, float *variables, float *areas, float *step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
hipLaunchKernelGGL(( cuda_compute_step_factor<false>), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
//@findhao for the first iteration
void compute_step_factor2(int nelr, float *variables, float *areas, float *step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
hipLaunchKernelGGL(( cuda_compute_step_factor<true>), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
template<bool first_round>
__global__ void cuda_compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
if (first_round) {
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int j, nb;
float3 normal;
float normal_len;
float factor;
int index2 = i / nelr + VAR_DENSITY;
// float density_i = variables[i + VAR_DENSITY * nelr];
float density_i = ff_variable[index2];
float3 momentum_i;
int index = i / nelr + VAR_MOMENTUM;
// momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
// momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
// momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
momentum_i.x = ff_variable[index];
momentum_i.y = ff_variable[index + 1];
momentum_i.z = ff_variable[index + 2];
// float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr];
float density_energy_i = ff_variable[i/nelr + VAR_DENSITY_ENERGY ];
float3 velocity_i;
compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for (j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j * nelr];
normal.x = normals[i + (j + 0 * NNB) * nelr];
normal.y = normals[i + (j + 1 * NNB) * nelr];
normal.z = normals[i + (j + 2 * NNB) * nelr];
normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z);
if (nb >= 0) // a legitimate neighbor
{
// density_nb = variables[nb + VAR_DENSITY * nelr];
// momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr];
// momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr];
// momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr];
// density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr];
density_nb = ff_variable[nb/nelr + VAR_DENSITY];
int index3 = nb/nelr + VAR_MOMENTUM;
momentum_nb.x = ff_variable[index3];
momentum_nb.y = ff_variable[index3 + 1];
momentum_nb.z = ff_variable[index3 + 2];
density_energy_nb = ff_variable[nb/nelr + VAR_DENSITY_ENERGY];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor * (density_i - density_nb);
flux_i_density_energy += factor * (density_energy_i - density_energy_nb);
flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x);
flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y);
flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f) * normal.x;
flux_i_density += factor * (momentum_nb.x + momentum_i.x);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (momentum_nb.y + momentum_i.y);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (momentum_nb.z + momentum_i.z);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z);
}
else if (nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x * pressure_i;
flux_i_momentum.y += normal.y * pressure_i;
flux_i_momentum.z += normal.z * pressure_i;
}
else if (nb == -2) // a far field boundary
{
factor = float(0.5f) * normal.x;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY * nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy;
} else {
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int j, nb;
float3 normal;
float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY * nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr];
float3 velocity_i;
compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for (j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j * nelr];
normal.x = normals[i + (j + 0 * NNB) * nelr];
normal.y = normals[i + (j + 1 * NNB) * nelr];
normal.z = normals[i + (j + 2 * NNB) * nelr];
normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z);
if (nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY * nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor * (density_i - density_nb);
flux_i_density_energy += factor * (density_energy_i - density_energy_nb);
flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x);
flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y);
flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f) * normal.x;
flux_i_density += factor * (momentum_nb.x + momentum_i.x);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (momentum_nb.y + momentum_i.y);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (momentum_nb.z + momentum_i.z);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z);
}
else if (nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x * pressure_i;
flux_i_momentum.y += normal.y * pressure_i;
flux_i_momentum.z += normal.z * pressure_i;
}
else if (nb == -2) // a far field boundary
{
factor = float(0.5f) * normal.x;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY * nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy;
}
}
void compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
hipLaunchKernelGGL(( cuda_compute_flux<false>), dim3(Dg), dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
void compute_flux2(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
hipLaunchKernelGGL(( cuda_compute_flux<true>), dim3(Dg), dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
template<bool first_round>
__global__ void cuda_time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
if (first_round) {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
float factor = step_factors[i] / float(RK + 1 - j);
int index = i/nelr + VAR_MOMENTUM;
variables[i + VAR_DENSITY * nelr] = ff_variable[i/nelr + VAR_DENSITY] + factor * fluxes[i + VAR_DENSITY * nelr];
variables[i + VAR_DENSITY_ENERGY * nelr] = ff_variable[i/nelr + VAR_DENSITY_ENERGY ] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr];
variables[i + (VAR_MOMENTUM + 0) * nelr] = ff_variable[index] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr];
variables[i + (VAR_MOMENTUM + 1) * nelr] = ff_variable[index + 1] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr];
variables[i + (VAR_MOMENTUM + 2) * nelr] = ff_variable[index + 2] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr];
} else {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
float factor = step_factors[i] / float(RK + 1 - j);
variables[i + VAR_DENSITY * nelr] = old_variables[i + VAR_DENSITY * nelr] + factor * fluxes[i + VAR_DENSITY * nelr];
variables[i + VAR_DENSITY_ENERGY * nelr] = old_variables[i + VAR_DENSITY_ENERGY * nelr] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr];
variables[i + (VAR_MOMENTUM + 0) * nelr] = old_variables[i + (VAR_MOMENTUM + 0) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr];
variables[i + (VAR_MOMENTUM + 1) * nelr] = old_variables[i + (VAR_MOMENTUM + 1) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr];
variables[i + (VAR_MOMENTUM + 2) * nelr] = old_variables[i + (VAR_MOMENTUM + 2) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr];
}
}
void time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
hipLaunchKernelGGL(( cuda_time_step<false>), dim3(Dg), dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
void time_step2(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
hipLaunchKernelGGL(( cuda_time_step<true>), dim3(Dg), dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
/*
* Main function
*/
int main(int argc, char **argv)
{
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char *data_file_name = argv[1];
hipDeviceProp_t prop;
int dev;
checkCudaErrors(hipSetDevice(0));
checkCudaErrors(hipGetDevice(&dev));
checkCudaErrors(hipGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA * ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach) * ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed * float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed * float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM + 0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM + 1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM + 2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY] * (float(0.5f) * (ff_speed * ff_speed)) + (ff_pressure / float(GAMMA - 1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable + VAR_MOMENTUM + 0);
h_ff_momentum.y = *(h_ff_variable + VAR_MOMENTUM + 1);
h_ff_momentum.z = *(h_ff_variable + VAR_MOMENTUM + 2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors(hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR * sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)));
checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)));
checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)));
checkCudaErrors(hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)));
}
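    // At this point the far-field (free-stream) state is fixed: rho = 1.4, p = 1, a velocity of
    // ff_mach times the speed of sound at deg_angle_of_attack degrees in the x-y plane, and
    // total energy E = 0.5 * rho * |v|^2 + p / (GAMMA - 1). These values, together with the
    // corresponding flux contributions, stay in constant memory for the whole run.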
int nel;
int nelr;
// read in domain geometry
float *areas;
int *elements_surrounding_elements;
float *normals;
{
std::ifstream file(data_file_name);
file >> nel;
nelr = BLOCK_SIZE_0 * ((nel / BLOCK_SIZE_0) + ::min(1, nel % BLOCK_SIZE_0));
float *h_areas = new float[nelr];
int *h_elements_surrounding_elements = new int[nelr * NNB];
float *h_normals = new float[nelr * NDIM * NNB];
// read in data
for (int i = 0; i < nel; i++)
{
file >> h_areas[i];
for (int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j * nelr];
if (h_elements_surrounding_elements[i + j * nelr] < 0)
h_elements_surrounding_elements[i + j * nelr] = -1;
h_elements_surrounding_elements[i + j * nelr]--; //it's coming in with Fortran numbering
for (int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k * NNB) * nelr];
h_normals[i + (j + k * NNB) * nelr] = -h_normals[i + (j + k * NNB) * nelr];
}
}
}
// fill in remaining data
int last = nel - 1;
for (int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for (int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j * nelr] = h_elements_surrounding_elements[last + j * nelr];
for (int k = 0; k < NDIM; k++)
                    h_normals[i + (j + k * NNB) * nelr] = h_normals[last + (j + k * NNB) * nelr];
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr * NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr * NNB);
normals = alloc<float>(nelr * NDIM * NNB);
upload<float>(normals, h_normals, nelr * NDIM * NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
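    // The element count was padded up to nelr, the next multiple of BLOCK_SIZE_0, and the
    // padding cells replicate the last real element so that every thread in every block
    // reads valid data without bounds checks inside the kernels.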
// Create arrays and set initial conditions
float *variables = alloc<float>(nelr * NVAR);
initialize_variables(nelr, variables);
float *old_variables = alloc<float>(nelr * NVAR);
float *fluxes = alloc<float>(nelr * NVAR);
float *step_factors = alloc<float>(nelr);
    // make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
hipMemset((void *)step_factors, 0, sizeof(float) * nelr);
// make sure CUDA isn't still doing something before we start timing
hipDeviceSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
StopWatchInterface *timer = 0;
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
    // First iteration: every cell still holds the far-field state, so the specialized
    // compute_step_factor2 / compute_flux2 / time_step2 variants read the ff_variable
    // constants instead of the variables array.
compute_step_factor2(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
compute_flux2(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step2(0, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
for (int j = 1; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
// Begin iterations
for (int i = 1; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr * NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
for (int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
}
hipDeviceSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer) / 1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
| b62323e2ae7871acf55ccbbccb5b68856b0ac107.cu | // Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <iostream>
#include <fstream>
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail too launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM + NDIM)
#define NVAR (VAR_DENSITY_ENERGY + 1)
/*
* Generic functions
*/
template <typename T>
T *alloc(int N)
{
T *t;
checkCudaErrors(cudaMalloc((void **)&t, sizeof(T) * N));
return t;
}
template <typename T>
void dealloc(T *array)
{
checkCudaErrors(cudaFree((void *)array));
}
template <typename T>
void copy(T *dst, T *src, int N)
{
checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyDeviceToDevice));
}
template <typename T>
void upload(T *dst, T *src, int N)
{
checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyHostToDevice));
}
template <typename T>
void download(T *dst, T *src, int N)
{
checkCudaErrors(cudaMemcpy((void *)dst, (void *)src, N * sizeof(T), cudaMemcpyDeviceToHost));
}
void dump(float *variables, int nel, int nelr)
{
float *h_variables = new float[nelr * NVAR];
download(h_variables, variables, nelr * NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
file << h_variables[i + VAR_DENSITY * nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
{
for (int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM + j) * nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for (int i = 0; i < nel; i++)
file << h_variables[i + VAR_DENSITY_ENERGY * nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, float *variables)
{
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
for (int j = 0; j < NVAR; j++)
variables[i + j * nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float *variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
cuda_initialize_variables<<<Dg, Db>>>(nelr, variables);
getLastCudaError("initialize_variables failed");
}
__device__ __host__ inline void compute_flux_contribution(float &density, float3 &momentum, float &density_energy, float &pressure, float3 &velocity, float3 &fc_momentum_x, float3 &fc_momentum_y, float3 &fc_momentum_z, float3 &fc_density_energy)
{
fc_momentum_x.x = velocity.x * momentum.x + pressure;
fc_momentum_x.y = velocity.x * momentum.y;
fc_momentum_x.z = velocity.x * momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y * momentum.y + pressure;
fc_momentum_y.z = velocity.y * momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z * momentum.z + pressure;
float de_p = density_energy + pressure;
fc_density_energy.x = velocity.x * de_p;
fc_density_energy.y = velocity.y * de_p;
fc_density_energy.z = velocity.z * de_p;
}
__device__ inline void compute_velocity(float &density, float3 &momentum, float3 &velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3 &velocity)
{
return velocity.x * velocity.x + velocity.y * velocity.y + velocity.z * velocity.z;
}
__device__ inline float compute_pressure(float &density, float &density_energy, float &speed_sqd)
{
return (float(GAMMA) - float(1.0f)) * (density_energy - float(0.5f) * density * speed_sqd);
}
__device__ inline float compute_speed_of_sound(float &density, float &pressure)
{
return sqrtf(float(GAMMA) * pressure / density);
}
template<bool first_round>
__global__ void cuda_compute_step_factor(int nelr, float *variables, float *areas, float *step_factors)
{
if (first_round) {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int index = i / nelr + VAR_MOMENTUM;
float density = ff_variable[i/nelr + VAR_DENSITY];
float3 momentum;
momentum.x = ff_variable[index];
momentum.y = ff_variable[index + 1];
momentum.z = ff_variable[index + 2];
// float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr];
float density_energy = ff_variable[i/nelr + VAR_DENSITY_ENERGY];
float3 velocity;
compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c) ... but during time stepping this would later be divided by the area, so we fold both steps into one here
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
} else {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY * nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
momentum.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
momentum.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY * nelr];
float3 velocity;
compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c) ... but during time stepping this would later be divided by the area, so we fold both steps into one here
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
}
void compute_step_factor(int nelr, float *variables, float *areas, float *step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
cuda_compute_step_factor<false><<<Dg, Db>>>(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
//@findhao for the first iteration
void compute_step_factor2(int nelr, float *variables, float *areas, float *step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
cuda_compute_step_factor<true><<<Dg, Db>>>(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
}
/*
*
*
*/
template<bool first_round>
__global__ void cuda_compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
if (first_round) {
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int j, nb;
float3 normal;
float normal_len;
float factor;
int index2 = i / nelr + VAR_DENSITY;
// float density_i = variables[i + VAR_DENSITY * nelr];
float density_i = ff_variable[index2];
float3 momentum_i;
int index = i / nelr + VAR_MOMENTUM;
// momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
// momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
// momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
momentum_i.x = ff_variable[index];
momentum_i.y = ff_variable[index + 1];
momentum_i.z = ff_variable[index + 2];
// float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr];
float density_energy_i = ff_variable[i/nelr + VAR_DENSITY_ENERGY ];
float3 velocity_i;
compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for (j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j * nelr];
normal.x = normals[i + (j + 0 * NNB) * nelr];
normal.y = normals[i + (j + 1 * NNB) * nelr];
normal.z = normals[i + (j + 2 * NNB) * nelr];
normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z);
if (nb >= 0) // a legitimate neighbor
{
// density_nb = variables[nb + VAR_DENSITY * nelr];
// momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr];
// momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr];
// momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr];
// density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr];
density_nb = ff_variable[nb/nelr + VAR_DENSITY];
int index3 = nb/nelr + VAR_MOMENTUM;
momentum_nb.x = ff_variable[index3];
momentum_nb.y = ff_variable[index3 + 1];
momentum_nb.z = ff_variable[index3 + 2];
density_energy_nb = ff_variable[nb/nelr + VAR_DENSITY_ENERGY];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor * (density_i - density_nb);
flux_i_density_energy += factor * (density_energy_i - density_energy_nb);
flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x);
flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y);
flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f) * normal.x;
flux_i_density += factor * (momentum_nb.x + momentum_i.x);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (momentum_nb.y + momentum_i.y);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (momentum_nb.z + momentum_i.z);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z);
}
else if (nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x * pressure_i;
flux_i_momentum.y += normal.y * pressure_i;
flux_i_momentum.z += normal.z * pressure_i;
}
else if (nb == -2) // a far field boundary
{
factor = float(0.5f) * normal.x;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY * nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy;
} else {
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
int j, nb;
float3 normal;
float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY * nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM + 0) * nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM + 1) * nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM + 2) * nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY * nelr];
float3 velocity_i;
compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for (j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j * nelr];
normal.x = normals[i + (j + 0 * NNB) * nelr];
normal.y = normals[i + (j + 1 * NNB) * nelr];
normal.z = normals[i + (j + 2 * NNB) * nelr];
normal_len = sqrtf(normal.x * normal.x + normal.y * normal.y + normal.z * normal.z);
if (nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY * nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM + 0) * nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM + 1) * nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM + 2) * nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY * nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len * smoothing_coefficient * float(0.5f) * (speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor * (density_i - density_nb);
flux_i_density_energy += factor * (density_energy_i - density_energy_nb);
flux_i_momentum.x += factor * (momentum_i.x - momentum_nb.x);
flux_i_momentum.y += factor * (momentum_i.y - momentum_nb.y);
flux_i_momentum.z += factor * (momentum_i.z - momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f) * normal.x;
flux_i_density += factor * (momentum_nb.x + momentum_i.x);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (momentum_nb.y + momentum_i.y);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (momentum_nb.z + momentum_i.z);
flux_i_density_energy += factor * (flux_contribution_nb_density_energy.z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (flux_contribution_nb_momentum_x.z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (flux_contribution_nb_momentum_y.z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (flux_contribution_nb_momentum_z.z + flux_contribution_i_momentum_z.z);
}
else if (nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x * pressure_i;
flux_i_momentum.y += normal.y * pressure_i;
flux_i_momentum.z += normal.z * pressure_i;
}
else if (nb == -2) // a far field boundary
{
factor = float(0.5f) * normal.x;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 0] + momentum_i.x);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].x + flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f) * normal.y;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 1] + momentum_i.y);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].y + flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f) * normal.z;
flux_i_density += factor * (ff_variable[VAR_MOMENTUM + 2] + momentum_i.z);
flux_i_density_energy += factor * (ff_flux_contribution_density_energy[0].z + flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor * (ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor * (ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor * (ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY * nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM + 0) * nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM + 1) * nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM + 2) * nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY * nelr] = flux_i_density_energy;
}
}
void compute_flux(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
cuda_compute_flux<false><<<Dg, Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
void compute_flux2(int nelr, int *elements_surrounding_elements, float *normals, float *variables, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
cuda_compute_flux<true><<<Dg, Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
}
template<bool first_round>
__global__ void cuda_time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
if (first_round) {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
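// staged Runge-Kutta weighting: sub-step j scales the local time step by 1 / (RK + 1 - j)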
float factor = step_factors[i] / float(RK + 1 - j);
int index = i/nelr + VAR_MOMENTUM;
variables[i + VAR_DENSITY * nelr] = ff_variable[i/nelr + VAR_DENSITY] + factor * fluxes[i + VAR_DENSITY * nelr];
variables[i + VAR_DENSITY_ENERGY * nelr] = ff_variable[i/nelr + VAR_DENSITY_ENERGY ] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr];
variables[i + (VAR_MOMENTUM + 0) * nelr] = ff_variable[index] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr];
variables[i + (VAR_MOMENTUM + 1) * nelr] = ff_variable[index + 1] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr];
variables[i + (VAR_MOMENTUM + 2) * nelr] = ff_variable[index + 2] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr];
} else {
const int i = (blockDim.x * blockIdx.x + threadIdx.x);
float factor = step_factors[i] / float(RK + 1 - j);
variables[i + VAR_DENSITY * nelr] = old_variables[i + VAR_DENSITY * nelr] + factor * fluxes[i + VAR_DENSITY * nelr];
variables[i + VAR_DENSITY_ENERGY * nelr] = old_variables[i + VAR_DENSITY_ENERGY * nelr] + factor * fluxes[i + VAR_DENSITY_ENERGY * nelr];
variables[i + (VAR_MOMENTUM + 0) * nelr] = old_variables[i + (VAR_MOMENTUM + 0) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 0) * nelr];
variables[i + (VAR_MOMENTUM + 1) * nelr] = old_variables[i + (VAR_MOMENTUM + 1) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 1) * nelr];
variables[i + (VAR_MOMENTUM + 2) * nelr] = old_variables[i + (VAR_MOMENTUM + 2) * nelr] + factor * fluxes[i + (VAR_MOMENTUM + 2) * nelr];
}
}
void time_step(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
cuda_time_step<false><<<Dg, Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
void time_step2(int j, int nelr, float *old_variables, float *variables, float *step_factors, float *fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
cuda_time_step<true><<<Dg, Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("update failed");
}
/*
* Main function
*/
int main(int argc, char **argv)
{
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2)
{
std::cout << "specify data file name" << std::endl;
return 0;
}
const char *data_file_name = argv[1];
cudaDeviceProp prop;
int dev;
checkCudaErrors(cudaSetDevice(0));
checkCudaErrors(cudaGetDevice(&dev));
checkCudaErrors(cudaGetDeviceProperties(&prop, dev));
printf("Name: %s\n", prop.name);
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA * ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach) * ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed * float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed * float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM + 0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM + 1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM + 2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
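// total energy per unit volume for an ideal gas: rho * 0.5 * |v|^2 + p / (GAMMA - 1)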
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY] * (float(0.5f) * (ff_speed * ff_speed)) + (ff_pressure / float(GAMMA - 1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable + VAR_MOMENTUM + 0);
h_ff_momentum.y = *(h_ff_variable + VAR_MOMENTUM + 1);
h_ff_momentum.z = *(h_ff_variable + VAR_MOMENTUM + 2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
checkCudaErrors(cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR * sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)));
checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)));
checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)));
checkCudaErrors(cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)));
}
int nel;
int nelr;
// read in domain geometry
float *areas;
int *elements_surrounding_elements;
float *normals;
{
std::ifstream file(data_file_name);
file >> nel;
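// round the element count up to a multiple of BLOCK_SIZE_0 so the padded arrays divide evenly into full thread blocks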
nelr = BLOCK_SIZE_0 * ((nel / BLOCK_SIZE_0) + std::min(1, nel % BLOCK_SIZE_0));
float *h_areas = new float[nelr];
int *h_elements_surrounding_elements = new int[nelr * NNB];
float *h_normals = new float[nelr * NDIM * NNB];
// read in data
for (int i = 0; i < nel; i++)
{
file >> h_areas[i];
for (int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j * nelr];
if (h_elements_surrounding_elements[i + j * nelr] < 0)
h_elements_surrounding_elements[i + j * nelr] = -1;
h_elements_surrounding_elements[i + j * nelr]--; //it's coming in with Fortran numbering
for (int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k * NNB) * nelr];
h_normals[i + (j + k * NNB) * nelr] = -h_normals[i + (j + k * NNB) * nelr];
}
}
}
// fill in remaining data
int last = nel - 1;
for (int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for (int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j * nelr] = h_elements_surrounding_elements[last + j * nelr];
for (int k = 0; k < NDIM; k++)
h_normals[i + (j + k * NNB) * nelr] = h_normals[last + (j + k * NNB) * nelr];
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr * NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr * NNB);
normals = alloc<float>(nelr * NDIM * NNB);
upload<float>(normals, h_normals, nelr * NDIM * NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
float *variables = alloc<float>(nelr * NVAR);
initialize_variables(nelr, variables);
float *old_variables = alloc<float>(nelr * NVAR);
float *fluxes = alloc<float>(nelr * NVAR);
float *step_factors = alloc<float>(nelr);
// make sure all memory is fully allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
cudaMemset((void *)step_factors, 0, sizeof(float) * nelr);
// make sure CUDA isn't still doing something before we start timing
cudaThreadSynchronize();
// these need to be computed the first time in order to compute time step
std::cout << "Starting..." << std::endl;
StopWatchInterface *timer = 0;
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// for first iteration. In first iteration, compute_step_factor and compute_flux don't change array variables.
compute_step_factor2(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
compute_flux2(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step2(0, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
for (int j = 1; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
// Begin iterations
for (int i = 1; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr * NVAR);
// recompute the time step factors for this iteration before the RK sub-steps
compute_step_factor(nelr, variables, areas, step_factors);
getLastCudaError("compute_step_factor failed");
for (int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
getLastCudaError("compute_flux failed");
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
getLastCudaError("time_step failed");
}
}
cudaThreadSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
sdkStopTimer(&timer);
std::cout << (sdkGetAverageTimerValue(&timer) / 1000.0) / iterations << " seconds per iteration" << std::endl;
std::cout << "Saving solution..." << std::endl;
dump(variables, nel, nelr);
std::cout << "Saved solution..." << std::endl;
std::cout << "Cleaning up..." << std::endl;
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
std::cout << "Done..." << std::endl;
return 0;
}
|
cbebebe02d19d582f5ceeaff5fa64ad93229a6ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
namespace cv { namespace gpu { namespace device
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type4;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
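// double specialization: TypeTraits defines no 4-wide double vector type, so the four channels are written as two double2 stores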
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC2_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC3_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC4_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const hipStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
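// rows cover 2-, 3- and 4-channel merges; the column index is elem_size >> 1 (1, 2, 4, 8 bytes -> 0, 1, 2, 4), so slot 3 stays unused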
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "merge_caller");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC2_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC3_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC4_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const hipStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "split_caller");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| cbebebe02d19d582f5ceeaff5fa64ad93229a6ea.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
namespace cv { namespace gpu { namespace device
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type4;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
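// double specialization: TypeTraits defines no 4-wide double vector type, so the four channels are written as two double2 stores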
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC2_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC3_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC4_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
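// rows cover 2-, 3- and 4-channel merges; the column index is elem_size >> 1 (1, 2, 4, 8 bytes -> 0, 1, 2, 4), so slot 3 stays unused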
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "merge_caller");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC2_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC3_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC4_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
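            // Table layout note: rows correspond to 2-, 3- and 4-channel splits; columns are
            // indexed by elem_size1 >> 1, so 1-, 2-, 4- and 8-byte channels land in columns
            // 0, 1, 2 and 4 (column 3 is an unused placeholder, hence the null entries above).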
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "split_caller");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
6cddfc7a35d870e908a790f54b59d71322b88d48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
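// round iXSIZE / iYSIZE up to the next multiple of the block dimensions so the
// grid computed below covers the whole XSIZE x YSIZE matrix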
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(sum, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(sum, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(sum, dim3(gridBlock), dim3(threadBlock), 0, 0, a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6cddfc7a35d870e908a790f54b59d71322b88d48.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
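// round iXSIZE / iYSIZE up to the next multiple of the block dimensions so the
// grid computed below covers the whole XSIZE x YSIZE matrix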
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sum<<<gridBlock,threadBlock>>>(a,b,c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sum<<<gridBlock,threadBlock>>>(a,b,c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sum<<<gridBlock,threadBlock>>>(a,b,c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
dea0a207800db1638e0a99cd93ed122f0c60f154.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * pthreaded hw5, written by Adam Tygart and Ryan Hershberger
* Could be further optimized by pipelining read operations and not cyclically creating/destroying child threads
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Length of "lines" changes with every protein
* Thanks to wikipedia for the following pseudocode:
* function LCSLength(X[1..m], Y[1..n])
* C = array(0..m, 0..n)
* for i := 0..m
* C[i,0] = 0
* for j := 0..n
* C[0,j] = 0
* for i := 1..m
* for j := 1..n
* if X[i] = Y[j]
* C[i,j] := C[i-1,j-1] + 1
* else:
* C[i,j] := max(C[i,j-1], C[i-1,j])
* return C[m,n]
*/
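/*
 * Minimal host-side sketch of the pseudocode above, added purely as documentation of the
 * recurrence; the function name lcs_length_host is illustrative and nothing in this program
 * calls it.
 */
static int lcs_length_host(const char *x, int m, const char *y, int n) {
	int *c = (int *) calloc((size_t)(m + 1) * (n + 1), sizeof(int)); /* row 0 / column 0 stay 0 */
	int i, j, best;
	if (c == 0)
		return -1;
	for (i = 1; i <= m; i++) {
		for (j = 1; j <= n; j++) {
			if (x[i - 1] == y[j - 1])
				c[i * (n + 1) + j] = c[(i - 1) * (n + 1) + (j - 1)] + 1;
			else
				c[i * (n + 1) + j] = c[i * (n + 1) + (j - 1)] > c[(i - 1) * (n + 1) + j]
					? c[i * (n + 1) + (j - 1)] : c[(i - 1) * (n + 1) + j];
		}
	}
	best = c[m * (n + 1) + n];
	free(c);
	return best;
}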
FILE *f;
int comp_count;
int offset = 0;
#ifndef NUM_THREADS
#define NUM_THREADS 4
#endif
#ifndef WORK_UNIT
#define WORK_UNIT 400
#endif
#define QUEUE_SIZE NUM_THREADS*WORK_UNIT
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/*
* Calculate the LCS of the two strings.
*/
__device__ int MCSLength(char *str1, int len1, char* str2, int len2) {
int** arr = (int**) malloc(sizeof(int*)*(len1+1));
if ( arr == 0 ) {
printf("Couldn't allocate memory for the MCS array\n");
}
int i, j, local_max = 0;
	for (i = 0; i <= len1; i++) {
		arr[i] = (int*)malloc((len2+1)*sizeof(int));
		if ( arr[i] == 0 ) {
			printf("Couldn't allocate memory for the MCS subarray\n");
		}
		// zero the row so cells that never see a matching character read 0 instead of uninitialized memory
		for (j = 0; j <= len2; j++)
			arr[i][j] = 0;
	}
for (i = 1; i <= len1; i++) {
for (j = 1; j <= len2; j++) {
if (str1[i-1] == str2[j-1]) {
arr[i][j] = arr[i-1][j-1] + 1;
if (arr[i][j] > local_max)
local_max = arr[i][j];
}
}
}
for (i = 0; i <= len1; i++)
free(arr[i]);
free(arr);
return local_max;
}
/*
 * Read the file char by char. Headers start with '>' or ';' and are ignored until the newline.
 * Read the "gene" until we reach the next header. Returns the number of chars in buff[i].
*/
int readLine(char **buff, int i) {
int readchars = 0;
int commentline = 0, startedgene = 0;
int buffStepSize = 4000;
int buffSize = 4000;
buff[i] = (char*)malloc(sizeof(char)*buffSize);
char c;
do {
if (((readchars) >= buffSize) && (buffSize != 0)) {
buffSize += buffStepSize;
char* temp_buff = (char*)realloc(buff[i],sizeof(char)*buffSize);
buff[i] = temp_buff;
}
if (buff[i] == 0) {
printf("Couldn't allocate memory for the buffer\n");
exit(-2);
}
c = fgetc(f);
switch (c) {
case '\n':
commentline = 0;
break;
case ';':
case '>':
commentline = 1;
if (startedgene == 1) {
long curr = ftell(f);
fseek(f, curr-1, SEEK_SET);
return readchars;
}
break;
default:
if ( commentline == 0 ) {
startedgene = 1;
if (c != EOF)
buff[i][readchars++] = c;
}
}
} while (c != EOF);
return readchars;
}
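/*
 * Illustrative note (an assumption about the expected input, inferred from the parser above):
 * a FASTA-style file such as
 * >protein one
 * MKTAYIAK
 * QRQISFVK
 * >protein two
 * MSDNE
 * is buffered as two sequences, "MKTAYIAKQRQISFVK" and "MSDNE": lines starting with '>' or ';'
 * are treated as headers/comments and skipped, and the remaining characters of each record are
 * concatenated until the next header.
 */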
/*
* Is the worker function for a thread, calculate your chunk of the global data, calculate the MCS of each pair, copy the counts off to the global counts once locked
*/
__global__ void threaded_count(int* completed_count, int* counts, char* queue, int* lens) {
int local_work_unit = blockDim.x*blockIdx.x;
int local_counts[WORK_UNIT/2];
int local_count = 0;
int startPos = (threadIdx.x) + (local_work_unit);
int endPos = startPos + (local_work_unit);
char* str1;
char* str2;
int strlen1, strlen2;
int i, j, k;
for (i = 0; i < WORK_UNIT/2; i++) {
local_counts[i] = 0;
j = startPos + (i*2);
if ((lens[j] != 0) && (lens[j+1] != 0)) {
//dev_lens needs to hold starting positions of the current string in dev_queue
str1 = (char*) malloc(lens[j]+1*sizeof(char));
str2 = (char*) malloc(lens[j+1]+1*sizeof(char));
strlen1 = lens[j+1] - lens[j];
strlen2 = lens[j+2] - lens[j+1];
for (k = 0; k < strlen1; k++)
str1[k] = queue[lens[j] + k];
for (k = 0; k < strlen2; k++)
str2[k] = queue[lens[j+1] + k];
local_counts[i] = MCSLength(str1, strlen1, str2, strlen2);
free(str1);
free(str2);
local_count++;
}
else
break;
}
for (i = 0; i < WORK_UNIT/2; i++) {
counts[(startPos/2) + i] = local_counts[i];
}
atomicAdd(completed_count, local_count);
}
/*
 * Take a file name on the command line, open it and read portions of the file at a time. Start threads to calculate MCS. Find the max and average MCSs
*/
int main(int argc, char* argv[]) {
if (argc != 2 ) {
printf("Please specify a file on the command line\n");
exit(-1);
}
f = fopen(argv[1],"r");
if ( f == 0 ) {
printf("Couldn't open file\n");
exit(-1);
}
char **queue;
int *lens;
int *counts;
char *dev_queue;
int *dev_lens;
int *dev_counts;
//pthread
int i;
int perThread = WORK_UNIT;
int totalSize = QUEUE_SIZE;
int size = NUM_THREADS;
int numThreadsPerBlock = 100;
int numBlocks = size / numThreadsPerBlock;
int totalThreads = numThreadsPerBlock * numBlocks;
int* dev_completed_count;
hipMalloc((void**)&dev_completed_count, sizeof(int));
printf("we get this far!\n");
counts = (int*)calloc(sizeof(int),QUEUE_SIZE);
do {
queue = (char**)malloc(sizeof(char*)*QUEUE_SIZE);
printf("A\n");
lens = (int*)calloc(sizeof(int),QUEUE_SIZE+1);
hipMalloc((void**)&dev_lens, sizeof(int)*(QUEUE_SIZE +1));
printf("B\n");
int *temp_counts = (int*) realloc(counts, (QUEUE_SIZE + offset)/2 * sizeof(int));
printf("C\n");
if (( queue == 0 ) || (lens == 0) || (temp_counts == 0)) {
printf("Couldn't allocate memory for the work queues\n");
exit(-1);
}
counts = temp_counts;
printf("This is a TEST %d\n", QUEUE_SIZE);
int t = 0;
char *dev_queue_flat = (char *) malloc(sizeof(char));
char *temp_flat;
lens[0] = 0;
for (i = 0; i < QUEUE_SIZE; i++) {
lens[i+1] = t + readLine(queue, i);
temp_flat = (char *) realloc(dev_queue_flat, (lens[i+1] + 1) * sizeof(char));
dev_queue_flat = temp_flat;
int j;
for (j = 0; j <= lens[i+1] - t; j++)
dev_queue_flat[t+j] = queue[i][j];
t = lens[i+1];
if (( queue[i] == 0 )) {
printf("Couldn't allocate memory for the work subqueues\n");
exit(-1);
}
}
hipMalloc((void**)&dev_queue, (t * sizeof(char)));
hipMemcpy(dev_queue, dev_queue_flat, t*sizeof(char), hipMemcpyHostToDevice);
hipMemcpy(dev_lens, lens, QUEUE_SIZE*sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_counts, (QUEUE_SIZE*sizeof(int))/2);
hipMemset( dev_counts, 0, (QUEUE_SIZE*sizeof(int))/2);
printf("A1\n");
dim3 numBlocks(NUM_THREADS);
dim3 threadsPerBlock(WORK_UNIT);
hipLaunchKernelGGL(( threaded_count), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, dev_completed_count, dev_counts, dev_queue, dev_lens);
hipDeviceSynchronize();
int* temp = (int*) malloc(sizeof(int)*QUEUE_SIZE/2);
hipMemcpy(temp, dev_counts, (QUEUE_SIZE*sizeof(int))/2, hipMemcpyDeviceToHost);
for (i = 0; i < QUEUE_SIZE/2; i++)
counts[offset+i] = temp[i];
for (i = 0; i < QUEUE_SIZE; i++) {
free(queue[i]);
}
hipFree(dev_queue);
hipFree(dev_counts);
free(temp);
		free(dev_queue_flat); // dev_queue was already freed above; release the host-side staging buffer instead
free(queue);
hipFree(dev_lens);
free(lens);
offset += QUEUE_SIZE;
} while (!feof(f));
unsigned long total = 0;
int longest = 0, longest_loc = -1;
for (i = 0; i < comp_count; i++) {
total += counts[i];
if (counts[i] > longest) {
longest = counts[i];
longest_loc = i;
}
}
printf("Longest LCS: %d, is the %dth pair in the file\n", longest, longest_loc);
printf("Average: %Lf\n",((long double) total)/comp_count);
fclose(f);
free(counts);
return 0;
}
| dea0a207800db1638e0a99cd93ed122f0c60f154.cu | /*
 * pthreaded hw5, written by Adam Tygart and Ryan Hershberger
* Could be further optimized by pipelining read operations and not cyclically creating/destroying child threads
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Length of "lines" changes with every protein
* Thanks to wikipedia for the following pseudocode:
* function LCSLength(X[1..m], Y[1..n])
* C = array(0..m, 0..n)
* for i := 0..m
* C[i,0] = 0
* for j := 0..n
* C[0,j] = 0
* for i := 1..m
* for j := 1..n
* if X[i] = Y[j]
* C[i,j] := C[i-1,j-1] + 1
* else:
* C[i,j] := max(C[i,j-1], C[i-1,j])
* return C[m,n]
*/
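/*
 * Minimal host-side sketch of the pseudocode above, added purely as documentation of the
 * recurrence; the function name lcs_length_host is illustrative and nothing in this program
 * calls it.
 */
static int lcs_length_host(const char *x, int m, const char *y, int n) {
	int *c = (int *) calloc((size_t)(m + 1) * (n + 1), sizeof(int)); /* row 0 / column 0 stay 0 */
	int i, j, best;
	if (c == 0)
		return -1;
	for (i = 1; i <= m; i++) {
		for (j = 1; j <= n; j++) {
			if (x[i - 1] == y[j - 1])
				c[i * (n + 1) + j] = c[(i - 1) * (n + 1) + (j - 1)] + 1;
			else
				c[i * (n + 1) + j] = c[i * (n + 1) + (j - 1)] > c[(i - 1) * (n + 1) + j]
					? c[i * (n + 1) + (j - 1)] : c[(i - 1) * (n + 1) + j];
		}
	}
	best = c[m * (n + 1) + n];
	free(c);
	return best;
}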
FILE *f;
int comp_count;
int offset = 0;
#ifndef NUM_THREADS
#define NUM_THREADS 4
#endif
#ifndef WORK_UNIT
#define WORK_UNIT 400
#endif
#define QUEUE_SIZE NUM_THREADS*WORK_UNIT
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/*
* Calculate the LCS of the two strings.
*/
__device__ int MCSLength(char *str1, int len1, char* str2, int len2) {
int** arr = (int**) malloc(sizeof(int*)*(len1+1));
if ( arr == 0 ) {
printf("Couldn't allocate memory for the MCS array\n");
}
int i, j, local_max = 0;
	for (i = 0; i <= len1; i++) {
		arr[i] = (int*)malloc((len2+1)*sizeof(int));
		if ( arr[i] == 0 ) {
			printf("Couldn't allocate memory for the MCS subarray\n");
		}
		// zero the row so cells that never see a matching character read 0 instead of uninitialized memory
		for (j = 0; j <= len2; j++)
			arr[i][j] = 0;
	}
for (i = 1; i <= len1; i++) {
for (j = 1; j <= len2; j++) {
if (str1[i-1] == str2[j-1]) {
arr[i][j] = arr[i-1][j-1] + 1;
if (arr[i][j] > local_max)
local_max = arr[i][j];
}
}
}
for (i = 0; i <= len1; i++)
free(arr[i]);
free(arr);
return local_max;
}
/*
 * Read the file char by char. Headers start with '>' or ';' and are ignored until the newline.
 * Read the "gene" until we reach the next header. Returns the number of chars in buff[i].
*/
int readLine(char **buff, int i) {
int readchars = 0;
int commentline = 0, startedgene = 0;
int buffStepSize = 4000;
int buffSize = 4000;
buff[i] = (char*)malloc(sizeof(char)*buffSize);
char c;
do {
if (((readchars) >= buffSize) && (buffSize != 0)) {
buffSize += buffStepSize;
char* temp_buff = (char*)realloc(buff[i],sizeof(char)*buffSize);
buff[i] = temp_buff;
}
if (buff[i] == 0) {
printf("Couldn't allocate memory for the buffer\n");
exit(-2);
}
c = fgetc(f);
switch (c) {
case '\n':
commentline = 0;
break;
case ';':
case '>':
commentline = 1;
if (startedgene == 1) {
long curr = ftell(f);
fseek(f, curr-1, SEEK_SET);
return readchars;
}
break;
default:
if ( commentline == 0 ) {
startedgene = 1;
if (c != EOF)
buff[i][readchars++] = c;
}
}
} while (c != EOF);
return readchars;
}
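/*
 * Illustrative note (an assumption about the expected input, inferred from the parser above):
 * a FASTA-style file such as
 * >protein one
 * MKTAYIAK
 * QRQISFVK
 * >protein two
 * MSDNE
 * is buffered as two sequences, "MKTAYIAKQRQISFVK" and "MSDNE": lines starting with '>' or ';'
 * are treated as headers/comments and skipped, and the remaining characters of each record are
 * concatenated until the next header.
 */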
/*
* Is the worker function for a thread, calculate your chunk of the global data, calculate the MCS of each pair, copy the counts off to the global counts once locked
*/
__global__ void threaded_count(int* completed_count, int* counts, char* queue, int* lens) {
int local_work_unit = blockDim.x*blockIdx.x;
int local_counts[WORK_UNIT/2];
int local_count = 0;
int startPos = (threadIdx.x) + (local_work_unit);
int endPos = startPos + (local_work_unit);
char* str1;
char* str2;
int strlen1, strlen2;
int i, j, k;
for (i = 0; i < WORK_UNIT/2; i++) {
local_counts[i] = 0;
j = startPos + (i*2);
if ((lens[j] != 0) && (lens[j+1] != 0)) {
//dev_lens needs to hold starting positions of the current string in dev_queue
str1 = (char*) malloc(lens[j]+1*sizeof(char));
str2 = (char*) malloc(lens[j+1]+1*sizeof(char));
strlen1 = lens[j+1] - lens[j];
strlen2 = lens[j+2] - lens[j+1];
for (k = 0; k < strlen1; k++)
str1[k] = queue[lens[j] + k];
for (k = 0; k < strlen2; k++)
str2[k] = queue[lens[j+1] + k];
local_counts[i] = MCSLength(str1, strlen1, str2, strlen2);
free(str1);
free(str2);
local_count++;
}
else
break;
}
for (i = 0; i < WORK_UNIT/2; i++) {
counts[(startPos/2) + i] = local_counts[i];
}
atomicAdd(completed_count, local_count);
}
/*
 * Take a file name on the command line, open it and read portions of the file at a time. Start threads to calculate MCS. Find the max and average MCSs
*/
int main(int argc, char* argv[]) {
if (argc != 2 ) {
printf("Please specify a file on the command line\n");
exit(-1);
}
f = fopen(argv[1],"r");
if ( f == 0 ) {
printf("Couldn't open file\n");
exit(-1);
}
char **queue;
int *lens;
int *counts;
char *dev_queue;
int *dev_lens;
int *dev_counts;
//pthread
int i;
int perThread = WORK_UNIT;
int totalSize = QUEUE_SIZE;
int size = NUM_THREADS;
int numThreadsPerBlock = 100;
int numBlocks = size / numThreadsPerBlock;
int totalThreads = numThreadsPerBlock * numBlocks;
int* dev_completed_count;
cudaMalloc((void**)&dev_completed_count, sizeof(int));
printf("we get this far!\n");
counts = (int*)calloc(sizeof(int),QUEUE_SIZE);
do {
queue = (char**)malloc(sizeof(char*)*QUEUE_SIZE);
printf("A\n");
lens = (int*)calloc(sizeof(int),QUEUE_SIZE+1);
cudaMalloc((void**)&dev_lens, sizeof(int)*(QUEUE_SIZE +1));
printf("B\n");
int *temp_counts = (int*) realloc(counts, (QUEUE_SIZE + offset)/2 * sizeof(int));
printf("C\n");
if (( queue == 0 ) || (lens == 0) || (temp_counts == 0)) {
printf("Couldn't allocate memory for the work queues\n");
exit(-1);
}
counts = temp_counts;
printf("This is a TEST %d\n", QUEUE_SIZE);
int t = 0;
char *dev_queue_flat = (char *) malloc(sizeof(char));
char *temp_flat;
lens[0] = 0;
for (i = 0; i < QUEUE_SIZE; i++) {
lens[i+1] = t + readLine(queue, i);
temp_flat = (char *) realloc(dev_queue_flat, (lens[i+1] + 1) * sizeof(char));
dev_queue_flat = temp_flat;
int j;
for (j = 0; j <= lens[i+1] - t; j++)
dev_queue_flat[t+j] = queue[i][j];
t = lens[i+1];
if (( queue[i] == 0 )) {
printf("Couldn't allocate memory for the work subqueues\n");
exit(-1);
}
}
cudaMalloc((void**)&dev_queue, (t * sizeof(char)));
cudaMemcpy(dev_queue, dev_queue_flat, t*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(dev_lens, lens, QUEUE_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_counts, (QUEUE_SIZE*sizeof(int))/2);
cudaMemset( dev_counts, 0, (QUEUE_SIZE*sizeof(int))/2);
printf("A1\n");
dim3 numBlocks(NUM_THREADS);
dim3 threadsPerBlock(WORK_UNIT);
threaded_count<<< numBlocks, threadsPerBlock >>>(dev_completed_count, dev_counts, dev_queue, dev_lens);
cudaThreadSynchronize();
int* temp = (int*) malloc(sizeof(int)*QUEUE_SIZE/2);
cudaMemcpy(temp, dev_counts, (QUEUE_SIZE*sizeof(int))/2, cudaMemcpyDeviceToHost);
for (i = 0; i < QUEUE_SIZE/2; i++)
counts[offset+i] = temp[i];
for (i = 0; i < QUEUE_SIZE; i++) {
free(queue[i]);
}
cudaFree(dev_queue);
cudaFree(dev_counts);
free(temp);
		free(dev_queue_flat); // dev_queue was already freed above; release the host-side staging buffer instead
free(queue);
cudaFree(dev_lens);
free(lens);
offset += QUEUE_SIZE;
} while (!feof(f));
unsigned long total = 0;
int longest = 0, longest_loc = -1;
for (i = 0; i < comp_count; i++) {
total += counts[i];
if (counts[i] > longest) {
longest = counts[i];
longest_loc = i;
}
}
printf("Longest LCS: %d, is the %dth pair in the file\n", longest, longest_loc);
printf("Average: %Lf\n",((long double) total)/comp_count);
fclose(f);
free(counts);
return 0;
}
|
57a26d5f0ac28d2d90f5f7b9058ac914c38d137a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "nvgraph.h"
/* Single-Source Shortest Path (SSSP)
* Find the shortest path from a source node to every other node.
Initially :
V = 6
E = 10
Edges W
0 -> 1 0.50
0 -> 2 0.50
2 -> 0 0.33
2 -> 1 0.33
2 -> 4 0.33
3 -> 4 0.50
3 -> 5 0.50
4 -> 3 0.50
4 -> 5 0.50
5 -> 3 1.00
Source oriented representation (CSC):
destination_offsets {0, 1, 3, 4, 6, 8, 10}
source_indices {2, 0, 2, 0, 4, 5, 2, 3, 3, 4}
W0 = {0.33, 0.50, 0.33, 0.50, 0.50, 1.00, 0.33, 0.50, 0.50, 1.00}
*/
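/* Minimal sketch (not part of the original sample): expands a CSC description like the one
 * above back into "src -> dst (weight)" edges, which is a quick way to sanity-check
 * destination_offsets / source_indices / weights before handing them to nvgraph. It could be
 * called as printEdgesFromCSC(destination_offsets_h, source_indices_h, weights_h, n) once the
 * host arrays in main() are filled in; nothing in this sample invokes it. */
static void printEdgesFromCSC(const int * destination_offsets, const int * source_indices,
                              const float * weights, int n)
{
    for (int dst = 0; dst < n; dst++)
    {
        for (int e = destination_offsets[dst]; e < destination_offsets[dst + 1]; e++)
        {
            printf("%d -> %d (%.2f)\n", source_indices[e], dst, weights[e]);
        }
    }
}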
void check_status(nvgraphStatus_t status)
{
if ((int)status != NVGRAPH_STATUS_SUCCESS)
{
printf("ERROR : %d\n",status);
exit(0);
}
}
void setWeights(float * weights)
{
weights[0] = 0.333333f;
weights[1] = 0.500000f;
weights[2] = 0.333333f;
weights[3] = 0.500000f;
weights[4] = 0.500000f;
weights[5] = 1.000000f;
weights[6] = 0.333333f;
weights[7] = 0.500000f;
weights[8] = 0.500000f;
weights[9] = 0.500000f;
}
void setDestOffsets(int * destOffsets)
{
destOffsets[0] = 0;
destOffsets[1] = 1;
destOffsets[2] = 3;
destOffsets[3] = 4;
destOffsets[4] = 6;
destOffsets[5] = 8;
destOffsets[6] = 10;
}
void setSourceIndices(int * sourceIndices)
{
sourceIndices[0] = 2;
sourceIndices[1] = 0;
sourceIndices[2] = 2;
sourceIndices[3] = 0;
sourceIndices[4] = 4;
sourceIndices[5] = 5;
sourceIndices[6] = 2;
sourceIndices[7] = 3;
sourceIndices[8] = 3;
sourceIndices[9] = 4;
}
void getAndPrintAllPathLengths(nvgraphHandle_t handle, nvgraphGraphDescr_t graph, float * vertex_data, size_t n)
{
for (int source_vert = 0; source_vert < n; source_vert++)
{
// Find Single-Source-Shortest-Path
check_status(nvgraphSssp(handle, graph, 0, &source_vert, 0));
// Get and print result
check_status(nvgraphGetVertexData(handle, graph, (void*)vertex_data, 0));
for (int j = 0; j < n; j++)
{
printf("Shortest path from %d to %d is of length %f\n", source_vert, j, vertex_data[j]);
}
printf("\n\n");
}
}
int main(int argc, char ** argv)
{
const size_t n = 6, num_edges= 10, vertex_numsets = 3, edge_numsets = 1;
// nvgraph variables
nvgraphHandle_t handle;
nvgraphGraphDescr_t graph;
hipDataType edge_dimT = HIP_R_32F;
// Allocate host data
int *destination_offsets_h = (int *) malloc((n+1)*sizeof(int));
int *source_indices_h = (int *) malloc(num_edges*sizeof(int));
float *weights_h = (float *) malloc(num_edges*sizeof(float));
float *vertex_data = (float *) malloc(n * sizeof(float));
void **vertex_dim = (void **) malloc(vertex_numsets*sizeof(void*));
hipDataType *vertex_dimT = (hipDataType *) malloc(vertex_numsets*sizeof(hipDataType));
nvgraphCSCTopology32I_t CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st));
// Initialize host data
vertex_dim[0] = (void*)vertex_data;
vertex_dimT[0] = HIP_R_32F;
setWeights(weights_h);
setDestOffsets(destination_offsets_h);
setSourceIndices(source_indices_h);
// Starting nvgraph
check_status(nvgraphCreate(&handle));
check_status(nvgraphCreateGraphDescr(handle, &graph));
CSC_input->nvertices = n;
CSC_input->nedges = num_edges;
CSC_input->destination_offsets = destination_offsets_h;
CSC_input->source_indices = source_indices_h;
    // Set graph connectivity and properties (transfers)
check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32));
check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT));
check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT));
check_status(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0));
getAndPrintAllPathLengths(handle, graph, vertex_data, n);
//Clean
check_status(nvgraphDestroyGraphDescr(handle, graph));
check_status(nvgraphDestroy(handle));
free(destination_offsets_h);
free(source_indices_h);
free(weights_h);
free(vertex_data);
free(vertex_dim);
free(vertex_dimT);
free(CSC_input);
return EXIT_SUCCESS;
}
| 57a26d5f0ac28d2d90f5f7b9058ac914c38d137a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "nvgraph.h"
/* Single-Source Shortest Path (SSSP)
* Find the shortest path from a source node to every other node.
Initially :
V = 6
E = 10
Edges W
0 -> 1 0.50
0 -> 2 0.50
2 -> 0 0.33
2 -> 1 0.33
2 -> 4 0.33
3 -> 4 0.50
3 -> 5 0.50
4 -> 3 0.50
4 -> 5 0.50
5 -> 3 1.00
Source oriented representation (CSC):
destination_offsets {0, 1, 3, 4, 6, 8, 10}
source_indices {2, 0, 2, 0, 4, 5, 2, 3, 3, 4}
W0 = {0.33, 0.50, 0.33, 0.50, 0.50, 1.00, 0.33, 0.50, 0.50, 1.00}
*/
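/* Minimal sketch (not part of the original sample): expands a CSC description like the one
 * above back into "src -> dst (weight)" edges, which is a quick way to sanity-check
 * destination_offsets / source_indices / weights before handing them to nvgraph. It could be
 * called as printEdgesFromCSC(destination_offsets_h, source_indices_h, weights_h, n) once the
 * host arrays in main() are filled in; nothing in this sample invokes it. */
static void printEdgesFromCSC(const int * destination_offsets, const int * source_indices,
                              const float * weights, int n)
{
    for (int dst = 0; dst < n; dst++)
    {
        for (int e = destination_offsets[dst]; e < destination_offsets[dst + 1]; e++)
        {
            printf("%d -> %d (%.2f)\n", source_indices[e], dst, weights[e]);
        }
    }
}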
void check_status(nvgraphStatus_t status)
{
if ((int)status != NVGRAPH_STATUS_SUCCESS)
{
printf("ERROR : %d\n",status);
exit(0);
}
}
void setWeights(float * weights)
{
weights[0] = 0.333333f;
weights[1] = 0.500000f;
weights[2] = 0.333333f;
weights[3] = 0.500000f;
weights[4] = 0.500000f;
weights[5] = 1.000000f;
weights[6] = 0.333333f;
weights[7] = 0.500000f;
weights[8] = 0.500000f;
weights[9] = 0.500000f;
}
void setDestOffsets(int * destOffsets)
{
destOffsets[0] = 0;
destOffsets[1] = 1;
destOffsets[2] = 3;
destOffsets[3] = 4;
destOffsets[4] = 6;
destOffsets[5] = 8;
destOffsets[6] = 10;
}
void setSourceIndices(int * sourceIndices)
{
sourceIndices[0] = 2;
sourceIndices[1] = 0;
sourceIndices[2] = 2;
sourceIndices[3] = 0;
sourceIndices[4] = 4;
sourceIndices[5] = 5;
sourceIndices[6] = 2;
sourceIndices[7] = 3;
sourceIndices[8] = 3;
sourceIndices[9] = 4;
}
void getAndPrintAllPathLengths(nvgraphHandle_t handle, nvgraphGraphDescr_t graph, float * vertex_data, size_t n)
{
for (int source_vert = 0; source_vert < n; source_vert++)
{
// Find Single-Source-Shortest-Path
check_status(nvgraphSssp(handle, graph, 0, &source_vert, 0));
// Get and print result
check_status(nvgraphGetVertexData(handle, graph, (void*)vertex_data, 0));
for (int j = 0; j < n; j++)
{
printf("Shortest path from %d to %d is of length %f\n", source_vert, j, vertex_data[j]);
}
printf("\n\n");
}
}
int main(int argc, char ** argv)
{
const size_t n = 6, num_edges= 10, vertex_numsets = 3, edge_numsets = 1;
// nvgraph variables
nvgraphHandle_t handle;
nvgraphGraphDescr_t graph;
cudaDataType_t edge_dimT = CUDA_R_32F;
// Allocate host data
int *destination_offsets_h = (int *) malloc((n+1)*sizeof(int));
int *source_indices_h = (int *) malloc(num_edges*sizeof(int));
float *weights_h = (float *) malloc(num_edges*sizeof(float));
float *vertex_data = (float *) malloc(n * sizeof(float));
void **vertex_dim = (void **) malloc(vertex_numsets*sizeof(void*));
cudaDataType_t *vertex_dimT = (cudaDataType_t *) malloc(vertex_numsets*sizeof(cudaDataType_t));
nvgraphCSCTopology32I_t CSC_input = (nvgraphCSCTopology32I_t) malloc(sizeof(struct nvgraphCSCTopology32I_st));
// Initialize host data
vertex_dim[0] = (void*)vertex_data;
vertex_dimT[0] = CUDA_R_32F;
setWeights(weights_h);
setDestOffsets(destination_offsets_h);
setSourceIndices(source_indices_h);
// Starting nvgraph
check_status(nvgraphCreate(&handle));
check_status(nvgraphCreateGraphDescr(handle, &graph));
CSC_input->nvertices = n;
CSC_input->nedges = num_edges;
CSC_input->destination_offsets = destination_offsets_h;
CSC_input->source_indices = source_indices_h;
    // Set graph connectivity and properties (transfers)
check_status(nvgraphSetGraphStructure(handle, graph, (void*)CSC_input, NVGRAPH_CSC_32));
check_status(nvgraphAllocateVertexData(handle, graph, vertex_numsets, vertex_dimT));
check_status(nvgraphAllocateEdgeData (handle, graph, edge_numsets, &edge_dimT));
check_status(nvgraphSetEdgeData(handle, graph, (void*)weights_h, 0));
getAndPrintAllPathLengths(handle, graph, vertex_data, n);
//Clean
check_status(nvgraphDestroyGraphDescr(handle, graph));
check_status(nvgraphDestroy(handle));
free(destination_offsets_h);
free(source_indices_h);
free(weights_h);
free(vertex_data);
free(vertex_dim);
free(vertex_dimT);
free(CSC_input);
return EXIT_SUCCESS;
}
|
c801ad5394e72830207117da0a1512511f9c49a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricUpSamplingTrilinear.cu"
#else
#include "../linear_upsampling.h"
static inline void THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputDepth, int inputHeight, int inputWidth,
int outputDepth, int outputHeight, int outputWidth) {
THArgCheck(inputDepth > 0 && inputHeight > 0 && inputWidth > 0
             && outputDepth > 0 && outputHeight > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (D: %d, H: %d, W: %d) output (D: %d, H: %d, W: %d)",
inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, !input->is_empty() && input->dim() == 5, 2, input,
"non-empty 5D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 5, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 5, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 5, 2, outputDepth);
THCUNN_check_dim_size(state, gradOutput, 5, 3, outputHeight);
THCUNN_check_dim_size(state, gradOutput, 5, 4, outputWidth);
}
}
void THNN_(VolumetricUpSamplingTrilinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputDepth,
int outputHeight,
int outputWidth,
bool align_corners)
{
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputDepth = THCTensor_(size)(state, input, 2);
int inputHeight = THCTensor_(size)(state, input, 3);
int inputWidth = THCTensor_(size)(state, input, 4);
THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(state, input, NULL,
nbatch, channels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resize5d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputDepth, outputHeight, outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<scalar_t, 5> idata = toDeviceTensor<scalar_t, 5>(state, input);
THCDeviceTensor<scalar_t, 5> odata = toDeviceTensor<scalar_t, 5>(state, output);
THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 && outputDepth > 0 && outputHeight > 0 && outputWidth > 0);
const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads) ,
0 , stream, num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata);
THCudaCheck(hipGetLastError());
}
void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputDepth,
int inputHeight,
int inputWidth,
int outputDepth,
int outputHeight,
int outputWidth,
bool align_corners)
{
THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(state, NULL, gradOutput,
nbatch, nchannels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<scalar_t, 5> data1 = toDeviceTensor<scalar_t, 5>(state, gradInput);
THCDeviceTensor<scalar_t, 5> data2 = toDeviceTensor<scalar_t, 5>(state, gradOutput);
const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
hipStream_t stream = THCState_getCurrentStream(state);
hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<scalar_t ,accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)),
dim3(num_threads), 0, stream, num_kernels, rdepth, rheight, rwidth, align_corners, data1, data2);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
| c801ad5394e72830207117da0a1512511f9c49a5.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricUpSamplingTrilinear.cu"
#else
#include "../linear_upsampling.h"
static inline void THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(THCState *state,
THCTensor *input, THCTensor *gradOutput,
int nBatch, int nChannels,
int inputDepth, int inputHeight, int inputWidth,
int outputDepth, int outputHeight, int outputWidth) {
THArgCheck(inputDepth > 0 && inputHeight > 0 && inputWidth > 0
             && outputDepth > 0 && outputHeight > 0 && outputWidth > 0, 2,
"input and output sizes should be greater than 0,"
" but got input (D: %d, H: %d, W: %d) output (D: %d, H: %d, W: %d)",
inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
if (input != NULL) {
THCUNN_argCheck(state, !input->is_empty() && input->dim() == 5, 2, input,
"non-empty 5D input tensor expected but got: %s");
}
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 5, 0, nBatch);
THCUNN_check_dim_size(state, gradOutput, 5, 1, nChannels);
THCUNN_check_dim_size(state, gradOutput, 5, 2, outputDepth);
THCUNN_check_dim_size(state, gradOutput, 5, 3, outputHeight);
THCUNN_check_dim_size(state, gradOutput, 5, 4, outputWidth);
}
}
void THNN_(VolumetricUpSamplingTrilinear_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int outputDepth,
int outputHeight,
int outputWidth,
bool align_corners)
{
int nbatch = THCTensor_(size)(state, input, 0);
int channels = THCTensor_(size)(state, input, 1);
int inputDepth = THCTensor_(size)(state, input, 2);
int inputHeight = THCTensor_(size)(state, input, 3);
int inputWidth = THCTensor_(size)(state, input, 4);
THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(state, input, NULL,
nbatch, channels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
THCUNN_assertSameGPU(state, 2, input, output);
THCTensor_(resize5d)(state, output,
THCTensor_(size)(state, input, 0),
THCTensor_(size)(state, input, 1),
outputDepth, outputHeight, outputWidth);
THCTensor_(zero)(state, output);
THCDeviceTensor<scalar_t, 5> idata = toDeviceTensor<scalar_t, 5>(state, input);
THCDeviceTensor<scalar_t, 5> odata = toDeviceTensor<scalar_t, 5>(state, output);
THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 && outputDepth > 0 && outputHeight > 0 && outputWidth > 0);
const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
caffe_gpu_interp2_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads ,
0 , stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, idata, odata);
THCudaCheck(cudaGetLastError());
}
void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)(
THCState *state,
THCTensor *gradOutput,
THCTensor *gradInput,
int nbatch,
int nchannels,
int inputDepth,
int inputHeight,
int inputWidth,
int outputDepth,
int outputHeight,
int outputWidth,
bool align_corners)
{
THNN_(VolumetricUpSamplingTrilinear_shapeCheck)
(state, NULL, gradOutput,
nbatch, nchannels,
inputDepth, inputHeight, inputWidth,
outputDepth, outputHeight, outputWidth);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCUNN_assertSameGPU(state, 2, gradOutput, gradInput);
THCTensor_(resize5d)(state, gradInput, nbatch, nchannels, inputDepth, inputHeight, inputWidth);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<scalar_t, 5> data1 = toDeviceTensor<scalar_t, 5>(state, gradInput);
THCDeviceTensor<scalar_t, 5> data2 = toDeviceTensor<scalar_t, 5>(state, gradOutput);
const accreal rdepth = linear_upsampling_compute_scale<accreal>(inputDepth, outputDepth, align_corners);
const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners);
const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners);
const int num_kernels = outputDepth * outputHeight * outputWidth;
const int num_threads =
THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock;
cudaStream_t stream = THCState_getCurrentStream(state);
caffe_gpu_interp2_kernel_backward<scalar_t ,accreal> <<<THCCeilDiv(num_kernels, num_threads),
num_threads, 0, stream>>>(num_kernels, rdepth, rheight, rwidth, align_corners, data1, data2);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
}
#endif
|
412549ed5d42d3f59d40f228d3efe6efa70a7d87.hip | // !!! This is a file automatically generated by hipify!!!
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
hipError_t e = hipGetLastError();
if (e != hipSuccess)
{
cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
__device__ float square_value (float a)
{
return a * a;
}
__global__ void square_array (float *a, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if (ind < n)
{
float val = a[ind];
a[ind] = square_value(val);
}
}
int main(int argc,char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 10;
float *a = new float[n];
for(int i=0; i<n; i++) a[i] = i;
// CPU computation
for(int i=0; i<n; i++)
{
float val = a[i];
val = val*val;
a[i] = val;
}
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
cout << endl;
// GPU computation
// reinit data
for(int i=0; i<n; i++) a[i] = i;
// ###
// ### TODO: Implement the "square array" operation on the GPU and store the result in "a"
// ###
// ### Notes:
// ### 1. Remember to free all GPU arrays after the computation
// ### 2. Always use the macro CUDA_CHECK after each CUDA call, e.g. "hipMalloc(...); CUDA_CHECK;"
// ### For convenience this macro is defined directly in this file, later we will only include "aux.h"
// initialize the array on GPU
float *d_a = NULL;
size_t nbytes = n * sizeof(float);
hipMalloc(&d_a, nbytes); CUDA_CHECK;
// move from host to device memory
hipMemcpy(d_a, a, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
// initialize block and grid size
dim3 block = dim3(2, 1, 1);
dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
// dispatch the kernel
hipLaunchKernelGGL(( square_array) , dim3(grid), dim3(block), 0, 0, d_a, n);
// copy result back to host memory
hipMemcpy(a, d_a, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
hipFree(d_a); CUDA_CHECK;
// print result
cout << "GPU:" << endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
cout << endl;
// free CPU arrays
delete[] a;
}
| 412549ed5d42d3f59d40f228d3efe6efa70a7d87.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
cudaError_t e = cudaGetLastError();
if (e != cudaSuccess)
{
cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
exit(1);
}
}
__device__ float square_value (float a)
{
return a * a;
}
__global__ void square_array (float *a, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if (ind < n)
{
float val = a[ind];
a[ind] = square_value(val);
}
}
int main(int argc,char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 10;
float *a = new float[n];
for(int i=0; i<n; i++) a[i] = i;
// CPU computation
for(int i=0; i<n; i++)
{
float val = a[i];
val = val*val;
a[i] = val;
}
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
cout << endl;
// GPU computation
// reinit data
for(int i=0; i<n; i++) a[i] = i;
// ###
// ### TODO: Implement the "square array" operation on the GPU and store the result in "a"
// ###
// ### Notes:
// ### 1. Remember to free all GPU arrays after the computation
// ### 2. Always use the macro CUDA_CHECK after each CUDA call, e.g. "cudaMalloc(...); CUDA_CHECK;"
// ### For convenience this macro is defined directly in this file, later we will only include "aux.h"
// initialize the array on GPU
float *d_a = NULL;
size_t nbytes = n * sizeof(float);
cudaMalloc(&d_a, nbytes); CUDA_CHECK;
// move from host to device memory
cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// initialize block and grid size
dim3 block = dim3(2, 1, 1);
dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
// dispatch the kernel
square_array <<<grid, block>>> (d_a, n);
// copy result back to host memory
cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
// free the device memory
cudaFree(d_a); CUDA_CHECK;
// print result
cout << "GPU:" << endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << endl;
cout << endl;
// free CPU arrays
delete[] a;
}
|
4ace60b95a9c99991c38d8ff2b3d8aaed9a19eaf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/regex/regex.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings;
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array matched (begin,end) values
  column_device_view const d_repls;  // replacement strings
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0;
size_type ch_pos = 0;
// initialize the working ranges memory to -1's
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
auto begin = static_cast<int32_t>(ch_pos);
auto end = static_cast<int32_t>(nchars);
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr =
thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
return range.first == ch_pos;
});
if (itr != d_ranges + number_of_patterns) {
// match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = static_cast<int32_t>(nbytes);
}
};
} // namespace
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent(), stream, mr);
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
auto d_flags = get_character_flags_table();
// compile regexes into device objects
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
thrust::host_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
regex_insts = ::max(regex_insts, prog->insts_counts());
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog));
}
// copy all the reprog_device instances to a device memory array
rmm::device_buffer progs_buffer{sizeof(reprog_device) * progs.size()};
CUDA_TRY(hipMemcpyAsync(progs_buffer.data(),
progs.data(),
progs.size() * sizeof(reprog_device),
hipMemcpyHostToDevice,
stream.value()));
reprog_device* d_progs = reinterpret_cast<reprog_device*>(progs_buffer.data());
// create working buffer for ranges pairs
rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
auto d_found_ranges = found_ranges.data();
// create child columns
// std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
auto children = [&] {
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
return make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
else if (regex_insts <= RX_MEDIUM_INSTS)
return make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
else
return make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
}();
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_re(strings, patterns, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 4ace60b95a9c99991c38d8ff2b3d8aaed9a19eaf.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/regex/regex.cuh>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
// this is a [begin,end) pair of character positions when a substring is matched
using found_range = thrust::pair<size_type, size_type>;
/**
* @brief This functor handles replacing strings by applying the compiled regex patterns
* and inserting the corresponding new string within the matched range of characters.
*
* The logic includes computing the size of each string and also writing the output.
*
* The stack is used to keep progress on evaluating the regex instructions on each string.
* So the size of the stack is in proportion to the number of instructions in the given regex
* pattern.
*
* There are three call types based on the number of regex instructions in the given pattern.
* Small to medium instruction lengths can use the stack effectively though smaller executes faster.
* Longer patterns require global memory. Shorter patterns are common in data cleaning.
*/
template <size_t stack_size>
struct replace_multi_regex_fn {
column_device_view const d_strings;
reprog_device* progs; // array of regex progs
size_type number_of_patterns;
found_range* d_found_ranges; // working array matched (begin,end) values
  column_device_view const d_repls;  // replacement strings
int32_t* d_offsets{}; // these are null when
char* d_chars{}; // only computing size
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) {
if (!d_chars) d_offsets[idx] = 0;
return;
}
u_char data1[stack_size];
u_char data2[stack_size];
auto const d_str = d_strings.element<string_view>(idx);
auto const nchars = d_str.length(); // number of characters in input string
auto nbytes = d_str.size_bytes(); // number of bytes in input string
auto in_ptr = d_str.data(); // input pointer
auto out_ptr = d_chars ? d_chars + d_offsets[idx] : nullptr;
found_range* d_ranges = d_found_ranges + (idx * number_of_patterns);
size_type lpos = 0;
size_type ch_pos = 0;
// initialize the working ranges memory to -1's
thrust::fill(thrust::seq, d_ranges, d_ranges + number_of_patterns, found_range{-1, 1});
// process string one character at a time
while (ch_pos < nchars) {
// this minimizes the regex-find calls by only calling it for stale patterns
// -- those that have not previously matched up to this point (ch_pos)
for (size_type ptn_idx = 0; ptn_idx < number_of_patterns; ++ptn_idx) {
if (d_ranges[ptn_idx].first >= ch_pos) // previously matched here
continue; // or later in the string
reprog_device prog = progs[ptn_idx];
prog.set_stack_mem(data1, data2);
auto begin = static_cast<int32_t>(ch_pos);
auto end = static_cast<int32_t>(nchars);
if (!prog.is_empty() && prog.find(idx, d_str, begin, end) > 0)
d_ranges[ptn_idx] = found_range{begin, end}; // found a match
else
d_ranges[ptn_idx] = found_range{nchars, nchars}; // this pattern is done
}
// all the ranges have been updated from each regex match;
// look for any that match at this character position (ch_pos)
auto itr =
thrust::find_if(thrust::seq, d_ranges, d_ranges + number_of_patterns, [ch_pos](auto range) {
return range.first == ch_pos;
});
if (itr != d_ranges + number_of_patterns) {
// match found, compute and replace the string in the output
size_type ptn_idx = static_cast<size_type>(itr - d_ranges);
size_type begin = d_ranges[ptn_idx].first;
size_type end = d_ranges[ptn_idx].second;
string_view d_repl = d_repls.size() > 1 ? d_repls.element<string_view>(ptn_idx)
: d_repls.element<string_view>(0);
auto spos = d_str.byte_offset(begin);
auto epos = d_str.byte_offset(end);
nbytes += d_repl.size_bytes() - (epos - spos);
if (out_ptr) { // copy unmodified content plus new replacement string
out_ptr = copy_and_increment(out_ptr, in_ptr + lpos, spos - lpos);
out_ptr = copy_string(out_ptr, d_repl);
lpos = epos;
}
ch_pos = end - 1;
}
++ch_pos;
}
if (out_ptr) // copy the remainder
memcpy(out_ptr, in_ptr + lpos, d_str.size_bytes() - lpos);
else
d_offsets[idx] = static_cast<int32_t>(nbytes);
}
};
} // namespace
std::unique_ptr<column> replace_re(
strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto strings_count = strings.size();
if (strings_count == 0) return make_empty_strings_column(stream, mr);
if (patterns.empty()) // no patterns; just return a copy
return std::make_unique<column>(strings.parent(), stream, mr);
CUDF_EXPECTS(!repls.has_nulls(), "Parameter repls must not have any nulls");
auto d_strings = column_device_view::create(strings.parent(), stream);
auto d_repls = column_device_view::create(repls.parent(), stream);
auto d_flags = get_character_flags_table();
// compile regexes into device objects
size_type regex_insts = 0;
std::vector<std::unique_ptr<reprog_device, std::function<void(reprog_device*)>>> h_progs;
thrust::host_vector<reprog_device> progs;
for (auto itr = patterns.begin(); itr != patterns.end(); ++itr) {
auto prog = reprog_device::create(*itr, d_flags, strings_count, stream);
regex_insts = std::max(regex_insts, prog->insts_counts());
progs.push_back(*prog);
h_progs.emplace_back(std::move(prog));
}
// copy all the reprog_device instances to a device memory array
rmm::device_buffer progs_buffer{sizeof(reprog_device) * progs.size()};
CUDA_TRY(cudaMemcpyAsync(progs_buffer.data(),
progs.data(),
progs.size() * sizeof(reprog_device),
cudaMemcpyHostToDevice,
stream.value()));
reprog_device* d_progs = reinterpret_cast<reprog_device*>(progs_buffer.data());
// create working buffer for ranges pairs
rmm::device_uvector<found_range> found_ranges(patterns.size() * strings_count, stream);
auto d_found_ranges = found_ranges.data();
// create child columns
// std::pair<std::unique_ptr<column>, std::unique_ptr<column>> children(nullptr, nullptr);
auto children = [&] {
// Each invocation is predicated on the stack size which is dependent on the number of regex
// instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS))
return make_strings_children(
replace_multi_regex_fn<RX_STACK_SMALL>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
else if (regex_insts <= RX_MEDIUM_INSTS)
return make_strings_children(
replace_multi_regex_fn<RX_STACK_MEDIUM>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
else
return make_strings_children(
replace_multi_regex_fn<RX_STACK_LARGE>{
*d_strings, d_progs, static_cast<size_type>(progs.size()), d_found_ranges, *d_repls},
strings_count,
strings.null_count(),
stream,
mr);
}();
return make_strings_column(strings_count,
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace_re(strings_column_view const& strings,
std::vector<std::string> const& patterns,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_re(strings, patterns, repls, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
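// A usage sketch of the multi-pattern API defined above, kept as a comment: the
// `input_column`/`repl_column` names and the pattern strings are hypothetical,
// and it assumes the public header supplies the usual default memory resource.
//
//   std::vector<std::string> patterns{
//       R"(\d{3}-\d{3}-\d{4})",   // phone-number-like tokens
//       R"(\d{3}-\d{2}-\d{4})"};  // SSN-like tokens
//   // `repl_column` holds either one replacement string shared by all patterns
//   // or one replacement per pattern (see the d_repls handling in the functor).
//   auto scrubbed = cudf::strings::replace_re(
//       cudf::strings_column_view(input_column->view()), patterns,
//       cudf::strings_column_view(repl_column->view()));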
|
edea4025a8b5f53755e7180694496dcb96978d56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#define width 100
#define height 72
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,255,0,255,255,0,0,0,0,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,255,0,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,
0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,
0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,0,
0,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
255,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[width * height];
__global__ void detect_edges(unsigned char *input, unsigned char *output) {
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the edge calculation
y = i / width; // row of the pixel of interest
x = i - (width * y); // column within that row
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
output[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1)
+ (input[h] * -1);
if (r >= 0) {
output[i] = 255;
} else
{
output[i] = 0;
}
}
}
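/* The kernel above is, in effect, a 4-neighbour Laplacian high-pass filter on
 * the luminance image (a sketch of the arithmetic, not extra functionality):
 *
 *   r = 4*p(x,y) - p(x,y-1) - p(x,y+1) - p(x-1,y) - p(x+1,y)
 *
 * which corresponds to the 3x3 convolution mask
 *
 *    0 -1  0
 *   -1  4 -1
 *    0 -1  0
 *
 * thresholded at zero (r >= 0 gives 255, otherwise 0). With the 100-block by
 * 72-thread launch used in main(), blockIdx.x * 72 + threadIdx.x enumerates
 * each of the width * height = 7200 pixels exactly once.
 */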
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
hipMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
hipMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
hipMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), hipMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( detect_edges), dim3(100),dim3(72), 0, 0, d_image, d_results);
hipDeviceSynchronize();
hipMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), hipMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
hipFree(d_image);
hipFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("Cuda Image Processing ");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
| edea4025a8b5f53755e7180694496dcb96978d56.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <GL/glut.h>
#include <GL/gl.h>
#include <malloc.h>
#include <signal.h>
#define width 100
#define height 72
unsigned char image[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,
0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,255,0,255,255,0,0,0,0,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,255,0,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,
0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,
0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,
0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,0,
0,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,0,255,255,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,
255,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,
0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
255,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,255,255,
255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,
0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,
255,255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,255,255,255,255,255,0,0,0,0,0,0,0,0,255,255,
255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,255,255,255,255,255,0,0,0,0,0,
0,0,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,
0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,
255,255,255,255,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,255,255,255,255,
255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
unsigned char results[width * height];
__global__ void detect_edges(unsigned char *input, unsigned char *output) {
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to x,y used for the calculation
int r; // the result of the edge calculation
y = i / width; // row of the pixel of interest
x = i - (width * y); // column within that row
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
output[i] = 0;
} else {
b = i + width;
d = i - 1;
f = i + 1;
h = i - width;
r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1)
+ (input[h] * -1);
if (r >= 0) {
output[i] = 255;
} else
{
output[i] = 0;
}
}
}
void tidy_and_exit() {
exit(0);
}
void sigint_callback(int signal_number){
printf("\nInterrupt from keyboard\n");
tidy_and_exit();
}
static void display() {
glClear(GL_COLOR_BUFFER_BIT);
glRasterPos4i(-1, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, image);
glRasterPos4i(0, -1, 0, 1);
glDrawPixels(width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, results);
glFlush();
}
static void key_pressed(unsigned char key, int x, int y) {
switch(key){
case 27: // escape
tidy_and_exit();
break;
default:
printf("\nPress escape to exit\n");
break;
}
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(int argc, char **argv) {
unsigned char *d_results;
unsigned char *d_image;
cudaMalloc((void**)&d_image, sizeof(unsigned char) * (width * height));
cudaMalloc((void**)&d_results, sizeof(unsigned char) * (width * height));
cudaMemcpy(d_image, &image, sizeof(unsigned char) * (width * height), cudaMemcpyHostToDevice);
signal(SIGINT, sigint_callback);
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
detect_edges<<<100,72>>>(d_image, d_results);
cudaThreadSynchronize();
cudaMemcpy(&results, d_results, sizeof(unsigned char) * (width * height), cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
cudaFree(d_image);
cudaFree(d_results);
glutInit(&argc, argv);
glutInitWindowSize(width * 2,height);
glutInitDisplayMode(GLUT_SINGLE | GLUT_LUMINANCE);
glutCreateWindow("Cuda Image Processing ");
glutDisplayFunc(display);
glutKeyboardFunc(key_pressed);
glClearColor(0.0, 1.0, 0.0, 1.0);
glutMainLoop();
tidy_and_exit();
return 0;
}
|
42e3caee37eecd167956afd8afc6b971de8d7dd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.device().type() == at::DeviceType::CUDA);
TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch());
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, TensorTypeId::CUDATensorId);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got ", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
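// A small worked example of the key-sort shuffle used above (values are
// illustrative only): with n = 4 and random keys {7, 2, 9, 4},
//   thrust::sequence            -> shuffled = {0, 1, 2, 3}
//   sort_by_key(keys, shuffled) -> keys = {2, 4, 7, 9}, shuffled = {1, 3, 0, 2}
// so `shuffled` ends up holding a permutation of 0..n-1 ordered by the random
// keys; collisions between equal keys are the main source of bias.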
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
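// A concrete illustration of the precision issue handled above: a double has a
// 52-bit mantissa, so an int64_t such as (1LL << 62) + 1 cannot be represented
// exactly and rounds to 1LL << 62 before the sqrt is taken. In that case
// bXb_cX4 != (int64_t)(sr * sr) above, and the bounded binary search recovers
// the exact floor of the root instead of trusting the rounded estimate.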
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
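// A worked example with illustrative sizes: for a 4x3 tril with offset 0 the
// trapezoid's first row has f = 1 element, so the doubled f above is 2 and
// b = 1. Linear index x = 4 gives
//   row = floor((-1 + sqrt(1*1 + 8*4)) / 2) = floor((sqrt(33) - 1) / 2) = 2
//   col = 4 - (2 + 2 - 1) * 2 / 2           = 1
// which is indeed the 5th lower-triangular coordinate (0,0), (1,0), (1,1),
// (2,0), (2,1), ... of that matrix.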
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
// >= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
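// A worked example with illustrative sizes: for a 3x4 triu with offset 1 there
// is no top rectangle and the bottom trapezoid's first row has f = 3 elements.
// Linear index x = 4 gives
//   row = floor((7 - sqrt(7*7 - 8*4)) / 2) = floor((7 - sqrt(17)) / 2) = 1
//   col = 4 - (6 - 1 + 1) * 1 / 2 + 1      = 2
// and the kernel then adds the column offset (1 here), yielding (1, 3) -- the
// 5th upper-triangular coordinate (0,1), (0,2), (0,3), (1,2), (1,3), ... of
// that matrix.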
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
| 42e3caee37eecd167956afd8afc6b971de8d7dd4.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
return at::native::eye_out_cuda(result, n, /*m=*/-1);
}
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
if(m < 0) {
m = n;
}
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) {
AT_ASSERT(options.device().type() == at::DeviceType::CUDA);
TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch());
TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
check_size_nonnegative(size);
auto* allocator = at::cuda::getCUDADeviceAllocator();
int64_t nelements = prod_intlist(size);
auto dtype = options.dtype();
auto storage_impl = c10::make_intrusive<StorageImpl>(
dtype,
nelements,
allocator->allocate(nelements * dtype.itemsize()),
allocator,
/*resizeable=*/true);
auto tensor = detail::make_tensor<TensorImpl>(storage_impl, TensorTypeId::CUDATensorId);
// Default TensorImpl has size [0]
if (size.size() != 1 || size[0] != 0) {
tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
}
auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous);
tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
return tensor;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
auto t = at::native::empty_cuda({0}, options);
at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride);
return t;
}
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
TORCH_CHECK(n >= 0, "n must be non-negative, got", n);
check_supported_max_int_with_precision(n, result);
result.resize_({n});
if (n < 30000) { // For small inputs, we offload it to CPU instead.
auto result_cpu = at::empty({n}, result.options().device(kCPU));
randperm_out(result_cpu, n, generator);
return result.copy_(result_cpu);
}
#if 0
// This if condition should never be true because if n >= 30000 and the tensor has a Half type,
// check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here
// for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this.
if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid.
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
return result.copy_(randperm_out_cuda(result_float, n, generator));
}
#endif
// Generate random values for the keys array
AT_DISPATCH_ALL_TYPES(
result.scalar_type(), "randperm_out_cuda", [&] {
auto keys = at::empty(result.sizes(), result.options()).random_(generator);
auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>());
// shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it
// points to a new tensor.
Tensor shuffled;
thrust::device_ptr<scalar_t> shuffled_data;
if (result.is_contiguous()) {
shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>());
} else {
shuffled = at::empty(n, result.options());
shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>());
}
auto state = globalContext().getTHCState();
THCThrustAllocator thrustAlloc(state);
auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream());
thrust::sequence(policy, shuffled_data, shuffled_data + n);
// Use the sorted order of keys to rearrange the result array
thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data);
if (!result.is_contiguous()) {
result.copy_(shuffled);
}
}
);
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// overkill for most cases where double's precision suffices.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be within the range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (but may equal) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
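// The closed form above can be cross-checked against a brute-force
// enumeration of the trapezoid. The commented-out host helper below is an
// illustrative sketch only (the name brute_force_tril_check and the use of
// <cassert>/<cmath> are assumptions, and it is not part of this build): it
// walks the trapezoid row by row, where row r holds f + r elements, and
// asserts that the quadratic-root formula recovers the same (row, col) for
// small sizes, where double precision is exact.
//
// #include <cassert>
// #include <cmath>
// static void brute_force_tril_check(int64_t f, int64_t nrows) {
//   int64_t x = 0; // linear index, ordered by row and then column
//   for (int64_t row = 0; row < nrows; row++) {
//     for (int64_t col = 0; col < f + row; col++, x++) {
//       // row = floor((-b + sqrt(b^2 - 4c)) / 2) with b = 2f - 1, c = -2x
//       int64_t b = 2 * f - 1;
//       int64_t r = static_cast<int64_t>(
//           std::floor((-b + std::sqrt(static_cast<double>(b * b + 8 * x))) / 2));
//       int64_t c = x - ((2 * f + r - 1) * r) / 2;
//       assert(r == row && c == col);
//     }
//   }
// }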
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula
// of the root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
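// A quick numeric check of the formula above (values are illustrative, not
// taken from any caller): for a bottom trapezoid whose first row holds
// f = 3 elements, the linear order is (0,0),(0,1),(0,2),(1,1),(1,2),(2,2).
// For x = 4: after f <<= 1 we have f = 6, b = -7, cX4 = 32,
// sqrt(b*b - cX4) = sqrt(17) ~= 4.123, so row = floor((7 - 4.123)/2) = 1 and
// col = 4 - ((6 - 1 + 1)*1 >> 1) + 1 = 2, i.e. (1,2) as expected.
// For x = 5: sqrt(49 - 40) = 3, row = 2,
// col = 5 - ((6 - 2 + 1)*2 >> 1) + 2 = 2, i.e. (2,2).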
} // namespace
template <typename scalar_t>
__global__
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, options);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some large test cases for the fallback binary search path are disabled by
// default to speed up CI tests and to avoid OOM errors. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
check_args(row, col, options);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, options);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.data_ptr<scalar_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
});
}
return tensor;
}
}} // namespace at::native
|
7bdc8f78f25fcfaeaee68c7bcd2c8c0101370f85.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__global__ void standard(float *ptr)
{
*ptr = powf(*ptr, 2.0f);
}
__global__ void intrinsic(float *ptr)
{
*ptr = __powf(*ptr, 2.0f);
}
//int main()
//{
// float value = 23;
// int SIZE = sizeof(float);
//
// float *d_val;
// hipMalloc((void**)&d_val, SIZE);
// hipMemcpy(d_val, &value, SIZE, hipMemcpyHostToDevice);
// standard << <1, 1 >> > (d_val);
// intrinsic << <1, 1 >> > (d_val);
// hipDeviceSynchronize();
//
// hipDeviceReset();
// return 0;
//} | 7bdc8f78f25fcfaeaee68c7bcd2c8c0101370f85.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void standard(float *ptr)
{
*ptr = powf(*ptr, 2.0f);
}
__global__ void intrinsic(float *ptr)
{
*ptr = __powf(*ptr, 2.0f);
}
//int main()
//{
// float value = 23;
// int SIZE = sizeof(float);
//
// float *d_val;
// cudaMalloc((void**)&d_val, SIZE);
// cudaMemcpy(d_val, &value, SIZE, cudaMemcpyHostToDevice);
// standard << <1, 1 >> > (d_val);
// intrinsic << <1, 1 >> > (d_val);
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//} |
03c859564e35661466c32bf6e4ce5db0b1fd861c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "process_baseband.h"
// quantities for D'Agostino normality test (see wikipedia)
#define NK float(NKURTO)
#define mu1 (-6./(NK+1))
#define mu2 ((24.*NK*(NK-2)*(NK-3))/((NK+1)*(NK+1)*(NK+3)*(NK+5)))
#define g1 (6.*(NK*NK-5*NK+2)/((NK+7)*(NK+9))*sqrt( (6.*(NK+3)*(NK+5))/(NK*(NK-2)*(NK-3)) ))
#define A (6.+(8./g1)*(2./g1 + sqrt(1. + 4./(g1*g1))))
#define Z2_1 sqrt(4.5*A)
#define Z2_2 (1-2./(9*A))
#define Z2_3 sqrt(2./(mu2*(A-4)))
#define NKb float(NFFT)
#define mu1b (-6./(NKb+1))
#define mu2b ((24.*NKb*(NKb-2)*(NKb-3))/((NKb+1)*(NKb+1)*(NKb+3)*(NKb+5)))
#define g1b (6.*(NKb*NKb-5*NKb+2)/((NKb+7)*(NKb+9))*sqrt( (6.*(NKb+3)*(NKb+5))/(NKb*(NKb-2)*(NKb-3)) ))
#define Ab (6.+(8./g1b)*(2./g1b + sqrt(1. + 4./(g1b*g1b))))
#define Z2b_1 sqrt(4.5*Ab)
#define Z2b_2 (1-2./(9*Ab))
#define Z2b_3 sqrt(2./(mu2b*(Ab-4)))
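// The macros above map a sample kurtosis to an approximately standard-normal
// D'Agostino Z2 statistic; compute_dagostino below applies exactly this
// transform on the GPU. A commented-out host-side sketch, illustrative only
// (the helper name host_dagostino_z2 is an assumption, DAG_INF is taken to
// come from process_baseband.h as in compute_dagostino, and this is not part
// of the build):
//
// static float host_dagostino_z2 (float kurtosis)
// {
//   float t = (1 - 2./A) / (1. + (kurtosis - 3. - mu1)*Z2_3);
//   if (t <= 0)
//     return DAG_INF; // transform undefined; flag as bad
//   return fabsf (Z2_1*(Z2_2 - powf (t, 1./3)));
// }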
//convert unsigned char time array to float
__global__ void convertarray (hipfftReal *time, unsigned char *utime, size_t n)
{
for (int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n; i += blockDim.x*gridDim.x)
{
if (utime[i] == 0)
time[i] = 0;
else
time[i] = (hipfftReal)(utime[i])/128-1;
}
}
__global__ void kurtosis (hipfftReal *time, hipfftReal *pow, hipfftReal *kur)
{
// calculate the variance (power) and kurtosis for voltage statistics in
// relatively short windows. Do this by first copying from global memory,
// then using a hard-coded tree reduction. Right now, it is set up to
// use either 250 or 500 samples, so must be invoked with either 256
// or 512 threads.
// because each thread block works on a chunk of data that's commensurate
// with the packing of samples into the buffer, specifically with respect
// to the two polarizations, I think we don't need to worry at all about
// a thread block crossing the polarization. The output will simply
// contain the statistics for pol 0 first, then pol 1.
volatile __shared__ float data2[256];
volatile __shared__ float data4[256];
unsigned int tid = threadIdx.x;
size_t offset = blockIdx.x*NKURTO;
if (tid < 250)
{
if (NKURTO==500) {
// load up two values from global memory in this case
data2[tid] = time[offset + tid]*time[offset + tid];
float tmp = time[offset + tid + 250]*time[offset + tid + 250];
data4[tid] = data2[tid]*data2[tid] + tmp*tmp;
data2[tid] += tmp;
}
else {
data2[tid] = time[offset + tid]*time[offset + tid];
data4[tid] = data2[tid]*data2[tid];
}
}
else
data2[tid] = data4[tid] = 0;
__syncthreads ();
if (tid < 128)
{
data2[tid] += data2[tid + 128];
data4[tid] += data4[tid + 128];
}
__syncthreads ();
if (tid < 64)
{
data2[tid] += data2[tid + 64];
data4[tid] += data4[tid + 64];
}
__syncthreads ();
if (tid < 32)
{
data2[tid] += data2[tid + 32];
data4[tid] += data4[tid + 32];
data2[tid] += data2[tid + 16];
data4[tid] += data4[tid + 16];
data2[tid] += data2[tid + 8];
data4[tid] += data4[tid + 8];
data2[tid] += data2[tid + 4];
data4[tid] += data4[tid + 4];
data2[tid] += data2[tid + 2];
data4[tid] += data4[tid + 2];
}
if (tid==0)
{
data2[tid] += data2[tid + 1];
data4[tid] += data4[tid + 1];
pow[blockIdx.x] = data2[0]/NKURTO;
kur[blockIdx.x] = data4[0]/NKURTO/(pow[blockIdx.x]*pow[blockIdx.x]);
}
}
__global__ void compute_dagostino (hipfftReal* kur, hipfftReal* dag, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// I'm not sure why I have a zero check here; the only time it should
// happen is if all of the samples are also 0.
float dag1 = DAG_INF, dag2 = DAG_INF;
if (kur[i] != 0.)
{
float t = (1-2./A)/(1.+(kur[i]-3.-mu1)*Z2_3);
if (t > 0)
dag1 = fabsf (Z2_1*(Z2_2 - powf (t,1./3)));
}
if (kur[i+n] != 0.)
{
float t = (1-2./A)/(1.+(kur[i+n]-3.-mu1)*Z2_3);
if (t > 0)
dag2 = fabsf (Z2_1*(Z2_2 - powf (t,1./3)));
}
// duplicate values to make bookkeeping in block_kurtosis easier
dag[i] = dag[i+n] = fmaxf (dag1, dag2);
}
}
// compute a filter-bank level statistic
// *** Importantly, this applies a fine-time filtering during calculation
// *** of the statistic, by zero-weighting any NKURTO-sized blocks of samples
// *** that exceed the threshold.
__global__ void block_kurtosis (hipfftReal* pow, hipfftReal* kur, hipfftReal* dag, hipfftReal* pow_block, hipfftReal* kur_block)
{
volatile __shared__ float data2[256];
volatile __shared__ float data4[256];
volatile __shared__ unsigned char wt[256];
// run with 256 threads; break it up such that we either do 5 blocks (for
// NKURTO==500) or 10 blocks (for NKURTO=250)
unsigned int tid = threadIdx.x;
unsigned int warp_id = tid / 32;
unsigned int warp_tid = tid - warp_id*32;
if (warp_tid > 24)
{
data2[tid] = 0;
data4[tid] = 0;
wt[tid] = 0;
}
else
{
// each thread block does 8 filterbank blocks (one for each warp)
int idx = (blockIdx.x*8 + warp_id)*(NFFT/NKURTO) + warp_tid;
//wt[tid] = (dag[idx]<DAG_THRESH) && (dag[idx]>-DAG_THRESH);
// updated now that dag array is already absolute valued
wt[tid] = dag[idx]<DAG_THRESH;
data2[tid] = wt[tid]*pow[idx];
data4[tid] = wt[tid]*kur[idx]*pow[idx]*pow[idx];
if (NKURTO==250)
{
// if using finer time bins, add in the contribution from
// the other pieces (see comment above)
__syncthreads ();
idx += 25;
//float w = (dag[idx]<DAG_THRESH) && (dag[idx]>-DAG_THRESH);
float w = dag[idx]<DAG_THRESH;
data2[tid] += w*pow[idx];
data4[tid] += w*kur[idx]*pow[idx]*pow[idx];
wt[tid] += w;
}
}
if (warp_tid > 15)
return;
// do sum within each warp
data2[tid] += data2[tid + 16];
data4[tid] += data4[tid + 16];
wt[tid] += wt[tid + 16];
data2[tid] += data2[tid + 8];
data4[tid] += data4[tid + 8];
wt[tid] += wt[tid + 8];
data2[tid] += data2[tid + 4];
data4[tid] += data4[tid + 4];
wt[tid] += wt[tid + 4];
data2[tid] += data2[tid + 2];
data4[tid] += data4[tid + 2];
wt[tid] += wt[tid + 2];
data2[tid] += data2[tid + 1];
data4[tid] += data4[tid + 1];
wt[tid] += wt[tid + 1];
if (0==warp_tid)
{
if (wt[tid] > 0)
{
float p = pow_block[blockIdx.x*8+warp_id] = data2[tid]/wt[tid];
kur_block[blockIdx.x*8+warp_id] = data4[tid]/wt[tid]/(p*p);
}
else
{
pow_block[blockIdx.x*8+warp_id] = 0;
kur_block[blockIdx.x*8+warp_id] = 0;
}
}
}
// TODO -- this isn't quite right, because there won't necessarily be
// NFFT samples in the weighted version; since we're computing many fewer,
// we don't need to precompute. However, empirically it doesn't seem to make
// much of a difference.
__global__ void compute_dagostino2 (hipfftReal* kur, hipfftReal* dag, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
float dag1 = DAG_INF, dag2 = DAG_INF;
if (kur[i] != 0.)
{
float t = (1-2./Ab)/(1.+(kur[i]-3.-mu1b)*Z2b_3);
if (t > 0)
dag1 = fabsf (Z2b_1*(Z2b_2 - powf (t,1./3)));
}
if (kur[i+n] != 0.)
{
float t = (1-2./Ab)/(1.+(kur[i+n]-3.-mu1b)*Z2b_3);
if (t > 0)
dag2 = fabsf (Z2b_1*(Z2b_2 - powf (t,1./3)));
}
dag[i] = dag[i+n] = fmaxf (dag1, dag2);
}
}
__global__ void apply_kurtosis (
hipfftReal *in, hipfftReal *out,
hipfftReal *dag, hipfftReal *dag_fb,
hipfftReal* norms)
{
unsigned int tid = threadIdx.x;
// D'Agostino kurtosis TS; already absolute valued and gathered
// over the two polarizations, but is duplicated, so just use the
// entry in the second polarization; will also make it easier if
// we revert to independent polarizations
//bool bad = (dag[blockIdx.x] > DAG_THRESH) || (dag_fb[blockIdx.x/(NFFT/NKURTO)] > DAG_FB_THRESH);
bool bad = (dag[blockIdx.x] > DAG_THRESH);
#ifdef DEBUG_WEIGHTS
// if debugging, set the weights to 0 for the second half of all samples in
// the chunk for 2nd pol and for the final eighth for the 1st pol
int time_idx = blockIdx.x * NKURTO;
bool c1 = time_idx > 3*(VLITE_RATE/(SEG_PER_SEC*2));
bool c2 = (time_idx < VLITE_RATE/SEG_PER_SEC) && (time_idx > (7*VLITE_RATE/SEG_PER_SEC)/8);
bad = c1 || c2;
#endif
if (bad)
{
// zero voltages
if (tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = 0;
if (NKURTO==500)
out[offset + tid + 250] = 0;
}
}
else
{
// if copying data, copy it
if (in != out && tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = in[offset + tid];
if (NKURTO==500)
out[offset + tid + 250] = in[offset + tid + 250];
}
// add one to the filterbank block samples for weights
if (tid==0)
{
atomicAdd (norms + (blockIdx.x*NKURTO)/NFFT, float(NKURTO)/NFFT);
}
}
}
__global__ void apply_kurtosis_fake (
hipfftReal *in, hipfftReal *out,
hipfftReal *dag, hipfftReal *dag_fb,
hipfftReal* norms)
{
unsigned int tid = threadIdx.x;
if (in != out && tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = in[offset + tid];
if (NKURTO==500)
out[offset + tid + 250] = in[offset + tid + 250];
}
// add one to the filterbank block samples for weights
if (tid==0)
{
atomicAdd (norms + (blockIdx.x*NKURTO)/NFFT, float(NKURTO)/NFFT);
}
}
__global__ void histogram ( unsigned char *utime, unsigned int* histo, size_t n)
{
__shared__ unsigned int lhisto[512];
lhisto[threadIdx.x] = 0;
__syncthreads ();
int i = threadIdx.x + blockIdx.x*blockDim.x;
for (; i < n/2; i += blockDim.x*gridDim.x)
atomicAdd (lhisto+utime[i], 1);
for (; i < n; i += blockDim.x*gridDim.x)
atomicAdd ((lhisto+256)+utime[i], 1);
__syncthreads ();
// MUST run with 512 threads for this global accumulation to work
atomicAdd ( histo+threadIdx.x, lhisto[threadIdx.x]);
}
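// Illustrative launch sketch (not taken from this file; the grid size,
// stream and device pointers are assumptions). The 512-thread requirement
// comes from the final per-thread atomicAdd, which must cover all 512 bins
// (256 per polarization):
//
// hipLaunchKernelGGL (histogram, dim3(64), dim3(512), 0, stream,
//     udat_dev, histo_dev, n);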
__global__ void set_frb_delays (float* frb_delays, float dm)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN) return;
double freq = 0.384 - (i*0.064)/NCHAN;
// delays are scaled by FFT timestep
double scale = 4.15e-3*dm*SEG_PER_SEC*FFTS_PER_SEG;
frb_delays[i] = float(scale/(freq*freq)-scale/(0.384*0.384));
}
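// Worked example (numbers are illustrative, not from this file): the cold
// plasma delay is ~4.15e-3 s * DM / f_GHz^2, so for DM = 100 pc cm^-3 the
// bottom of the 320--384 MHz band lags the top by roughly
// 4.15e-3 * 100 * (1/0.320^2 - 1/0.384^2) ~= 1.24 s; the factor
// SEG_PER_SEC*FFTS_PER_SEG in scale converts that lag into the FFT time
// steps stored in frb_delays.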
__global__ void inject_frb ( hipfftComplex *fft_out, float* frb_delays,
int nfft_since_frb, float frb_width, float frb_amp)
{
// NB frb_width must be in FFT time steps!
// this is the channel; each thread does one channel for all time steps
// and both polarizations
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN) return;
// for now, don't try to do any interpolation, just round to the nearest
// time index that the FRB encounters this channel
int time_idx_lo = int(frb_delays[i]+0.5)-nfft_since_frb;
int time_idx_hi = int(frb_delays[i]+frb_width+0.5)-nfft_since_frb;
// if the earliest is after this chunk, return
if (time_idx_lo >= FFTS_PER_SEG) return;
// if the latest time precedes this chunk, return
if (time_idx_hi < 0) return;
// ensure indices are within data bounds
if (time_idx_lo < 0) time_idx_lo = 0;
if (time_idx_hi >= FFTS_PER_SEG) time_idx_hi = FFTS_PER_SEG-1;
// otherwise, there is a portion of the FRB in this chunk, so loop over
// the time steps that it passes through channel i
for (int time_idx=time_idx_lo; time_idx<= time_idx_hi; time_idx++)
{
fft_out[time_idx*NCHAN+i].x *= frb_amp;
fft_out[time_idx*NCHAN+i].y *= frb_amp;
}
// do the next polarization
fft_out += FFTS_PER_SEG*NCHAN;
for (int time_idx=time_idx_lo; time_idx<= time_idx_hi; time_idx++)
{
fft_out[time_idx*NCHAN+i].x *= frb_amp;
fft_out[time_idx*NCHAN+i].y *= frb_amp;
}
}
__global__ void detect_and_normalize2 (hipfftComplex *fft_out, hipfftReal* bp,
float scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += FFTS_PER_SEG*NCHAN;
bp += NCHAN;
i -= NCHAN;
}
// initialize bandpass to mean of first block
float bp_l = bp[i];
if (0. == bp_l) {
for (int j = i; j < FFTS_PER_SEG*NCHAN; j+= NCHAN)
bp_l += fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
bp_l /= FFTS_PER_SEG;
}
for (int j = i; j < FFTS_PER_SEG*NCHAN; j+= NCHAN)
{
// detect
float pow = fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
// update bandpass
bp_l = scale*pow + (1-scale)*bp_l;
// scale to bandpass and mean-subtract; this assumes the powers are
// chi^2_2 distributed, var(pow)=4, mean(pow)=std(pow)=2. Therefore
// dividing by mean will give standard deviation of 1 centred at 1.
fft_out[j].x = pow/bp_l-1;
}
// write out current bandpass
bp[i] = bp_l;
}
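// Note on the update above: bp <- scale*pow + (1-scale)*bp is a single-pole
// IIR (exponential moving average) with an e-folding memory of roughly
// 1/scale spectra, i.e. it behaves like an average over ~2/scale recent
// spectra. For example (value assumed, not from this file), scale = 1e-3
// tracks slow bandpass/gain drift over ~1000 spectra while the chi^2_2
// fluctuations of individual spectra are strongly suppressed.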
__global__ void detect_and_normalize3 (hipfftComplex *fft_out, hipfftReal* kur_weights_dev, hipfftReal* bp, float scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += FFTS_PER_SEG*NCHAN;
kur_weights_dev += FFTS_PER_SEG;
bp += NCHAN;
i -= NCHAN;
}
// initialize bandpass to mean of first block
float bp_l = bp[i];
if (0. == bp_l) {
int good_samples = 0;
for (int j = i, time_idx=0; j < FFTS_PER_SEG*NCHAN; j+= NCHAN,time_idx++) {
float w = kur_weights_dev[time_idx];
if (0.==w)
continue;
good_samples++;
bp_l += (fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y)/w;
}
if (0==good_samples) {
// entire first block is bad; not sure what is best, try setting to
// 1 and hope for the best?
bp_l = 1;
}
else
bp_l /= good_samples;
}
for (int j = i, time_idx=0; j < FFTS_PER_SEG*NCHAN; j+= NCHAN,time_idx++)
{
// detect
//float w = kur_weights_dev[time_idx]*kur_weights_dev[time_idx];
// NB that this formulation works because the weights are 0 or 1; if
// we write out the expectation for the Fourier transform of the voltage
// squared, the weights go in squared, so we normalize by the sum over
// the weights squared, which is the same as the sum of the weights
// (here kur_weights_dev) since they are 0 or 1
float w = kur_weights_dev[time_idx];
if (0.==w) {
// if no samples are available, replace with mean bandpass
fft_out[j].x = 0;
}
else {
float pow = (fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y)/w;
// apply a rough filter; values in excess of 11xmean shouldn't happen
// more often than every 1.5 s, so we can clip values above this
// without substantial distortion and possibly prevent bandpass
// saturation; when we do, don't update the bandpass
// TODO
// NB this leads to a problem if we do allow in some RFI and the
// bandpass gets stuck at a very small value. Symptom is that the
// output re-quantized bits are all maxval.
if (pow > bp_l*11)
fft_out[j].x = 10;
else {
// update bandpass
bp_l = scale*pow + (1-scale)*bp_l;
// scale to bandpass and mean-subtract; this assumes the powers are
// chi^2_2 distributed, var(pow)=4, mean(pow)=std(pow)=2. Therefore
// dividing by mean will give standard deviation of 1 centred at 1.
fft_out[j].x = pow/bp_l-1;
}
}
}
// write out current bandpass
bp[i] = bp_l;
}
// sum polarizations in place
__global__ void pscrunch (hipfftComplex *fft_out, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
//fft_out[i].x += fft_out[i+n].x;
fft_out[i].x = M_SQRT1_2*(fft_out[i].x + fft_out[i+n].x);
}
}
// sum polarizations in place
__global__ void pscrunch_weights (hipfftComplex *fft_out, hipfftReal* kur_weights_dev, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// this formulation excludes samples with more than 80% RFI
float w1_f = kur_weights_dev[i/NCHAN];
float w2_f = kur_weights_dev[(i+n)/NCHAN];
int w1 = w1_f >= MIN_WEIGHT;
int w2 = w2_f >= MIN_WEIGHT;
switch (w1+w2)
{
case 2:
// both samples OK, account for variance with sqrt(2)
fft_out[i].x = M_SQRT1_2*(fft_out[i].x + fft_out[i+n].x);
kur_weights_dev[i/NCHAN] = 0.5*(w1_f + w2_f);
break;
case 1:
// only one sample OK, variance = 1
fft_out[i].x = w1*fft_out[i].x + w2*fft_out[i+n].x;
//kur_weights_dev[i/NCHAN] = 0.5*(w1_f*w1 + w2_f*w2);
kur_weights_dev[i/NCHAN] = w1_f*w1 + w2_f*w2;
break;
case 0:
// no good samples, average bandpass (NB isn't this just 0?)
//fft_out[i].x = 0.5*(fft_out[i].x + fft_out[i+n].x);
fft_out[i].x = 0.;
kur_weights_dev[i/NCHAN] = 0;
break;
}
}
}
// average time samples
// TODO -- review normalization and make sure it's correct with polarization
__global__ void tscrunch (hipfftComplex *fft_out, hipfftReal* fft_ave,size_t n)
{
// loop over the output indices; calculate corresponding input index,
// then add up the subsequent NSCRUNCH samples
float scale = sqrt (1./NSCRUNCH);
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// explicit calculation of indices for future reference
///////////////////////////////////////////////////////
//int out_time_idx = i/NCHAN;
//int out_chan_idx = i - out_time_idx*NCHAN;
//int src_idx = out_time_idx * NSCRUNCH* NCHAN + out_chan_idx;
//int src_idx = i+(NSCRUNCH-1)*out_time_idx*NCHAN;
///////////////////////////////////////////////////////
int src_idx = i+(NSCRUNCH-1)*(i/NCHAN)*NCHAN;
fft_ave[i] = 0.;
for (int j=0; j < NSCRUNCH; ++j, src_idx += NCHAN)
{
fft_ave[i] += fft_out[src_idx].x;
}
fft_ave[i] *= scale;
}
}
__global__ void tscrunch_weights (hipfftComplex *fft_out, hipfftReal* fft_ave, hipfftReal* kur_weights_dev, size_t n)
{
// loop over the output indices; calculate corresponding input index,
// then add up the subsequent NSCRUNCH samples
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// explicit calculation of indices for future reference
///////////////////////////////////////////////////////
// int out_time_idx = i/NCHAN;
// int out_chan_idx = i - out_time_idx*NCHAN;
// int src_idx = out_time_idx * NSCRUNCH* NCHAN + out_chan_idx;
// int src_idx = i+(NSCRUNCH-1)*out_time_idx*NCHAN;
///////////////////////////////////////////////////////
// TODO -- we might not want an additional cut on MIN_WEIGHT here
int src_idx = i+(NSCRUNCH-1)*(i/NCHAN)*NCHAN;
fft_ave[i] = 0.;
int wt_sum = 0;
float wt_sumf = 0;
for (int j=0; j < NSCRUNCH; ++j, src_idx += NCHAN)
{
float wt = kur_weights_dev[src_idx/NCHAN];
if (wt < MIN_WEIGHT) continue;
wt_sum++;
wt_sumf += wt;
fft_ave[i] += wt*fft_out[src_idx].x;
}
if (wt_sumf/NSCRUNCH >= MIN_WEIGHT)
fft_ave[i] /= sqrt(float(wt_sum));
else
// this just copies the bandpass in; NB the average is needed, I'm not
// entirely sure why
//fft_ave[i] = fft_out[src_idx].x/NSCRUNCH;
fft_ave[i] = 0;
}
}
// select and digitize
__global__ void sel_and_dig_2b (
hipfftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = (n*4) / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
// correct for packing of 4 (because i indexes a *byte*, not a sample)
//int time_idx = (i*4)/(NCHANOUT);
//int chan_idx = i*4 - time_idx*NCHANOUT;
int time_idx = (i*4)/(NCHANOUT*npol);
int pol_idx = (i*4 - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i*4 - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
fft_trim_u[i] = 0;
for (int j = 0; j < 4; ++j)
{
// I have now done an optimization of the input thresholds for the
// approximate data format (chi^2 with 16 dof) assuming uniform
// output. This has about 5% more distortion than optimal output
// with nonuniform steps, but is simpler for downstream applications.
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN+j];
if (tmp < -0.6109) // do nothing, bit already correctly set
continue;
if (tmp < 0.3970)
fft_trim_u[i] += 1 << 2*j;
else if (tmp < 1.4050)
fft_trim_u[i] += 2 << 2*j;
else
fft_trim_u[i] += 3 << 2*j;
}
}
}
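// Commented-out host-side reference for the 2-bit mapping above
// (illustrative only; the helper name host_quantize_2b is an assumption and
// it is not part of the build). It maps one normalized, mean-subtracted
// power sample to the same 2-bit code that is packed four-per-byte above:
//
// static unsigned char host_quantize_2b (float x)
// {
//   if (x < -0.6109f) return 0;
//   if (x < 0.3970f) return 1;
//   if (x < 1.4050f) return 2;
//   return 3;
// }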
// select and digitize
__global__ void sel_and_dig_4b (
hipfftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = n / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
// correct for packing of 2 (because i indexes a *byte*, not a sample)
//int time_idx = (i*2)/(NCHANOUT);
//int chan_idx = i*2 - time_idx*NCHANOUT;
int time_idx = (i*2)/(NCHANOUT*npol);
int pol_idx = (i*2 - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i*2 - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
// from Table 3 of Jenet & Anderson 1998
//float tmp = fft_ave[time_idx*NCHAN+chan_idx+CHANMIN]/0.3188 + 7.5;
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN]/0.3188 + 7.5;
if (tmp <= 0)
fft_trim_u[i] = 0;
else if (tmp >= 15)
fft_trim_u[i] = 15;
else
fft_trim_u[i] = (unsigned char)(tmp);
//tmp = fft_ave[time_idx*NCHAN+chan_idx+CHANMIN+1]/0.3188 + 7.5;
tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN+1]/0.3188 + 7.5;
if (tmp <= 0)
;
else if (tmp >= 15)
fft_trim_u[i] += 15 << 4;
else
fft_trim_u[i] += (unsigned char)(tmp) << 4;
}
}
// select and digitize
__global__ void sel_and_dig_8b (
hipfftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = n / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
int time_idx = i/(NCHANOUT*npol);
int pol_idx = (i - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
// from Table 3 of Jenet & Anderson 1998
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN]/0.02957 + 127.5;
if (tmp <= 0)
fft_trim_u[i] = 0;
else if (tmp >= 255)
fft_trim_u[i] = 255;
else
fft_trim_u[i] = (unsigned char) tmp;
}
}
/*
// convert floating point to integer
__global__ void digitizearray(hipfftReal *fft_ave, unsigned char* fft_ave_u, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
float tmp = fft_ave[i]/0.02957 + 127.5;
if (tmp <= 0)
fft_ave_u[i] = 0;
else if (tmp >= 255)
fft_ave_u[i] = 255;
else
fft_ave_u[i] = (unsigned char) tmp;
}
}
// remove extraneous channels; in practice, this means
__global__ void selectchannels(unsigned char* fft_ave_u, unsigned char* fft_trim_u, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
int nchan = CHANMAX-CHANMIN+1;
int time_idx = i/nchan;
int chan_idx = i - time_idx*nchan;
fft_trim_u[i] = fft_ave_u[time_idx*NCHAN+chan_idx+CHANMIN];
}
}
// detect total power and normalize the polarizations in place
// use a monolithic kernel pattern here since the total number of threads
// should be relatively small
// TODO -- this will probably be more efficient if done using lots of
// threads and syncing; however, it doesn't seem to be a bottleneck
__global__ void detect_and_normalize (hipfftComplex *fft_out, size_t ntime)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += ntime*NCHAN;
i -= NCHAN;
}
float sum1 = 0;
float sum2 = 0;
for (int j = i; j < ntime*NCHAN; j+= NCHAN)
{
float pow = fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
fft_out[j].x = pow;
sum1 += pow;
sum2 += pow*pow;
}
sum1 *= 1./ntime;
sum2 = sqrt(1./(sum2/ntime-sum1*sum1));
for (int j = i; j < ntime*NCHAN; j+= NCHAN)
{
fft_out[j].x = (fft_out[j].x-sum1)*sum2;
}
}
*/
| 03c859564e35661466c32bf6e4ce5db0b1fd861c.cu | #include "process_baseband.h"
// quantities for D'Agostino normality test (see wikipedia)
#define NK float(NKURTO)
#define mu1 (-6./(NK+1))
#define mu2 ((24.*NK*(NK-2)*(NK-3))/((NK+1)*(NK+1)*(NK+3)*(NK+5)))
#define g1 (6.*(NK*NK-5*NK+2)/((NK+7)*(NK+9))*sqrt( (6.*(NK+3)*(NK+5))/(NK*(NK-2)*(NK-3)) ))
#define A (6.+(8./g1)*(2./g1 + sqrt(1. + 4./(g1*g1))))
#define Z2_1 sqrt(4.5*A)
#define Z2_2 (1-2./(9*A))
#define Z2_3 sqrt(2./(mu2*(A-4)))
#define NKb float(NFFT)
#define mu1b (-6./(NKb+1))
#define mu2b ((24.*NKb*(NKb-2)*(NKb-3))/((NKb+1)*(NKb+1)*(NKb+3)*(NKb+5)))
#define g1b (6.*(NKb*NKb-5*NKb+2)/((NKb+7)*(NKb+9))*sqrt( (6.*(NKb+3)*(NKb+5))/(NKb*(NKb-2)*(NKb-3)) ))
#define Ab (6.+(8./g1b)*(2./g1b + sqrt(1. + 4./(g1b*g1b))))
#define Z2b_1 sqrt(4.5*Ab)
#define Z2b_2 (1-2./(9*Ab))
#define Z2b_3 sqrt(2./(mu2b*(Ab-4)))
//convert unsigned char time array to float
__global__ void convertarray (cufftReal *time, unsigned char *utime, size_t n)
{
for (int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n; i += blockDim.x*gridDim.x)
{
if (utime[i] == 0)
time[i] = 0;
else
time[i] = (cufftReal)(utime[i])/128-1;
}
}
__global__ void kurtosis (cufftReal *time, cufftReal *pow, cufftReal *kur)
{
// calculate the variance (power) and kurtosis for voltage statistics in
// relatively short windows. Do this by first copying from global memory,
// then using a hard-coded tree reduction. Right now, it is set up to
// use either 250 or 500 samples, so must be invoked with either 256
// or 512 threads.
// because each thread block works on a chunk of data that's commensurate
// with the packing of samples into the buffer, specifically with respect
// to the two polarizations, I think we don't need to worry at all about
// a thread block crossing the polarization. The output will simply
// contain the statistics for pol 0 first, then pol 1.
volatile __shared__ float data2[256];
volatile __shared__ float data4[256];
unsigned int tid = threadIdx.x;
size_t offset = blockIdx.x*NKURTO;
if (tid < 250)
{
if (NKURTO==500) {
// load up two values from global memory in this case
data2[tid] = time[offset + tid]*time[offset + tid];
float tmp = time[offset + tid + 250]*time[offset + tid + 250];
data4[tid] = data2[tid]*data2[tid] + tmp*tmp;
data2[tid] += tmp;
}
else {
data2[tid] = time[offset + tid]*time[offset + tid];
data4[tid] = data2[tid]*data2[tid];
}
}
else
data2[tid] = data4[tid] = 0;
__syncthreads ();
if (tid < 128)
{
data2[tid] += data2[tid + 128];
data4[tid] += data4[tid + 128];
}
__syncthreads ();
if (tid < 64)
{
data2[tid] += data2[tid + 64];
data4[tid] += data4[tid + 64];
}
__syncthreads ();
if (tid < 32)
{
data2[tid] += data2[tid + 32];
data4[tid] += data4[tid + 32];
data2[tid] += data2[tid + 16];
data4[tid] += data4[tid + 16];
data2[tid] += data2[tid + 8];
data4[tid] += data4[tid + 8];
data2[tid] += data2[tid + 4];
data4[tid] += data4[tid + 4];
data2[tid] += data2[tid + 2];
data4[tid] += data4[tid + 2];
}
if (tid==0)
{
data2[tid] += data2[tid + 1];
data4[tid] += data4[tid + 1];
pow[blockIdx.x] = data2[0]/NKURTO;
kur[blockIdx.x] = data4[0]/NKURTO/(pow[blockIdx.x]*pow[blockIdx.x]);
}
}
__global__ void compute_dagostino (cufftReal* kur, cufftReal* dag, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// I'm not sure why I have a zero check here; the only time it should
// happen is if all of the samples are also 0.
float dag1 = DAG_INF, dag2 = DAG_INF;
if (kur[i] != 0.)
{
float t = (1-2./A)/(1.+(kur[i]-3.-mu1)*Z2_3);
if (t > 0)
dag1 = fabsf (Z2_1*(Z2_2 - powf (t,1./3)));
}
if (kur[i+n] != 0.)
{
float t = (1-2./A)/(1.+(kur[i+n]-3.-mu1)*Z2_3);
if (t > 0)
dag2 = fabsf (Z2_1*(Z2_2 - powf (t,1./3)));
}
// duplicate values to make bookkeeping in block_kurtosis easier
dag[i] = dag[i+n] = fmaxf (dag1, dag2);
}
}
// compute a filter-bank level statistic
// *** Importantly, this applies a fine-time filtering during calculation
// *** of the statistic, by zero-weighting any NKURTO-sized blocks of samples
// *** that exceed the threshold.
__global__ void block_kurtosis (cufftReal* pow, cufftReal* kur, cufftReal* dag, cufftReal* pow_block, cufftReal* kur_block)
{
volatile __shared__ float data2[256];
volatile __shared__ float data4[256];
volatile __shared__ unsigned char wt[256];
// run with 256 threads; break it up such that we either do 5 blocks (for
// NKURTO==500) or 10 blocks (for NKURTO=250)
unsigned int tid = threadIdx.x;
unsigned int warp_id = tid / 32;
unsigned int warp_tid = tid - warp_id*32;
if (warp_tid > 24)
{
data2[tid] = 0;
data4[tid] = 0;
wt[tid] = 0;
}
else
{
// each thread block does 8 filterbank blocks (one for each warp)
int idx = (blockIdx.x*8 + warp_id)*(NFFT/NKURTO) + warp_tid;
//wt[tid] = (dag[idx]<DAG_THRESH) && (dag[idx]>-DAG_THRESH);
// updated now that dag array is already absolute valued
wt[tid] = dag[idx]<DAG_THRESH;
data2[tid] = wt[tid]*pow[idx];
data4[tid] = wt[tid]*kur[idx]*pow[idx]*pow[idx];
if (NKURTO==250)
{
// if using finer time bins, add in the contribution from
// the other pieces (see comment above)
__syncthreads ();
idx += 25;
//float w = (dag[idx]<DAG_THRESH) && (dag[idx]>-DAG_THRESH);
float w = dag[idx]<DAG_THRESH;
data2[tid] += w*pow[idx];
data4[tid] += w*kur[idx]*pow[idx]*pow[idx];
wt[tid] += w;
}
}
if (warp_tid > 15)
return;
// do sum within each warp
data2[tid] += data2[tid + 16];
data4[tid] += data4[tid + 16];
wt[tid] += wt[tid + 16];
data2[tid] += data2[tid + 8];
data4[tid] += data4[tid + 8];
wt[tid] += wt[tid + 8];
data2[tid] += data2[tid + 4];
data4[tid] += data4[tid + 4];
wt[tid] += wt[tid + 4];
data2[tid] += data2[tid + 2];
data4[tid] += data4[tid + 2];
wt[tid] += wt[tid + 2];
data2[tid] += data2[tid + 1];
data4[tid] += data4[tid + 1];
wt[tid] += wt[tid + 1];
if (0==warp_tid)
{
if (wt[tid] > 0)
{
float p = pow_block[blockIdx.x*8+warp_id] = data2[tid]/wt[tid];
kur_block[blockIdx.x*8+warp_id] = data4[tid]/wt[tid]/(p*p);
}
else
{
pow_block[blockIdx.x*8+warp_id] = 0;
kur_block[blockIdx.x*8+warp_id] = 0;
}
}
}
// TODO -- this isn't quite right, because there won't necessarily be
// NFFT samples in the weighted version; since we're computing many fewer,
// we don't need to precompute. However, empirically it doesn't seem to make
// much of a difference.
__global__ void compute_dagostino2 (cufftReal* kur, cufftReal* dag, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
float dag1 = DAG_INF, dag2 = DAG_INF;
if (kur[i] != 0.)
{
float t = (1-2./Ab)/(1.+(kur[i]-3.-mu1b)*Z2b_3);
if (t > 0)
dag1 = fabsf (Z2b_1*(Z2b_2 - powf (t,1./3)));
}
if (kur[i+n] != 0.)
{
float t = (1-2./Ab)/(1.+(kur[i+n]-3.-mu1b)*Z2b_3);
if (t > 0)
dag2 = fabsf (Z2b_1*(Z2b_2 - powf (t,1./3)));
}
dag[i] = dag[i+n] = fmaxf (dag1, dag2);
}
}
__global__ void apply_kurtosis (
cufftReal *in, cufftReal *out,
cufftReal *dag, cufftReal *dag_fb,
cufftReal* norms)
{
unsigned int tid = threadIdx.x;
// D'Agostino kurtosis TS; already absolute valued and gathered
// over the two polarizations, but is duplicated, so just use the
// entry in the second polarization; will also make it easier if
// we revert to independent polarizations
//bool bad = (dag[blockIdx.x] > DAG_THRESH) || (dag_fb[blockIdx.x/(NFFT/NKURTO)] > DAG_FB_THRESH);
bool bad = (dag[blockIdx.x] > DAG_THRESH);
#ifdef DEBUG_WEIGHTS
// if debugging, set the weights to 0 for the second half of all samples in
// the chunk for 2nd pol and for the final eighth for the 1st pol
int time_idx = blockIdx.x * NKURTO;
bool c1 = time_idx > 3*(VLITE_RATE/(SEG_PER_SEC*2));
bool c2 = (time_idx < VLITE_RATE/SEG_PER_SEC) && (time_idx > (7*VLITE_RATE/SEG_PER_SEC)/8);
bad = c1 || c2;
#endif
if (bad)
{
// zero voltages
if (tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = 0;
if (NKURTO==500)
out[offset + tid + 250] = 0;
}
}
else
{
// if copying data, copy it
if (in != out && tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = in[offset + tid];
if (NKURTO==500)
out[offset + tid + 250] = in[offset + tid + 250];
}
// add one to the filterbank block samples for weights
if (tid==0)
{
atomicAdd (norms + (blockIdx.x*NKURTO)/NFFT, float(NKURTO)/NFFT);
}
}
}
__global__ void apply_kurtosis_fake (
cufftReal *in, cufftReal *out,
cufftReal *dag, cufftReal *dag_fb,
cufftReal* norms)
{
unsigned int tid = threadIdx.x;
if (in != out && tid < 250)
{
size_t offset = blockIdx.x*NKURTO;
out[offset + tid] = in[offset + tid];
if (NKURTO==500)
out[offset + tid + 250] = in[offset + tid + 250];
}
// add one to the filterbank block samples for weights
if (tid==0)
{
atomicAdd (norms + (blockIdx.x*NKURTO)/NFFT, float(NKURTO)/NFFT);
}
}
__global__ void histogram ( unsigned char *utime, unsigned int* histo, size_t n)
{
__shared__ unsigned int lhisto[512];
lhisto[threadIdx.x] = 0;
__syncthreads ();
int i = threadIdx.x + blockIdx.x*blockDim.x;
for (; i < n/2; i += blockDim.x*gridDim.x)
atomicAdd (lhisto+utime[i], 1);
for (; i < n; i += blockDim.x*gridDim.x)
atomicAdd ((lhisto+256)+utime[i], 1);
__syncthreads ();
// MUST run with 512 threads for this global accumulation to work
atomicAdd ( histo+threadIdx.x, lhisto[threadIdx.x]);
}
__global__ void set_frb_delays (float* frb_delays, float dm)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN) return;
double freq = 0.384 - (i*0.064)/NCHAN;
// delays are scaled by FFT timestep
double scale = 4.15e-3*dm*SEG_PER_SEC*FFTS_PER_SEG;
frb_delays[i] = float(scale/(freq*freq)-scale/(0.384*0.384));
}
__global__ void inject_frb ( cufftComplex *fft_out, float* frb_delays,
int nfft_since_frb, float frb_width, float frb_amp)
{
// NB frb_width must be in FFT time steps!
// this is the channel; each thread does one channel for all time steps
// and both polarizations
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN) return;
// for now, don't try to do any interpolation, just round to the nearest
// time index that the FRB encounters this channel
int time_idx_lo = int(frb_delays[i]+0.5)-nfft_since_frb;
int time_idx_hi = int(frb_delays[i]+frb_width+0.5)-nfft_since_frb;
// if the earliest is after this chunk, return
if (time_idx_lo >= FFTS_PER_SEG) return;
// if the latest time precedes this chunk, return
if (time_idx_hi < 0) return;
// ensure indices are within data bounds
if (time_idx_lo < 0) time_idx_lo = 0;
if (time_idx_hi >= FFTS_PER_SEG) time_idx_hi = FFTS_PER_SEG-1;
// otherwise, there is a portion of the FRB in this chunk, so loop over
// the time steps that it passes through channel i
for (int time_idx=time_idx_lo; time_idx<= time_idx_hi; time_idx++)
{
fft_out[time_idx*NCHAN+i].x *= frb_amp;
fft_out[time_idx*NCHAN+i].y *= frb_amp;
}
// do the next polarization
fft_out += FFTS_PER_SEG*NCHAN;
for (int time_idx=time_idx_lo; time_idx<= time_idx_hi; time_idx++)
{
fft_out[time_idx*NCHAN+i].x *= frb_amp;
fft_out[time_idx*NCHAN+i].y *= frb_amp;
}
}
__global__ void detect_and_normalize2 (cufftComplex *fft_out, cufftReal* bp,
float scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += FFTS_PER_SEG*NCHAN;
bp += NCHAN;
i -= NCHAN;
}
// initialize bandpass to mean of first block
float bp_l = bp[i];
if (0. == bp_l) {
for (int j = i; j < FFTS_PER_SEG*NCHAN; j+= NCHAN)
bp_l += fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
bp_l /= FFTS_PER_SEG;
}
for (int j = i; j < FFTS_PER_SEG*NCHAN; j+= NCHAN)
{
// detect
float pow = fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
// update bandpass
bp_l = scale*pow + (1-scale)*bp_l;
// scale to bandpass and mean-subtract; this assumes the powers are
// chi^2_2 distributed, var(pow)=4, mean(pow)=std(pow)=2. Therefore
// dividing by mean will give standard deviation of 1 centred at 1.
fft_out[j].x = pow/bp_l-1;
}
// write out current bandpass
bp[i] = bp_l;
}
__global__ void detect_and_normalize3 (cufftComplex *fft_out, cufftReal* kur_weights_dev, cufftReal* bp, float scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += FFTS_PER_SEG*NCHAN;
kur_weights_dev += FFTS_PER_SEG;
bp += NCHAN;
i -= NCHAN;
}
// initialize bandpass to mean of first block
float bp_l = bp[i];
if (0. == bp_l) {
int good_samples = 0;
for (int j = i, time_idx=0; j < FFTS_PER_SEG*NCHAN; j+= NCHAN,time_idx++) {
float w = kur_weights_dev[time_idx];
if (0.==w)
continue;
good_samples++;
bp_l += (fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y)/w;
}
if (0==good_samples) {
// entire first block is bad; not sure what is best, try setting to
// 1 and hope for the best?
bp_l = 1;
}
else
bp_l /= good_samples;
}
for (int j = i, time_idx=0; j < FFTS_PER_SEG*NCHAN; j+= NCHAN,time_idx++)
{
// detect
//float w = kur_weights_dev[time_idx]*kur_weights_dev[time_idx];
// NB that this formulation works because the weights are 0 or 1; if
// we write out the expectation for the Fourier transform of the voltage
// squared, the weights go in squared, so we normalize by the sum over
// the weights squared, which is the same as the sum of the weights
// (here kur_weights_dev) since they are 0 or 1
float w = kur_weights_dev[time_idx];
if (0.==w) {
// if no samples are available, replace with mean bandpass
fft_out[j].x = 0;
}
else {
float pow = (fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y)/w;
// apply a rough filter; values in excess of 11xmean shouldn't happen
// more often than every 1.5 s, so we can clip values above this
// without substantial distortion and possibly prevent bandpass
// saturation; when we do, don't update the bandpass
// TODO
// NB this leads to a problem if we do allow in some RFI and the
// bandpass gets stuck at a very small value. Symptom is that the
// output re-quantized bits are all maxval.
if (pow > bp_l*11)
fft_out[j].x = 10;
else {
// update bandpass
bp_l = scale*pow + (1-scale)*bp_l;
// scale to bandpass and mean-subtract; this assumes the powers are
// chi^2_2 distributed, var(pow)=4, mean(pow)=std(pow)=2. Therefore
// dividing by mean will give standard deviation of 1 centred at 1.
fft_out[j].x = pow/bp_l-1;
}
}
}
// write out current bandpass
bp[i] = bp_l;
}
// sum polarizations in place
__global__ void pscrunch (cufftComplex *fft_out, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
//fft_out[i].x += fft_out[i+n].x;
fft_out[i].x = M_SQRT1_2*(fft_out[i].x + fft_out[i+n].x);
}
}
// sum polarizations in place
__global__ void pscrunch_weights (cufftComplex *fft_out, cufftReal* kur_weights_dev, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// this formulation excludes samples with more than 80% RFI
float w1_f = kur_weights_dev[i/NCHAN];
float w2_f = kur_weights_dev[(i+n)/NCHAN];
int w1 = w1_f >= MIN_WEIGHT;
int w2 = w2_f >= MIN_WEIGHT;
switch (w1+w2)
{
case 2:
// both samples OK, account for variance with sqrt(2)
fft_out[i].x = M_SQRT1_2*(fft_out[i].x + fft_out[i+n].x);
kur_weights_dev[i/NCHAN] = 0.5*(w1_f + w2_f);
break;
case 1:
// only one sample OK, variance = 1
fft_out[i].x = w1*fft_out[i].x + w2*fft_out[i+n].x;
//kur_weights_dev[i/NCHAN] = 0.5*(w1_f*w1 + w2_f*w2);
kur_weights_dev[i/NCHAN] = w1_f*w1 + w2_f*w2;
break;
case 0:
// no good samples, average bandpass (NB isn't this just 0?)
//fft_out[i].x = 0.5*(fft_out[i].x + fft_out[i+n].x);
fft_out[i].x = 0.;
kur_weights_dev[i/NCHAN] = 0;
break;
}
}
}
// average time samples
// TODO -- review normalization and make sure it's correct with polarization
__global__ void tscrunch (cufftComplex *fft_out, cufftReal* fft_ave,size_t n)
{
// loop over the output indices; calculate corresponding input index,
// then add up the subsequent NSCRUNCH samples
float scale = sqrt (1./NSCRUNCH);
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// explicit calculation of indices for future reference
///////////////////////////////////////////////////////
//int out_time_idx = i/NCHAN;
//int out_chan_idx = i - out_time_idx*NCHAN;
//int src_idx = out_time_idx * NSCRUNCH* NCHAN + out_chan_idx;
//int src_idx = i+(NSCRUNCH-1)*out_time_idx*NCHAN;
///////////////////////////////////////////////////////
int src_idx = i+(NSCRUNCH-1)*(i/NCHAN)*NCHAN;
fft_ave[i] = 0.;
for (int j=0; j < NSCRUNCH; ++j, src_idx += NCHAN)
{
fft_ave[i] += fft_out[src_idx].x;
}
fft_ave[i] *= scale;
}
}
__global__ void tscrunch_weights (cufftComplex *fft_out, cufftReal* fft_ave, cufftReal* kur_weights_dev, size_t n)
{
// loop over the output indices; calculate corresponding input index,
// then add up the subsequent NSCRUNCH samples
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// explicit calculation of indices for future reference
///////////////////////////////////////////////////////
// int out_time_idx = i/NCHAN;
// int out_chan_idx = i - out_time_idx*NCHAN;
// int src_idx = out_time_idx * NSCRUNCH* NCHAN + out_chan_idx;
// int src_idx = i+(NSCRUNCH-1)*out_time_idx*NCHAN;
///////////////////////////////////////////////////////
// TODO -- we might not want an additional cut on MIN_WEIGHT here
int src_idx = i+(NSCRUNCH-1)*(i/NCHAN)*NCHAN;
fft_ave[i] = 0.;
int wt_sum = 0;
float wt_sumf = 0;
for (int j=0; j < NSCRUNCH; ++j, src_idx += NCHAN)
{
float wt = kur_weights_dev[src_idx/NCHAN];
if (wt < MIN_WEIGHT) continue;
wt_sum++;
wt_sumf += wt;
fft_ave[i] += wt*fft_out[src_idx].x;
}
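// normalize by sqrt of the number of good samples so the summed noise keeps unit variance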
if (wt_sumf/NSCRUNCH >= MIN_WEIGHT)
fft_ave[i] /= sqrt(float(wt_sum));
else
// this just copies the bandpass in; NB the average is needed, I'm not
// entirely sure why
//fft_ave[i] = fft_out[src_idx].x/NSCRUNCH;
fft_ave[i] = 0;
}
}
// select and digitize
__global__ void sel_and_dig_2b (
cufftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = (n*4) / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
// correct for packing of 4 (because i indexes a *byte*, not a sample)
//int time_idx = (i*4)/(NCHANOUT);
//int chan_idx = i*4 - time_idx*NCHANOUT;
int time_idx = (i*4)/(NCHANOUT*npol);
int pol_idx = (i*4 - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i*4 - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
fft_trim_u[i] = 0;
for (int j = 0; j < 4; ++j)
{
// I have now done an optimization of the input thresholds for the
// approximate data format (chi^2 with 16 dof) assuming uniform
// output. This has about 5% more distortion than optimal output
// with nonuniform steps, but is simpler for downstream applications.
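// the three thresholds below are uniformly spaced (about 1.008 sigma apart) around the
// zero-mean, unit-variance input produced by the upstream bandpass normalization and scrunching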
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN+j];
if (tmp < -0.6109) // do nothing, bit already correctly set
continue;
if (tmp < 0.3970)
fft_trim_u[i] += 1 << 2*j;
else if (tmp < 1.4050)
fft_trim_u[i] += 2 << 2*j;
else
fft_trim_u[i] += 3 << 2*j;
}
}
}
// select and digitize
__global__ void sel_and_dig_4b (
cufftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = n / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
// correct for packing of 2 (because i indexes a *byte*, not a sample)
//int time_idx = (i*2)/(NCHANOUT);
//int chan_idx = i*2 - time_idx*NCHANOUT;
int time_idx = (i*2)/(NCHANOUT*npol);
int pol_idx = (i*2 - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i*2 - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
// from Table 3 of Jenet & Anderson 1998
//float tmp = fft_ave[time_idx*NCHAN+chan_idx+CHANMIN]/0.3188 + 7.5;
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN]/0.3188 + 7.5;
if (tmp <= 0)
fft_trim_u[i] = 0;
else if (tmp >= 15)
fft_trim_u[i] = 15;
else
fft_trim_u[i] = (unsigned char)(tmp);
//tmp = fft_ave[time_idx*NCHAN+chan_idx+CHANMIN+1]/0.3188 + 7.5;
tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN+1]/0.3188 + 7.5;
if (tmp <= 0)
; // do nothing, upper nibble already correctly set (zero)
else if (tmp >= 15)
fft_trim_u[i] += 15 << 4;
else
fft_trim_u[i] += (unsigned char)(tmp) << 4;
}
}
// select and digitize
__global__ void sel_and_dig_8b (
cufftReal *fft_ave, unsigned char* fft_trim_u, size_t n, int npol)
{
int NCHANOUT = CHANMAX-CHANMIN+1;
//int NTIME = n / (NCHANOUT*npol); // total time samples
int NTIME = (VLITE_RATE/SEG_PER_SEC/NFFT)/NSCRUNCH;
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
// compute index into input array
int time_idx = i/(NCHANOUT*npol);
int pol_idx = (i - time_idx*NCHANOUT*npol)/NCHANOUT;
int chan_idx = i - time_idx*npol*NCHANOUT - pol_idx*NCHANOUT;
// from Table 3 of Jenet & Anderson 1998
float tmp = fft_ave[pol_idx*NTIME*NCHAN + time_idx*NCHAN+chan_idx+CHANMIN]/0.02957 + 127.5;
if (tmp <= 0)
fft_trim_u[i] = 0;
else if (tmp >= 255)
fft_trim_u[i] = 255;
else
fft_trim_u[i] = (unsigned char) tmp;
}
}
/*
// convert floating point to integer
__global__ void digitizearray(cufftReal *fft_ave, unsigned char* fft_ave_u, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
float tmp = fft_ave[i]/0.02957 + 127.5;
if (tmp <= 0)
fft_ave_u[i] = 0;
else if (tmp >= 255)
fft_ave_u[i] = 255;
else
fft_ave_u[i] = (unsigned char) tmp;
}
}
// remove extraneous channels; in practice, this means
__global__ void selectchannels(unsigned char* fft_ave_u, unsigned char* fft_trim_u, size_t n)
{
for (
int i = threadIdx.x + blockIdx.x*blockDim.x;
i < n;
i += blockDim.x*gridDim.x)
{
int nchan = CHANMAX-CHANMIN+1;
int time_idx = i/nchan;
int chan_idx = i - time_idx*nchan;
fft_trim_u[i] = fft_ave_u[time_idx*NCHAN+chan_idx+CHANMIN];
}
}
// detect total power and normalize the polarizations in place
// use a monolithic kernel pattern here since the total number of threads
// should be relatively small
// TODO -- this will probably be more efficient if done using lots of
// threads and syncing; however, it doesn't seem to be a bottleneck
__global__ void detect_and_normalize (cufftComplex *fft_out, size_t ntime)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= NCHAN*2) return;
if (i >= NCHAN) // advance pointer to next polarization
{
fft_out += ntime*NCHAN;
i -= NCHAN;
}
float sum1 = 0;
float sum2 = 0;
for (int j = i; j < ntime*NCHAN; j+= NCHAN)
{
float pow = fft_out[j].x*fft_out[j].x + fft_out[j].y*fft_out[j].y;
fft_out[j].x = pow;
sum1 += pow;
sum2 += pow*pow;
}
sum1 *= 1./ntime;
sum2 = sqrt(1./(sum2/ntime-sum1*sum1));
for (int j = i; j < ntime*NCHAN; j+= NCHAN)
{
fft_out[j].x = (fft_out[j].x-sum1)*sum2;
}
}
*/
|
7bf1588397b326e9d22d17f8472442b914d7a390.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CS205 HW2 Part 3.1
The following code was written with reference to nVidia tutorial
and Programming Massively Parallel Processors.
*/
#include <stdio.h>
#include "rocblas.h"
/* define computation parameters */
#define MAT_SIZE 4096
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* GPU kernel: naive matrix multiplication */
__global__ void naive_matmul(float const * const mat_a, float const * const mat_b, float * const mat_c, const int mat_size){
/* get row and column for each thread in a block */
const int idx_row = blockIdx.x * blockDim.x + threadIdx.x;
const int idx_col = blockIdx.y * blockDim.y + threadIdx.y;
if( idx_row < mat_size && idx_col < mat_size ){
/* use c_temp to store summation of mat_size numbers */
register float c_temp = 0.0;
/* start naive summation to get one element in mat_c */
for( int idx_el = 0; idx_el < mat_size; idx_el++ ){
/* pick a row in mat_a[] and a column in mat_b[] */
c_temp += mat_a[idx_el * mat_size + idx_row] * mat_b[idx_col * mat_size + idx_el];
}
/* store an element of mat_c */
mat_c[idx_col * mat_size + idx_row] = c_temp;
}
return;
}
/* function to randomly assign values for matrix */
void randmat(float * matrix, int nsize){
for(int idx=0; idx < nsize * nsize; idx++){
matrix[idx] = double(rand())/ (double(RAND_MAX) + 1.0);
}
}
/* function to calculate max error between mat_1 and mat_2 */
void max_err(float * mat_1, float * mat_2, int nsize){
float err = 0.0;
for(int idx=0; idx < nsize * nsize; idx++){
err = max(err, abs(( (float)mat_1[idx] - (float)mat_2[idx]) / (float)mat_1[idx] ) );
}
printf("Max error is %e percent \n", err*100.0);
}
/* main function */
int main(int argc, char *argv[]){
/* check GPU info on Odyssey */
int dev;
hipDeviceProp_t prop;
hipGetDevice(&dev);
hipGetDeviceProperties(&prop, dev);
printf(" --- General info --- \n");
printf("GPU name: %d %s\n", dev, prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf(" --- Memory info --- \n");
printf("Total global mem: %ld\n", prop.totalGlobalMem);
printf(" --- Multiprocessor (MP) Info --- \n");
printf("MP count: %d\n", prop.multiProcessorCount);
printf("Shared mem per MP: %ld\n", prop.sharedMemPerBlock);
printf("Registers per MP: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf( "\n" );
/* print matrix size n_now */
const int n_now = MAT_SIZE;
printf(" --- Matrix multiplicatoin --- \n");
printf("Matrix size n is %d\n", n_now);
/* define matrix in host and in device */
float *host_a, *host_b, *c_cublas, *c_naive;
float *dev_a, *dev_b, *dev_c;
/* size of matrix in byte */
size_t mat_in_byte = (size_t)n_now * (size_t)n_now * sizeof(float);
/* randomly setup host_a and host_b */
host_a = (float *) malloc(mat_in_byte);
host_b = (float *) malloc(mat_in_byte);
randmat(host_a, n_now);
randmat(host_b, n_now);
/* set c_cublas and c_naive to zeros */
c_cublas = (float *) malloc(mat_in_byte);
c_naive = (float *) malloc(mat_in_byte);
memset(c_cublas, 0, mat_in_byte);
memset(c_naive, 0, mat_in_byte);
/* setup dev_a, dev_b, and dev_c */
hipMalloc((void **)&dev_a, mat_in_byte);
hipMalloc((void **)&dev_b, mat_in_byte);
hipMalloc((void **)&dev_c, mat_in_byte);
hipMemcpy(dev_a, host_a, mat_in_byte, hipMemcpyHostToDevice);
hipMemcpy(dev_b, host_b, mat_in_byte, hipMemcpyHostToDevice);
hipMemset(dev_c, 0, mat_in_byte);
/* setup CUDA timer */
float time_naive, time_cublas;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/*
naive_matmul kernel computation
*/
/* preparation */
dim3 dim_thread(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 dim_block( (n_now + THREADS_PER_BLOCK_X - 1) / THREADS_PER_BLOCK_X, (n_now + THREADS_PER_BLOCK_Y - 1) / THREADS_PER_BLOCK_Y, 1); /* integer ceiling so the grid covers n_now even when it is not a multiple of the block size */
/* start timer */
hipEventRecord(start, 0);
/* core computation */
hipLaunchKernelGGL(( naive_matmul), dim3(dim_block), dim3(dim_thread), 0, 0, dev_a, dev_b, dev_c, n_now);
/* end timer */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_naive, start, stop);
/* wrap up */
hipMemcpy(c_naive, dev_c, mat_in_byte, hipMemcpyDeviceToHost);
hipMemset(dev_c, 0, mat_in_byte);
printf("naive_matmul elapsed time is %f seconds\n", time_naive / 1000.0f);
printf("Throughput is %f GFlop/s\n", 2.0 * (double)n_now * (double)n_now * (double)n_now /
((double)time_naive * 1.e-3) * 1.e-9);
/*
CUBLAS computation
*/
/* preparation */
hipblasHandle_t handle;
hipblasCreate(&handle);
float alpha = 1.0;
float beta = 0.0;
/* start timer */
hipEventRecord(start, 0);
/* core computation */
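/* hipBLAS (like cuBLAS) assumes column-major storage; the naive kernel above also
indexes A, B and C column-major (element (row,col) at col*n + row), so both paths
compute the same product C = A*B and their results can be compared directly */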
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n_now, n_now, n_now,
(float *)&alpha, (float *)dev_a, n_now, (float *)dev_b, n_now,
(float *)&beta, (float *)dev_c, n_now);
hipDeviceSynchronize();
/* end timer */
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time_cublas, start, stop);
/* wrap up */
hipMemcpy(c_cublas, dev_c, mat_in_byte, hipMemcpyDeviceToHost);
hipEventDestroy(start);
hipEventDestroy(stop);
hipblasDestroy(handle);
printf("CUBLAS elapsed time is %f seconds\n", time_cublas / 1000.0f);
printf("Throughput is %f GFlop/s\n", 2.0 * (double)n_now * (double)n_now * (double)n_now /
((double)time_cublas * 1.e-3) * 1.e-9);
/* compare computation results */
max_err(c_cublas, c_naive, n_now);
/* clean up memory */
free(host_a);
free(host_b);
free(c_cublas);
free(c_naive);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipDeviceReset();
return 0;
}
| 7bf1588397b326e9d22d17f8472442b914d7a390.cu | /*
CS205 HW2 Part 3.1
The following code was written with reference to nVidia tutorial
and Programming Massively Parallel Processors.
*/
#include <stdio.h>
#include "cublas_v2.h"
/* define computation parameters */
#define MAT_SIZE 4096
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* GPU kernel: naive matrix multiplication */
__global__ void naive_matmul(float const * const mat_a, float const * const mat_b, float * const mat_c, const int mat_size){
/* get row and column for each thread in a block */
const int idx_row = blockIdx.x * blockDim.x + threadIdx.x;
const int idx_col = blockIdx.y * blockDim.y + threadIdx.y;
if( idx_row < mat_size && idx_col < mat_size ){
/* use c_temp to store summation of mat_size numbers */
register float c_temp = 0.0;
/* start naive summation to get one element in mat_c */
for( int idx_el = 0; idx_el < mat_size; idx_el++ ){
/* pick a row in mat_a[] and a column in mat_b[] */
c_temp += mat_a[idx_el * mat_size + idx_row] * mat_b[idx_col * mat_size + idx_el];
}
/* store an element of mat_c */
mat_c[idx_col * mat_size + idx_row] = c_temp;
}
return;
}
/* function to randomly assign values for matrix */
void randmat(float * matrix, int nsize){
for(int idx=0; idx < nsize * nsize; idx++){
matrix[idx] = double(rand())/ (double(RAND_MAX) + 1.0);
}
}
/* function to calculate max error between mat_1 and mat_2 */
void max_err(float * mat_1, float * mat_2, int nsize){
float err = 0.0;
for(int idx=0; idx < nsize * nsize; idx++){
err = max(err, abs(( (float)mat_1[idx] - (float)mat_2[idx]) / (float)mat_1[idx] ) );
}
printf("Max error is %e percent \n", err*100.0);
}
/* main function */
int main(int argc, char *argv[]){
/* check GPU info on Odyssey */
int dev;
cudaDeviceProp prop;
cudaGetDevice(&dev);
cudaGetDeviceProperties(&prop, dev);
printf(" --- General info --- \n");
printf("GPU name: %d %s\n", dev, prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf(" --- Memory info --- \n");
printf("Total global mem: %ld\n", prop.totalGlobalMem);
printf(" --- Multiprocessor (MP) Info --- \n");
printf("MP count: %d\n", prop.multiProcessorCount);
printf("Shared mem per MP: %ld\n", prop.sharedMemPerBlock);
printf("Registers per MP: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf( "\n" );
/* print matrix size n_now */
const int n_now = MAT_SIZE;
printf(" --- Matrix multiplicatoin --- \n");
printf("Matrix size n is %d\n", n_now);
/* define matrix in host and in device */
float *host_a, *host_b, *c_cublas, *c_naive;
float *dev_a, *dev_b, *dev_c;
/* size of matrix in byte */
size_t mat_in_byte = (size_t)n_now * (size_t)n_now * sizeof(float);
/* randomly setup host_a and host_b */
host_a = (float *) malloc(mat_in_byte);
host_b = (float *) malloc(mat_in_byte);
randmat(host_a, n_now);
randmat(host_b, n_now);
/* set c_cublas and c_naive to zeros */
c_cublas = (float *) malloc(mat_in_byte);
c_naive = (float *) malloc(mat_in_byte);
memset(c_cublas, 0, mat_in_byte);
memset(c_naive, 0, mat_in_byte);
/* setup dev_a, dev_b, and dev_c */
cudaMalloc((void **)&dev_a, mat_in_byte);
cudaMalloc((void **)&dev_b, mat_in_byte);
cudaMalloc((void **)&dev_c, mat_in_byte);
cudaMemcpy(dev_a, host_a, mat_in_byte, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, host_b, mat_in_byte, cudaMemcpyHostToDevice);
cudaMemset(dev_c, 0, mat_in_byte);
/* setup CUDA timer */
float time_naive, time_cublas;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*
naive_matmul kernel computation
*/
/* preparation */
dim3 dim_thread(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 dim_block( (n_now + THREADS_PER_BLOCK_X - 1) / THREADS_PER_BLOCK_X, (n_now + THREADS_PER_BLOCK_Y - 1) / THREADS_PER_BLOCK_Y, 1); /* integer ceiling so the grid covers n_now even when it is not a multiple of the block size */
/* start timer */
cudaEventRecord(start, 0);
/* core computation */
naive_matmul<<<dim_block, dim_thread>>> (dev_a, dev_b, dev_c, n_now);
/* end timer */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_naive, start, stop);
/* wrap up */
cudaMemcpy(c_naive, dev_c, mat_in_byte, cudaMemcpyDeviceToHost);
cudaMemset(dev_c, 0, mat_in_byte);
printf("naive_matmul elapsed time is %f seconds\n", time_naive / 1000.0f);
printf("Throughput is %f GFlop/s\n", 2.0 * (double)n_now * (double)n_now * (double)n_now /
((double)time_naive * 1.e-3) * 1.e-9);
/*
CUBLAS computation
*/
/* preparation */
cublasHandle_t handle;
cublasCreate(&handle);
float alpha = 1.0;
float beta = 0.0;
/* start timer */
cudaEventRecord(start, 0);
/* core computation */
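/* cuBLAS assumes column-major storage; the naive kernel above also indexes
A, B and C column-major (element (row,col) at col*n + row), so both paths
compute the same product C = A*B and their results can be compared directly */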
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n_now, n_now, n_now,
(float *)&alpha, (float *)dev_a, n_now, (float *)dev_b, n_now,
(float *)&beta, (float *)dev_c, n_now);
cudaThreadSynchronize();
/* end timer */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_cublas, start, stop);
/* wrap up */
cudaMemcpy(c_cublas, dev_c, mat_in_byte, cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cublasDestroy(handle);
printf("CUBLAS elapsed time is %f seconds\n", time_cublas / 1000.0f);
printf("Throughput is %f GFlop/s\n", 2.0 * (double)n_now * (double)n_now * (double)n_now /
((double)time_cublas * 1.e-3) * 1.e-9);
/* compare computation results */
max_err(c_cublas, c_naive, n_now);
/* clean up memory */
free(host_a);
free(host_b);
free(c_cublas);
free(c_naive);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaDeviceReset();
return 0;
}
|
3e0216c71b41b74a4389899ad6386256e5478685.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef FS_CALLS_CU
#define FS_CALLS_CU
#include "radix_tree.cu.h"
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "cpu_ipc.cu.h"
#include "mallocfree.cu.h"
#include "fs_structures.cu.h"
#include "timer.h"
#include "hash_table.cu.h"
#include "swapper.cu.h"
#include "fs_globals.cu.h"
#include "preclose_table.cu.h"
#include "fs_calls.cu.h"
// no reference counting here
DEBUG_NOINLINE __device__ int single_thread_fsync(int fd)
{
int res=0;
GPU_ASSERT(fd>=0);
volatile OTable_entry* e=&g_otable->entries[fd];
GPU_ASSERT(e->refCount>0);
volatile FTable_entry* file=&(g_ftable->files[fd]);
unsigned int inode=g_otable->entries[fd].cpu_inode;
GPU_ASSERT(fd>=0);
GPU_ASSERT(inode!=(unsigned int)-1);
// globally locking until everything is flushed
// this is a slow operation so we don't hold the g_otable lock
res=flush_cpu(file,e,e->flags);
if (res<0) {
// TODO: add error handling
GPU_ASSERT(NULL);
}
return res;
}
DEBUG_NOINLINE __device__ int gfsync(int fd){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_fsync(fd);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_ftruncate(int fd, int size)
{
GPU_ASSERT(size==0);
GPU_ASSERT(fd>=0);
volatile OTable_entry* e=&g_otable->entries[fd];
int res= truncate_cpu(e->cpu_fd)==0;
if (res==0)
{
e->size=0;
g_ftable->files[fd].pages->lock_for_flush();
if (g_ftable->files[fd].pages->count !=0)
{
g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree
}
g_ftable->files[fd].pages->unlock_after_flush();
}
return res;
}
DEBUG_NOINLINE __device__ int gftruncate(int fd,int size){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_ftruncate(fd,size);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_close(int fd)
{
GPU_ASSERT(fd>=0);
g_otable->lock();
volatile OTable_entry* e=&g_otable->entries[fd];
e->refCount--;
GPU_ASSERT(e->refCount>=0);
int res=0;
if (e->refCount>0 || e->status!=FSENTRY_OPEN) { __threadfence(); g_otable->unlock(); return 0;}
// lock in the opening thread
e->status=FSENTRY_CLOSING;
volatile FTable_entry* file=&(g_ftable->files[fd]);
unsigned int inode=g_otable->entries[fd].cpu_inode;
GPU_ASSERT(fd>=0);
GPU_ASSERT(inode!=(unsigned int)-1);
volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]);
if (file->pages->dirty_tree)
{
/// this file is dirty, so we put it into pre_close.
g_preclose_table->lock();
if( g_preclose_table->add(file,e))
GPU_ASSERT("Pre-close file table is full" == 0);
g_preclose_table->unlock();
// we do not close the file on a CPU
}
else{
// we do close now: we must hold a global lock on the otable
// because otherwise the thread which is opening a file will get
// a file handle for a closed file
// first, exchange the page cache for this file
g_closed_ftable.lock_table(inode);
// this might be a long because it deallocates and frees the tree
unsigned int drop_residence_inode=0;
file->pages=g_closed_ftable.exchange(inode, file->pages,&drop_residence_inode);
GPU_ASSERT(file->pages);
g_closed_ftable.unlock_table(inode);
res=cpu_e->close(g_otable->entries[fd].cpu_fd,drop_residence_inode);
if (res<0) {
// GPU_ASSERT(NULL);
}
}
cpu_e->clean();
file->clean();
e->clean();
__threadfence();
g_otable->unlock();
return res;
}
DEBUG_NOINLINE __device__ int gclose(int fd){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_close(fd);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_open(char* filename, int flags)
{
/*
Lock ftable
find entry
increase ref-count
Unlock ftable
if not found -> ret E_FTABLE_FULL
if (new_entry) -> send CPU open req
else -> wait on CPU open req
if (req failed) ->
Lock ftable
dec ref_count
if last -> delete entry
unlock ftable
*/
g_otable->lock();
bool isNewEntry=false;
int fd=g_otable->findEntry(filename,&isNewEntry,flags);
GPU_ASSERT(fd>=0);
if (fd<0) { g_otable->unlock(); return E_FSTABLE_FULL;}
volatile OTable_entry* e=&g_otable->entries[fd];
e->refCount++;
__threadfence();
g_otable->unlock();
volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]);
if (isNewEntry)
{
g_preclose_table->lock();
if (g_preclose_table->size!=0) {
if (g_preclose_table->findEntry(e->filename,&g_ftable->files[fd],e) == 0)
{
g_preclose_table->unlock();
e->notify(e->cpu_fd,e->cpu_inode,e->size);
return fd;
}
}
g_preclose_table->unlock();
// fetch the
cpu_e->open(filename,flags);
unsigned int cpu_inode=readNoCache(&cpu_e->cpu_inode);
int cpu_fd=readNoCache(&cpu_e->cpu_fd);
g_closed_ftable.lock_table(cpu_inode);
volatile rtree* fpages=g_closed_ftable.get(cpu_inode);
if (fpages!=NULL)
{
volatile rtree* fpages_old=g_ftable->files[fd].pages;
g_ftable->files[fd].pages=fpages;
g_closed_ftable.reset(cpu_inode, fpages_old);
}else{
g_ftable->files[fd].pages->file_id=getNewFileId();
}
g_closed_ftable.unlock_table(cpu_inode);
// make sure we flush the cache if the owner has changed
int cpu_flush_cache=readNoCache(&cpu_e->flush_cache);
if (cpu_flush_cache){
g_ftable->files[fd].pages->lock_for_flush();
if (g_ftable->files[fd].pages->count !=0)
{
g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree
}
g_ftable->files[fd].pages->file_id=getNewFileId();
g_ftable->files[fd].pages->unlock_after_flush();
}
size_t size=readNoCache(&cpu_e->size);
e->notify(cpu_fd,cpu_inode,size);
}
else {
e->wait_open();
}
if (e->cpu_fd < 0)
{
g_otable->lock();
e->refCount--;
if (e->refCount==0)
{
e->clean();
cpu_e->clean();
}
__threadfence();
g_otable->unlock();
return E_IPC_OPEN_ERROR;
}
return fd;
}
DEBUG_NOINLINE __device__ int gopen(char* filename, int flags){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_open(filename,flags);
END_SINGLE_THREAD;
return ret;
}
#define READ 0
#define WRITE 1
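// getRwLockedPage: return the page-cache entry for block_id of the given file.
// On first use the page is allocated and either read from the CPU (cpu_fd >= 0)
// or zero-filled (cpu_fd < 0); the caller releases it later with locker.unlock_rw()
// (see gread/gwrite/gmunmap).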
DEBUG_NOINLINE __device__ volatile FTable_page* getRwLockedPage(volatile FTable_entry* fentry, size_t block_id, int fd, int cpu_fd,int type_req){
__shared__ volatile FTable_page* fpage;
__shared__ FTable_page_locker::page_states_t pstate;
// try lockless path first
BEGIN_SINGLE_THREAD
int deadlock=0;
int file_id=fentry->pages->file_id;
while(1){
deadlock++;
GPU_ASSERT(deadlock<200);
pstate=FTable_page_locker::P_INIT;
fpage=fentry->pages->getLeaf(block_id,&pstate,0,type_req); // lockless first
if (fpage && pstate == FTable_page_locker::P_READY) {
// success?
GPU_ASSERT(fpage->frame);
if ((block_id<<FS_LOGBLOCKSIZE) == fpage->frame->file_offset && file_id == fpage->frame->file_id)
{
LOCKLESS_SUCCESS;
break;
}
}
fentry->pages->lock_tree();
fpage=fentry->pages->getLeaf(block_id,&pstate,1,type_req); // locked version - updates all the counters
fentry->pages->unlock_tree();
// TODO: handle file size!
// TODO: add reasonable dirty bitmap update here
// at this point we have 3 options
// 1. pstate == P_INIT => page is locked and needs to be inited
// 2. pstate == P_UNDEFINED => page is locked by some other process and we need to
// retry to getLeaf
// 3. pstate = P_RW => lock page with lock_init_rw
if (pstate == FTable_page_locker::P_UNDEFINED ) {
// we'd better block
PAGE_ALLOC_RETRIES
fpage->locker.lock_wait_unlock(); // just wait
continue;
}
break;
}
if (pstate == FTable_page_locker::P_INIT ){
GPU_ASSERT(fpage->locker.lock ==1);
GPU_ASSERT(fpage->frame==NULL);
// if we inited, the page is locked and we just keep going
/*** DEBUG
if (atomicAdd(&countInited[block_id],1)>=1) {
GPU_ASSERT(0);
}
**/
fpage->allocPage(file_id,block_id<<FS_LOGBLOCKSIZE);
//GPU_ASSERT((fpage->frame->file_offset)>=0);
if (cpu_fd>=0)
{
int datasize=read_cpu(cpu_fd,fpage->frame);
if (datasize < 0) {
// TODO: error handling
GPU_ASSERT("Failed to read data from CPU"==NULL);
}
fpage->frame->content_size=datasize;
}
if (type_req==PAGE_WRITE_ACCESS) fpage->markDirty();
}
GPU_ASSERT((pstate == FTable_page_locker::P_INIT && fpage->locker.lock) || (( pstate == FTable_page_locker::P_RW || pstate== FTable_page_locker::P_READY) && fpage->locker.rw_counter>0) );
// if we do not need to zero out the page (cpu_fd<0)
// if the page was initialized, return. Make sure to return with all threads active
if (pstate == FTable_page_locker::P_INIT && cpu_fd>=0 ) fpage->locker.unlock_init();
END_SINGLE_THREAD
if ((pstate == FTable_page_locker::P_INIT && cpu_fd >= 0 ) || pstate == FTable_page_locker::P_RW || pstate == FTable_page_locker::P_READY )
return fpage;
//fill the page with zeros - optimization for the case of write-once exclusive create owned by GPU
bzero_page((volatile char*)fpage->frame->page);
__threadfence(); // make sure all threads will see these zeros
BEGIN_SINGLE_THREAD
GPU_ASSERT(cpu_fd<0);
GPU_ASSERT(pstate == FTable_page_locker::P_INIT);
fpage->frame->content_size=0;
fpage->locker.unlock_init();
//GPU_ASSERT(fpage->frame->file_offset>=0);
END_SINGLE_THREAD
return fpage;
}
DEBUG_NOINLINE __device__ int gmsync(volatile void *addr, size_t length,int flags)
{
size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage);
// assert(tmp>=0);
size_t offset=tmp>>FS_LOGBLOCKSIZE;
GPU_ASSERT(offset<PPOOL_FRAMES);
__threadfence(); // make sure all writes to the page become visible
BEGIN_SINGLE_THREAD
volatile PFrame* p=&(g_ppool->frames[offset]);
volatile FTable_page* fp=p->fpage;
GPU_ASSERT(fp);
// super inefficient way to find which file this page belongs to
int i=0;
for( i=0;i<FSTABLE_SIZE;i++){
if (p->file_id == g_ftable->files[i].pages->file_id){
// no lock on page is required - last 0
writeback_page(g_otable->entries[i].cpu_fd,fp,g_otable->entries[i].flags,0);
break;
}
}
GPU_ASSERT(i!=FSTABLE_SIZE);
// if this assert fires it means that the file with that id was not
// found among open files. That's not valid becuase msync works only if the
// file is mapped -> it cannot be closed.
END_SINGLE_THREAD
return 0;
}
DEBUG_NOINLINE __device__ int gmunmap(volatile void *addr, size_t length)
{
size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage);
// assert(tmp>=0);
size_t offset=tmp>>FS_LOGBLOCKSIZE;
if (offset>=PPOOL_FRAMES) return -1;
__threadfence(); // make sure all writes to the page become visible
BEGIN_SINGLE_THREAD
volatile PFrame* p=&(g_ppool->frames[offset]);
volatile FTable_page* fp=p->fpage;
GPU_ASSERT(fp);
fp->locker.unlock_rw();
END_SINGLE_THREAD
return 0;
}
DEBUG_NOINLINE __device__ volatile void* gmmap(void *addr, size_t size,
int prot, int flags, int fd, off_t offset)
{
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ size_t block_id;
__shared__ int block_offset;
volatile FTable_page* fpage;
__shared__ int cpu_fd;
__shared__ volatile FTable_entry* fentry;
BEGIN_SINGLE_THREAD
fentry=&g_ftable->files[fd];
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
cpu_fd=g_otable->entries[fd].cpu_fd;
GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 );
if (block_offset+size > FS_BLOCKSIZE) assert("Reading beyond the page boundary"==0);
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
// decide whether to fetch data or not
if ( g_otable->entries[fd].flags == O_GWRONCE ) cpu_fd=-1;
END_SINGLE_THREAD
int purpose= (g_otable->entries[fd].flags == O_GRDONLY) ? PAGE_READ_ACCESS:PAGE_WRITE_ACCESS;
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, purpose);
BEGIN_SINGLE_THREAD
// page inited, just read; frame is a _shared_ mem variable
frame=fpage->frame;
//TODO: handle reading beyond eof
if (frame->content_size < block_offset+size && flags==O_GRDONLY)
{
GPU_ASSERT("Failed to map beyond the end of file"!=NULL);
}
if (flags!= O_GRDONLY) atomicMax((uint*)&(frame->content_size),block_offset+size);
END_SINGLE_THREAD
GPU_ASSERT(frame!=NULL);
return (void*)(((uchar*)(frame->page))+block_offset);
}
DEBUG_NOINLINE __device__ size_t gwrite(int fd,size_t offset, size_t size, uchar* buffer)
{
//attempt to write to a specific block
//if null -> allocate
// otherwise -> copy to bufcache
// mark dirty
// we ignore that we may run out of disk space
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
GPU_ASSERT( g_otable->entries[fd].refCount >0 );
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ size_t block_id;
__shared__ int block_offset;
__shared__ int cpu_fd;
__shared__ int written;
__shared__ volatile FTable_page* fpage;
__shared__ volatile FTable_entry* fentry;
BEGIN_SINGLE_THREAD
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
fentry=&g_ftable->files[fd];
cpu_fd=g_otable->entries[fd].cpu_fd;
if (g_otable->entries[fd].flags == O_GWRONCE || ( size == FS_BLOCKSIZE && block_offset==0))
{
// we will not read the data from CPU if (1) the file is ONLY_ONCE, or the writes are whole-page writes
cpu_fd=-1;
}
written=0;
END_SINGLE_THREAD
while(written<size){
int single_op=min((int)(size-written),(int)(FS_BLOCKSIZE-block_offset));
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
//TODO: handle reading beyond eof
// allow multiple threads to get into this function
// the value returned is correct only in thread 0
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, PAGE_WRITE_ACCESS);
BEGIN_SINGLE_THREAD
frame=fpage->frame;
atomicMax((uint*)&frame->content_size,block_offset+single_op);
fpage->markDirty();
END_SINGLE_THREAD
// go over the page and reset it if necessary
// when cpu_fd==-1 it will reset the page
copy_block((uchar*)(frame->page)+block_offset,buffer+written,single_op);
__threadfence(); // we must sync here otherwise swapper will be inconsistent
BEGIN_SINGLE_THREAD
written+=single_op;
fpage->locker.unlock_rw();
// the page is unlocked for flush only here.
block_id++;
block_offset=0;
END_SINGLE_THREAD;
}
return size;
}
// currently gread is expected to be issued by all threads in a thread block
// with the same parameters;
// only the parameters passed by thread idx == 0 are actually used, the rest are ignored
DEBUG_NOINLINE __device__ size_t gread(int fd, size_t offset, size_t size, uchar* buffer)
{
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ volatile FTable_page* fpage;
__shared__ size_t block_id;
__shared__ int block_offset;
__shared__ volatile FTable_entry* fentry;
__shared__ int cpu_fd;
__shared__ int data_read;
BEGIN_SINGLE_THREAD
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
fentry=&g_ftable->files[fd];
cpu_fd=g_otable->entries[fd].cpu_fd;
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 );
data_read=0;
END_SINGLE_THREAD
while(data_read<size){
int single_op=min((int)(size-data_read),(int)(FS_BLOCKSIZE-block_offset));
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
// syncthreads in getRwLockedPage
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd,PAGE_READ_ACCESS);
// page inited, just read; frame is a _shared_ mem variable
frame=fpage->frame;
//TODO: handle reading beyond eof
GPU_ASSERT(frame!=NULL);
copyNoCache_block(buffer+data_read,(uchar*)(frame->page)+block_offset,single_op);
BEGIN_SINGLE_THREAD
block_offset=0;
data_read+=single_op;
block_id++;
fpage->locker.unlock_rw();
END_SINGLE_THREAD
}
return size;
}
DEBUG_NOINLINE __device__ uint gunlink(char* filename)
{
GPU_ASSERT(NULL);
// to be implemented
return 0;
}
DEBUG_NOINLINE __device__ size_t fstat(int fd)
{
return g_otable->entries[fd].size;
}
#endif
| 3e0216c71b41b74a4389899ad6386256e5478685.cu | #ifndef FS_CALLS_CU
#define FS_CALLS_CU
#include "radix_tree.cu.h"
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "cpu_ipc.cu.h"
#include "mallocfree.cu.h"
#include "fs_structures.cu.h"
#include "timer.h"
#include "hash_table.cu.h"
#include "swapper.cu.h"
#include "fs_globals.cu.h"
#include "preclose_table.cu.h"
#include "fs_calls.cu.h"
// no reference counting here
DEBUG_NOINLINE __device__ int single_thread_fsync(int fd)
{
int res=0;
GPU_ASSERT(fd>=0);
volatile OTable_entry* e=&g_otable->entries[fd];
GPU_ASSERT(e->refCount>0);
volatile FTable_entry* file=&(g_ftable->files[fd]);
unsigned int inode=g_otable->entries[fd].cpu_inode;
GPU_ASSERT(fd>=0);
GPU_ASSERT(inode!=(unsigned int)-1);
// globally locking until everything is flushed
// this is a slow operation so we don't hold the g_otable lock
res=flush_cpu(file,e,e->flags);
if (res<0) {
// TODO: add error handling
GPU_ASSERT(NULL);
}
return res;
}
DEBUG_NOINLINE __device__ int gfsync(int fd){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_fsync(fd);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_ftruncate(int fd, int size)
{
GPU_ASSERT(size==0);
GPU_ASSERT(fd>=0);
volatile OTable_entry* e=&g_otable->entries[fd];
int res= truncate_cpu(e->cpu_fd)==0;
if (res==0)
{
e->size=0;
g_ftable->files[fd].pages->lock_for_flush();
if (g_ftable->files[fd].pages->count !=0)
{
g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree
}
g_ftable->files[fd].pages->unlock_after_flush();
}
return res;
}
DEBUG_NOINLINE __device__ int gftruncate(int fd,int size){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_ftruncate(fd,size);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_close(int fd)
{
GPU_ASSERT(fd>=0);
g_otable->lock();
volatile OTable_entry* e=&g_otable->entries[fd];
e->refCount--;
GPU_ASSERT(e->refCount>=0);
int res=0;
if (e->refCount>0 || e->status!=FSENTRY_OPEN) { __threadfence(); g_otable->unlock(); return 0;}
// lock in the opening thread
e->status=FSENTRY_CLOSING;
volatile FTable_entry* file=&(g_ftable->files[fd]);
unsigned int inode=g_otable->entries[fd].cpu_inode;
GPU_ASSERT(fd>=0);
GPU_ASSERT(inode!=(unsigned int)-1);
volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]);
if (file->pages->dirty_tree)
{
/// this file is dirty, so we put it into pre_close.
g_preclose_table->lock();
if( g_preclose_table->add(file,e))
GPU_ASSERT("Pre-close file table is full" == 0);
g_preclose_table->unlock();
// we do not close the file on a CPU
}
else{
// we do close now: we must hold a global lock on the otable
// because otherwise the thread which is opening a file will get
// a file handle for a closed file
// first, exchange the page cache for this file
g_closed_ftable.lock_table(inode);
// this might be a long because it deallocates and frees the tree
unsigned int drop_residence_inode=0;
file->pages=g_closed_ftable.exchange(inode, file->pages,&drop_residence_inode);
GPU_ASSERT(file->pages);
g_closed_ftable.unlock_table(inode);
res=cpu_e->close(g_otable->entries[fd].cpu_fd,drop_residence_inode);
if (res<0) {
// GPU_ASSERT(NULL);
}
}
cpu_e->clean();
file->clean();
e->clean();
__threadfence();
g_otable->unlock();
return res;
}
DEBUG_NOINLINE __device__ int gclose(int fd){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_close(fd);
END_SINGLE_THREAD;
return ret;
}
DEBUG_NOINLINE __device__ int single_thread_open(char* filename, int flags)
{
/*
Lock ftable
find entry
increase ref-count
Unlock ftable
if not found -> ret E_FTABLE_FULL
if (new_entry) -> send CPU open req
else -> wait on CPU open req
if (req failed) ->
Lock ftable
dec ref_count
if last -> delete entry
unlock ftable
*/
g_otable->lock();
bool isNewEntry=false;
int fd=g_otable->findEntry(filename,&isNewEntry,flags);
GPU_ASSERT(fd>=0);
if (fd<0) { g_otable->unlock(); return E_FSTABLE_FULL;}
volatile OTable_entry* e=&g_otable->entries[fd];
e->refCount++;
__threadfence();
g_otable->unlock();
volatile CPU_IPC_OPEN_Entry* cpu_e=&(g_cpu_ipcOpenQueue->entries[fd]);
if (isNewEntry)
{
g_preclose_table->lock();
if (g_preclose_table->size!=0) {
if (g_preclose_table->findEntry(e->filename,&g_ftable->files[fd],e) == 0)
{
g_preclose_table->unlock();
e->notify(e->cpu_fd,e->cpu_inode,e->size);
return fd;
}
}
g_preclose_table->unlock();
// fetch the
cpu_e->open(filename,flags);
unsigned int cpu_inode=readNoCache(&cpu_e->cpu_inode);
int cpu_fd=readNoCache(&cpu_e->cpu_fd);
g_closed_ftable.lock_table(cpu_inode);
volatile rtree* fpages=g_closed_ftable.get(cpu_inode);
if (fpages!=NULL)
{
volatile rtree* fpages_old=g_ftable->files[fd].pages;
g_ftable->files[fd].pages=fpages;
g_closed_ftable.reset(cpu_inode, fpages_old);
}else{
g_ftable->files[fd].pages->file_id=getNewFileId();
}
g_closed_ftable.unlock_table(cpu_inode);
// make sure we flush the cache if the owner has changed
int cpu_flush_cache=readNoCache(&cpu_e->flush_cache);
if (cpu_flush_cache){
g_ftable->files[fd].pages->lock_for_flush();
if (g_ftable->files[fd].pages->count !=0)
{
g_ftable->files[fd].pages->traverse_all(-1,true,0,0); // kill the tree
}
g_ftable->files[fd].pages->file_id=getNewFileId();
g_ftable->files[fd].pages->unlock_after_flush();
}
size_t size=readNoCache(&cpu_e->size);
e->notify(cpu_fd,cpu_inode,size);
}
else {
e->wait_open();
}
if (e->cpu_fd < 0)
{
g_otable->lock();
e->refCount--;
if (e->refCount==0)
{
e->clean();
cpu_e->clean();
}
__threadfence();
g_otable->unlock();
return E_IPC_OPEN_ERROR;
}
return fd;
}
DEBUG_NOINLINE __device__ int gopen(char* filename, int flags){
__shared__ int ret;
BEGIN_SINGLE_THREAD
ret=single_thread_open(filename,flags);
END_SINGLE_THREAD;
return ret;
}
#define READ 0
#define WRITE 1
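// getRwLockedPage: return the page-cache entry for block_id of the given file.
// On first use the page is allocated and either read from the CPU (cpu_fd >= 0)
// or zero-filled (cpu_fd < 0); the caller releases it later with locker.unlock_rw()
// (see gread/gwrite/gmunmap).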
DEBUG_NOINLINE __device__ volatile FTable_page* getRwLockedPage(volatile FTable_entry* fentry, size_t block_id, int fd, int cpu_fd,int type_req){
__shared__ volatile FTable_page* fpage;
__shared__ FTable_page_locker::page_states_t pstate;
// try lockless path first
BEGIN_SINGLE_THREAD
int deadlock=0;
int file_id=fentry->pages->file_id;
while(1){
deadlock++;
GPU_ASSERT(deadlock<200);
pstate=FTable_page_locker::P_INIT;
fpage=fentry->pages->getLeaf(block_id,&pstate,0,type_req); // lockless first
if (fpage && pstate == FTable_page_locker::P_READY) {
// success?
GPU_ASSERT(fpage->frame);
if ((block_id<<FS_LOGBLOCKSIZE) == fpage->frame->file_offset && file_id == fpage->frame->file_id)
{
LOCKLESS_SUCCESS;
break;
}
}
fentry->pages->lock_tree();
fpage=fentry->pages->getLeaf(block_id,&pstate,1,type_req); // locked version - updates all the counters
fentry->pages->unlock_tree();
// TODO: handle file size!
// TODO: add reasonable dirty bitmap update here
// at this point we have 3 options
// 1. pstate == P_INIT => page is locked and needs to be inited
// 2. pstate == P_UNDEFINED => page is locked by some other process and we need to
// retry to getLeaf
// 3. pstate = P_RW => lock page with lock_init_rw
if (pstate == FTable_page_locker::P_UNDEFINED ) {
// we'd better block
PAGE_ALLOC_RETRIES
fpage->locker.lock_wait_unlock(); // just wait
continue;
}
break;
}
if (pstate == FTable_page_locker::P_INIT ){
GPU_ASSERT(fpage->locker.lock ==1);
GPU_ASSERT(fpage->frame==NULL);
// if we inited, the page is locked and we just keep going
/*** DEBUG
if (atomicAdd(&countInited[block_id],1)>=1) {
GPU_ASSERT(0);
}
**/
fpage->allocPage(file_id,block_id<<FS_LOGBLOCKSIZE);
//GPU_ASSERT((fpage->frame->file_offset)>=0);
if (cpu_fd>=0)
{
int datasize=read_cpu(cpu_fd,fpage->frame);
if (datasize < 0) {
// TODO: error handling
GPU_ASSERT("Failed to read data from CPU"==NULL);
}
fpage->frame->content_size=datasize;
}
if (type_req==PAGE_WRITE_ACCESS) fpage->markDirty();
}
GPU_ASSERT((pstate == FTable_page_locker::P_INIT && fpage->locker.lock) || (( pstate == FTable_page_locker::P_RW || pstate== FTable_page_locker::P_READY) && fpage->locker.rw_counter>0) );
// if we do not need to zero out the page (cpu_fd<0)
// if the page was initialized, return. Make sure to return with all threads active
if (pstate == FTable_page_locker::P_INIT && cpu_fd>=0 ) fpage->locker.unlock_init();
END_SINGLE_THREAD
if ((pstate == FTable_page_locker::P_INIT && cpu_fd >= 0 ) || pstate == FTable_page_locker::P_RW || pstate == FTable_page_locker::P_READY )
return fpage;
//fill the page with zeros - optimization for the case of write-once exclusive create owned by GPU
bzero_page((volatile char*)fpage->frame->page);
__threadfence(); // make sure all threads will see these zeros
BEGIN_SINGLE_THREAD
GPU_ASSERT(cpu_fd<0);
GPU_ASSERT(pstate == FTable_page_locker::P_INIT);
fpage->frame->content_size=0;
fpage->locker.unlock_init();
//GPU_ASSERT(fpage->frame->file_offset>=0);
END_SINGLE_THREAD
return fpage;
}
DEBUG_NOINLINE __device__ int gmsync(volatile void *addr, size_t length,int flags)
{
size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage);
// assert(tmp>=0);
size_t offset=tmp>>FS_LOGBLOCKSIZE;
GPU_ASSERT(offset<PPOOL_FRAMES);
__threadfence(); // make sure all writes to the page become visible
BEGIN_SINGLE_THREAD
volatile PFrame* p=&(g_ppool->frames[offset]);
volatile FTable_page* fp=p->fpage;
GPU_ASSERT(fp);
// super inefficient way to find which file this page belongs to
int i=0;
for( i=0;i<FSTABLE_SIZE;i++){
if (p->file_id == g_ftable->files[i].pages->file_id){
// no lock on page is required - last 0
writeback_page(g_otable->entries[i].cpu_fd,fp,g_otable->entries[i].flags,0);
break;
}
}
GPU_ASSERT(i!=FSTABLE_SIZE);
// if this assert fires it means that the file with that id was not
// found among open files. That's not valid becuase msync works only if the
// file is mapped -> it cannot be closed.
END_SINGLE_THREAD
return 0;
}
DEBUG_NOINLINE __device__ int gmunmap(volatile void *addr, size_t length)
{
size_t tmp=((char*)addr)- ((char*)g_ppool->rawStorage);
// assert(tmp>=0);
size_t offset=tmp>>FS_LOGBLOCKSIZE;
if (offset>=PPOOL_FRAMES) return -1;
__threadfence(); // make sure all writes to the page become visible
BEGIN_SINGLE_THREAD
volatile PFrame* p=&(g_ppool->frames[offset]);
volatile FTable_page* fp=p->fpage;
GPU_ASSERT(fp);
fp->locker.unlock_rw();
END_SINGLE_THREAD
return 0;
}
DEBUG_NOINLINE __device__ volatile void* gmmap(void *addr, size_t size,
int prot, int flags, int fd, off_t offset)
{
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ size_t block_id;
__shared__ int block_offset;
volatile FTable_page* fpage;
__shared__ int cpu_fd;
__shared__ volatile FTable_entry* fentry;
BEGIN_SINGLE_THREAD
fentry=&g_ftable->files[fd];
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
cpu_fd=g_otable->entries[fd].cpu_fd;
GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 );
if (block_offset+size > FS_BLOCKSIZE) assert("Reading beyond the page boundary"==0);
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
// decide whether to fetch data or not
if ( g_otable->entries[fd].flags == O_GWRONCE ) cpu_fd=-1;
END_SINGLE_THREAD
int purpose= (g_otable->entries[fd].flags == O_GRDONLY) ? PAGE_READ_ACCESS:PAGE_WRITE_ACCESS;
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, purpose);
BEGIN_SINGLE_THREAD
// page inited, just read; frame is a _shared_ mem variable
frame=fpage->frame;
//TODO: handle reading beyond eof
if (frame->content_size < block_offset+size && flags==O_GRDONLY)
{
GPU_ASSERT("Failed to map beyond the end of file"!=NULL);
}
if (flags!= O_GRDONLY) atomicMax((uint*)&(frame->content_size),block_offset+size);
END_SINGLE_THREAD
GPU_ASSERT(frame!=NULL);
return (void*)(((uchar*)(frame->page))+block_offset);
}
DEBUG_NOINLINE __device__ size_t gwrite(int fd,size_t offset, size_t size, uchar* buffer)
{
//attempt to write to a specific block
//if null -> allocate
// otherwise -> copy to bufcache
// mark dirty
// we ignore that we may run out of disk space
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
GPU_ASSERT( g_otable->entries[fd].refCount >0 );
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ size_t block_id;
__shared__ int block_offset;
__shared__ int cpu_fd;
__shared__ int written;
__shared__ volatile FTable_page* fpage;
__shared__ volatile FTable_entry* fentry;
BEGIN_SINGLE_THREAD
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
fentry=&g_ftable->files[fd];
cpu_fd=g_otable->entries[fd].cpu_fd;
if (g_otable->entries[fd].flags == O_GWRONCE || ( size == FS_BLOCKSIZE && block_offset==0))
{
// we will not read the data from CPU if (1) the file is ONLY_ONCE, or the writes are whole-page writes
cpu_fd=-1;
}
written=0;
END_SINGLE_THREAD
while(written<size){
int single_op=min((int)(size-written),(int)(FS_BLOCKSIZE-block_offset));
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
//TODO: handle reading beyond eof
// allow multiple threads to get into this function
// the value returned is correct only in thread 0
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd, PAGE_WRITE_ACCESS);
BEGIN_SINGLE_THREAD
frame=fpage->frame;
atomicMax((uint*)&frame->content_size,block_offset+single_op);
fpage->markDirty();
END_SINGLE_THREAD
// go over the page and reset it if necessary
// when cpu_fd==-1 it will reset the page
copy_block((uchar*)(frame->page)+block_offset,buffer+written,single_op);
__threadfence(); // we must sync here otherwise swapper will be inconsistent
BEGIN_SINGLE_THREAD
written+=single_op;
fpage->locker.unlock_rw();
// the page is unlocked for flush only here.
block_id++;
block_offset=0;
END_SINGLE_THREAD;
}
return size;
}
// currently gread is expected to be issued by all threads in a thread block
// with the same parameters;
// only the parameters passed by thread idx == 0 are actually used, the rest are ignored
DEBUG_NOINLINE __device__ size_t gread(int fd, size_t offset, size_t size, uchar* buffer)
{
__shared__ volatile PFrame* frame; // the ptr is to global mem but is stored in shmem
__shared__ volatile FTable_page* fpage;
__shared__ size_t block_id;
__shared__ int block_offset;
__shared__ volatile FTable_entry* fentry;
__shared__ int cpu_fd;
__shared__ int data_read;
BEGIN_SINGLE_THREAD
block_id=offset2block(offset,FS_LOGBLOCKSIZE);
block_offset=offset2blockoffset(offset,FS_BLOCKSIZE);
fentry=&g_ftable->files[fd];
cpu_fd=g_otable->entries[fd].cpu_fd;
GPU_ASSERT(fd>=0 && fd<MAX_NUM_FILES);
GPU_ASSERT( cpu_fd >=0 && g_otable->entries[fd].refCount >0 );
data_read=0;
END_SINGLE_THREAD
while(data_read<size){
int single_op=min((int)(size-data_read),(int)(FS_BLOCKSIZE-block_offset));
GPU_ASSERT(block_id<MAX_BLOCKS_PER_FILE);
// syncthreads in getRwLockedPage
fpage=getRwLockedPage(fentry,block_id,fd,cpu_fd,PAGE_READ_ACCESS);
// page inited, just read; frame is a _shared_ mem variable
frame=fpage->frame;
//TODO: handle reading beyond eof
GPU_ASSERT(frame!=NULL);
copyNoCache_block(buffer+data_read,(uchar*)(frame->page)+block_offset,single_op);
BEGIN_SINGLE_THREAD
block_offset=0;
data_read+=single_op;
block_id++;
fpage->locker.unlock_rw();
END_SINGLE_THREAD
}
return size;
}
DEBUG_NOINLINE __device__ uint gunlink(char* filename)
{
GPU_ASSERT(NULL);
// to be implemented
return 0;
}
DEBUG_NOINLINE __device__ size_t fstat(int fd)
{
return g_otable->entries[fd].size;
}
#endif
|
50ea4bcf6a00b38726034eea96bc6692656230e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// richu shaji abraham richursa
using namespace std;
__global__ void scatter(int *d_array , int *d_scanArray , int *d_predicateArrry,int * d_scatteredArray ,int d_numberOfElements,int offset)
{
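// d_scanArray is assumed to be the inclusive prefix sum of d_predicateArrry, so
// d_scanArray[index]-1 is the compacted output slot for a kept element; offset
// shifts the output base, e.g. when packing several partitions into one array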
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < d_numberOfElements)
{
if(d_predicateArrry[index] == 1)
{
d_scatteredArray[d_scanArray[index] - 1 +offset ] = d_array[index];
}
}
} | 50ea4bcf6a00b38726034eea96bc6692656230e5.cu | #include "includes.h"
// richu shaji abraham richursa
using namespace std;
__global__ void scatter(int *d_array , int *d_scanArray , int *d_predicateArrry,int * d_scatteredArray ,int d_numberOfElements,int offset)
{
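// d_scanArray is assumed to be the inclusive prefix sum of d_predicateArrry, so
// d_scanArray[index]-1 is the compacted output slot for a kept element; offset
// shifts the output base, e.g. when packing several partitions into one array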
int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < d_numberOfElements)
{
if(d_predicateArrry[index] == 1)
{
d_scatteredArray[d_scanArray[index] - 1 +offset ] = d_array[index];
}
}
} |
6cdbabe56de6b1817138dd2ba5578e80508049f0.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
//Buffer containing a pointer for each boid to its data in dev_pos and dev_vel1 and dev_vel2
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
// Buffer containing the grid index for each boid.
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
// Buffer containing a pointer for each cell to the begining of dev_particleArrayIndices
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
// Buffer containing a pointer for each cell to the end of dev_particleArrayIndices
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_coherent_pos;
glm::vec3 *dev_coherent_vel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with random positions scattered around the origin.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
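// the cell width is twice the largest rule distance, so a boid's search radius
// spans at most 2 cells per axis (8 neighboring cells in total)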
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
//Uniform Grid Buffers
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// Coherent Buffers
hipMalloc((void**)&dev_coherent_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_coherent_pos failed!");
hipMalloc((void**)&dev_coherent_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_coherent_vel1 failed!");
// Thrust buffers, used for parallel sorting
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
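// these wrappers let thrust operate directly on the raw device arrays, e.g.
// thrust::sort_by_key(dev_thrust_particleGridIndices,
// dev_thrust_particleGridIndices + numObjects,
// dev_thrust_particleArrayIndices);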
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* boidCohesionRuleNaive()
* boids move towards the perceived center of mass of their neighbors
*
 * Cohesion depends solely on the position of each boid, so we want to calculate the center of mass.
* Assuming each boid weighs the same, the center of mass is simply the average position.
* Therefore, we add each component of each boid and divide.
* NOTE: For the Naive implementation, this means each thread is going to be doing the same exact work.
* That is super bad and goes against the idea of distributing work. This becomes a non-issue
* when each boid looks only at their local neighbors, as each boid will have a different subset
* to look at.
*/
__device__ glm::vec3 boidCohesionRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_center(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule1Distance)) {
perceived_center += pos[i];
neighbors++;
}
}
if (neighbors) {
perceived_center /= neighbors;
result = (perceived_center - selfPos) * rule1Scale;
}
return result;
}
/**
* boidCohesionRuleGrid()
* boids move towards the perceived center of mass of their neighbors
*
 * Cohesion depends solely on the position of each boid, so we want to calculate the center of mass.
* Assuming each boid weighs the same, the center of mass is simply the average position.
* Therefore, we add each component of each boid and divide.
*/
__device__ glm::vec3 boidCohesionRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_center(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule1Distance)) {
perceived_center += pos[boid_idx];
neighbors++;
}
}
if (neighbors) {
perceived_center /= neighbors;
result = (perceived_center - selfPos) * rule1Scale;
}
return result;
}
/**
* boidSeperationRuleNaive()
 * boids avoid getting too close to their neighbors
*
* In this rule, the boid is repulsed by nearby boids. To represent that, we take the distance
 * between the boid and the neighbor boids and add the distance between the two as a scaled negative velocity.
* This has the effect of pushing each boid away from his neighbors. Note that a boid on either side will contribute
* to opposite directions.
*/
__device__ glm::vec3 boidSeperationRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 seperation(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule2Distance)) {
seperation -= pos[i] - selfPos;
neighbors++;
}
}
if (neighbors) {
result = seperation * rule2Scale;
}
return result;
}
/**
* boidSeperationRuleGrid()
 * boids avoid getting too close to their neighbors
*
* In this rule, the boid is repulsed by nearby boids. To represent that, we take the distance
 * between the boid and the neighbor boids and add the distance between the two as a scaled negative velocity.
* This has the effect of pushing each boid away from his neighbors. Note that a boid on either side will contribute
* to opposite directions.
*/
__device__ glm::vec3 boidSeperationRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 seperation(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule2Distance)) {
seperation -= pos[boid_idx] - selfPos;
neighbors++;
}
}
if (neighbors) {
result = seperation * rule2Scale;
}
return result;
}
/**
* boidAlignmentRuleNaive()
* boids generally try to move with the same direction and speed as their neighbors
*
 * Boids want to match the velocity of their neighbors at t=a, so they will adjust their velocity accordingly.
* After each round, at t=a+dt, each boid will apply their change.
*/
__device__ glm::vec3 boidAlignmentRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_velocity(0.0f);
glm::vec3 selfPos = pos[iSelf];
glm::vec3 selfVelocity = vel[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule3Distance)) {
perceived_velocity += vel[i];
neighbors++;
}
}
if (neighbors) {
perceived_velocity /= neighbors;
result = perceived_velocity * rule3Scale;
}
return result;
}
/**
* boidAlignmentRuleGrid()
* boids generally try to move with the same direction and speed as their neighbors
*
 * Boids want to match the velocity of their neighbors at t=a, so they will adjust their velocity accordingly.
* After each round, at t=a+dt, each boid will apply their change.
*/
__device__ glm::vec3 boidAlignmentRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_velocity(0.0f);
glm::vec3 selfPos = pos[iSelf];
glm::vec3 selfVelocity = vel[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule3Distance)) {
perceived_velocity += vel[boid_idx];
neighbors++;
}
}
if (neighbors) {
perceived_velocity /= neighbors;
result = perceived_velocity * rule3Scale;
}
return result;
}
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 delta(0.0f);
// Apply each rule.
delta += boidCohesionRuleNaive(N, iSelf, pos, vel);
delta += boidSeperationRuleNaive(N, iSelf, pos, vel);
delta += boidAlignmentRuleNaive(N, iSelf, pos, vel);
return delta;
}
/**
* TODO-1.2 implement basic flocking
 * For each of the `N` bodies, compute its new velocity based on the current positions and velocities of its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
// Answer: Other threads may still be reading vel1!!
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 curntV = vel1[index];
glm::vec3 deltaV = computeVelocityChange(N, index, pos, vel1);
glm::vec3 newV = curntV + deltaV;
// Clamp the speed. We do it this way to ensure that the total velocity is clamped,
// not just the velocity in each direction (otherwise glm::clamp would be nice).
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
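/**
 * A small helper capturing the speed-clamp pattern used above (an illustrative sketch;
 * clampSpeed is not part of the original code). Clamping the vector's magnitude, rather
 * than each component, preserves the direction of the velocity.
 */
__device__ glm::vec3 clampSpeed(glm::vec3 v, float limit) {
  float speed = glm::length(v);
  // Only rescale when the magnitude exceeds the limit; a zero vector is left untouched.
  return (speed > limit) ? v * (limit / speed) : v;
}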
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
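/**
 * The inverse of gridIndex3Dto1D, shown as an illustrative sketch (gridIndex1Dto3D is not
 * part of the original code). Because x varies fastest in the 1D index, iterating neighbor
 * cells with z as the outer loop and x as the inner loop visits consecutive 1D indices,
 * which is the memory-friendly order hinted at by LOOK-2.3.
 */
__device__ glm::ivec3 gridIndex1Dto3D(int idx, int gridResolution) {
  int x = idx % gridResolution;
  int y = (idx / gridResolution) % gridResolution;
  int z = idx / (gridResolution * gridResolution);
  return glm::ivec3(x, y, z);
}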
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 myGridPos = pos[index];
int myGridIdx = 0;
    // Subtract the grid minimum to move the +/- halfWidth range into the 0 - fullWidth range.
myGridPos -= gridMin;
// Cells are cubes, so all dimensions are identical, divide each pos by cell width
myGridPos *= inverseCellWidth;
// Round down to throw away float garbage!
myGridPos = glm::floor(myGridPos);
// Compute a 1D index from the 3D index
myGridIdx = gridIndex3Dto1D(myGridPos.x, myGridPos.y, myGridPos.z, gridResolution);
// Store the grid index in the indices buffer using the boid IDX as the key
// and the index in and index buffer. These two will be sorted in parallel.
// The end result will be that indices will be sorted by myGridIdx so consecutive
// boids will have their indices colocated in memory.
gridIndices[index] = myGridIdx;
indices[index] = index;
}
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Collect some information
int myGridIdx = particleGridIndices[index];
// Always Start if first cell
if ((index == 0) || particleGridIndices[index] != particleGridIndices[index - 1]) {
gridCellStartIndices[myGridIdx] = index; // Start of grid myGridIdx is boid at index
}
// Always End if last cell
if ((index == (N - 1)) || (particleGridIndices[index] != particleGridIndices[index + 1])) {
gridCellEndIndices[myGridIdx] = index;
}
}
}
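/**
 * For reference, a sequential CPU equivalent of kernIdentifyCellStartEnd (an illustrative
 * sketch, not used by the simulation): the "parallel unrolling" above is just this scan
 * over the sorted grid indices, recording where each run of equal values begins and ends.
 */
inline void identifyCellStartEndCPU(int N, const int *particleGridIndices,
                                    int *gridCellStartIndices, int *gridCellEndIndices) {
  for (int i = 0; i < N; i++) {
    int cell = particleGridIndices[i];
    if (i == 0 || particleGridIndices[i] != particleGridIndices[i - 1]) {
      gridCellStartIndices[cell] = i;  // first boid belonging to this cell
    }
    if (i == N - 1 || particleGridIndices[i] != particleGridIndices[i + 1]) {
      gridCellEndIndices[cell] = i;    // last boid belonging to this cell
    }
  }
}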
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Identify the grid cell that this particle is in.
glm::vec3 myPos = pos[index];
glm::vec3 myGridPos = myPos - gridMin;
myGridPos *= inverseCellWidth; // Just like in kernComputeIndices()
myGridPos = glm::floor(myGridPos); // Just like in kernComputeIndices()
// Identify which cells contain neighbors
    // Want to find each grid cell where one of our rules can apply. Therefore, we need the maximum distance over our rules.
float neighbor_distance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
// Create a vector with this value.
glm::vec3 neighbor_distance_vec(neighbor_distance);
// Remember, everything is a cube! If we check the corners of the cube, we will know
// what the rest of the cube looks like. ie, if min is {0, 0, 0} and max is {1, 1, 1},
    // we know where each of the other corners lie. (Also true for rectangular boxes)
glm::vec3 min_neighbor_cell = glm::floor((myPos - gridMin - neighbor_distance) * inverseCellWidth);
glm::vec3 max_neighbor_cell = glm::floor((myPos - gridMin + neighbor_distance) * inverseCellWidth);
    // Now, if myPos is situated on any of the axes of the cell, then min and/or max may not change.
    // This is clear from the case where myPos is in the middle of the cell. In that case, if the cellWidth
    // is equal to the neighbor_distance, then the cube will consist of only the cell.
    // So we don't need to check the cell interior edge conditions, those are covered already.
    // Issue: What about the edge of the grid? The computed cells could fall outside of it.
    // We handle that here by clamping the search range to valid cell indices, so cells beyond
    // the grid boundary are simply never visited.
    glm::vec3 min_cell_search = glm::clamp(min_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
    glm::vec3 max_cell_search = glm::clamp(max_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
// After all that work, we now start applying rules! Instead of searching over N boids, we will search over the boids
// in each cell between min_cell_search and max_cell_search only.
    // I can already see how making the boids coherent in memory helps simplify this, but that's for later.
glm::vec3 velocity_change(0.0f);
glm::vec3 selfPos = pos[index];
glm::vec3 selfVelocity = vel1[index];
glm::vec3 alignment_perceived_velocity(0.0f);
glm::vec3 cohesion_perceived_center(0.0f);
glm::vec3 seperation(0.0f);
    int alignment_neighbors = 0;
    int cohesion_neighbors = 0;
    int seperation_neighbors = 0;
for (float z = min_cell_search.z; z <= max_cell_search.z; z++) {
for (float y = min_cell_search.y; y <= max_cell_search.y; y++) {
for (float x = min_cell_search.x; x <= max_cell_search.x; x++) {
// Yikes! Triple for loop??? Not that bad, min and max differ by at most one.
// Calculate grid index of the grid under inspection
int gridIdx = gridIndex3Dto1D(x, y, z, gridResolution);
// Get the start and end indices for the boids
int b_start = gridCellStartIndices[gridIdx];
int b_end = gridCellEndIndices[gridIdx];
// Check if any values are -1, meaning an empty cell.
          // Also make sure the indices stay below N; we don't want to read out of bounds.
          if (b_start < 0 || b_start >= N || b_end < 0 || b_end >= N) {
continue;
}
// We now have the boids we need. Run each rule over the range of boids.
// WOW: Unrolling these calls gave a 2.27X performance boost!!!! Cache benefits?
//velocity_change += boidAlignmentRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidCohesionRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidSeperationRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
for (int i = b_start; i <= b_end; i++) {
int boid_idx = particleArrayIndices[i];
if (index == boid_idx) { // Dip out early.
continue;
}
// Get relevant data
glm::vec3 boid_pos = pos[boid_idx];
float distance = glm::distance(selfPos, boid_pos);
// Cohesion
if (distance < rule1Distance) {
cohesion_perceived_center += boid_pos;
cohesion_neighbors++;
}
// Seperation
if (distance < rule2Distance) {
seperation -= boid_pos - selfPos;
seperation_neighbors++;
}
// Alignment
if (distance < rule3Distance) {
alignment_perceived_velocity += vel1[boid_idx];
alignment_neighbors++;
}
}
}
}
}
// Finalize Cohesion values
if (cohesion_neighbors) {
cohesion_perceived_center /= cohesion_neighbors;
velocity_change += (cohesion_perceived_center - selfPos) * rule1Scale;
}
// Finalize Seperation Values
if (seperation_neighbors) {
velocity_change += seperation * rule2Scale;
}
// Finalize Alignment Values
if (alignment_neighbors) {
alignment_perceived_velocity /= alignment_neighbors;
velocity_change += alignment_perceived_velocity * rule3Scale;
}
// Calculated total velocity change! Now apply, clamp, and store.
glm::vec3 newV = selfVelocity + velocity_change;
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
}
__global__ void kernRearrangeCoherentData(int N, int* indices, glm::vec3 *scattered_pos, glm::vec3 *coherent_pos, glm::vec3 *scattered_vel1, glm::vec3 *coherent_vel1) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
    int bIdx = indices[index];
coherent_pos[index] = scattered_pos[bIdx];
coherent_vel1[index] = scattered_vel1[bIdx];
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Identify the grid cell that this particle is in.
glm::vec3 myPos = pos[index];
glm::vec3 myGridPos = myPos - gridMin;
myGridPos *= inverseCellWidth; // Just like in kernComputeIndices()
myGridPos = glm::floor(myGridPos); // Just like in kernComputeIndices()
// Identify which cells contain neighbors
    // Want to find each grid cell where one of our rules can apply. Therefore, we need the maximum distance over our rules.
float neighbor_distance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
// Create a vector with this value.
glm::vec3 neighbor_distance_vec(neighbor_distance);
// Remember, everything is a cube! If we check the corners of the cube, we will know
// what the rest of the cube looks like. ie, if min is {0, 0, 0} and max is {1, 1, 1},
    // we know where each of the other corners lie. (Also true for rectangular boxes)
glm::vec3 min_neighbor_cell = glm::floor((myPos - gridMin - neighbor_distance) * inverseCellWidth);
glm::vec3 max_neighbor_cell = glm::floor((myPos - gridMin + neighbor_distance) * inverseCellWidth);
    // Now, if myPos is situated on any of the axes of the cell, then min and/or max may not change.
    // This is clear from the case where myPos is in the middle of the cell. In that case, if the cellWidth
    // is equal to the neighbor_distance, then the cube will consist of only the cell.
    // So we don't need to check the cell interior edge conditions, those are covered already.
    // Issue: What about the edge of the grid? The computed cells could fall outside of it.
    // We handle that here by clamping the search range to valid cell indices, so cells beyond
    // the grid boundary are simply never visited.
    glm::vec3 min_cell_search = glm::clamp(min_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
    glm::vec3 max_cell_search = glm::clamp(max_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
// After all that work, we now start applying rules! Instead of searching over N boids, we will search over the boids
// in each cell between min_cell_search and max_cell_search only.
    // Here the boid data has already been made coherent in memory, which is what lets us index pos and vel1 directly.
glm::vec3 velocity_change(0.0f);
glm::vec3 selfPos = pos[index];
glm::vec3 selfVelocity = vel1[index];
glm::vec3 alignment_perceived_velocity(0.0f);
glm::vec3 cohesion_perceived_center(0.0f);
glm::vec3 seperation(0.0f);
    int alignment_neighbors = 0;
    int cohesion_neighbors = 0;
    int seperation_neighbors = 0;
for (float z = min_cell_search.z; z <= max_cell_search.z; z++) {
for (float y = min_cell_search.y; y <= max_cell_search.y; y++) {
for (float x = min_cell_search.x; x <= max_cell_search.x; x++) {
// Yikes! Triple for loop??? Not that bad, min and max differ by at most one.
// Calculate grid index of the grid under inspection
int gridIdx = gridIndex3Dto1D(x, y, z, gridResolution);
// Get the start and end indices for the boids
int b_start = gridCellStartIndices[gridIdx];
int b_end = gridCellEndIndices[gridIdx];
// Check if any values are -1, meaning an empty cell.
          // Also make sure the indices stay below N; we don't want to read out of bounds.
          if (b_start < 0 || b_start >= N || b_end < 0 || b_end >= N) {
continue;
}
// We now have the boids we need. Run each rule over the range of boids.
// WOW: Unrolling these calls gave a 2.27X performance boost!!!! Cache benefits?
//velocity_change += boidAlignmentRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidCohesionRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidSeperationRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
for (int i = b_start; i <= b_end; i++) {
if (index == i) { // Dip out early.
continue;
}
// Get relevant data
glm::vec3 boid_pos = pos[i];
float distance = glm::distance(selfPos, boid_pos);
// Cohesion
if (distance < rule1Distance) {
cohesion_perceived_center += boid_pos;
cohesion_neighbors++;
}
// Seperation
if (distance < rule2Distance) {
seperation -= boid_pos - selfPos;
seperation_neighbors++;
}
// Alignment
if (distance < rule3Distance) {
alignment_perceived_velocity += vel1[i];
alignment_neighbors++;
}
}
}
}
}
// Finalize Cohesion values
if (cohesion_neighbors) {
cohesion_perceived_center /= cohesion_neighbors;
velocity_change += (cohesion_perceived_center - selfPos) * rule1Scale;
}
// Finalize Seperation Values
if (seperation_neighbors) {
velocity_change += seperation * rule2Scale;
}
// Finalize Alignment Values
if (alignment_neighbors) {
alignment_perceived_velocity /= alignment_neighbors;
velocity_change += alignment_perceived_velocity * rule3Scale;
}
// Calculated total velocity change! Now apply, clamp, and store.
glm::vec3 newV = selfVelocity + velocity_change;
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2); // Use new velocity!
// Swap buffers, ping pong!
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Run the index labeling kernel
hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices
);
checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Use Thrust API to sort the indices...
thrust::sort_by_key(
dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices
);
  // Locate start and stop indices
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer1 failed!");
hipLaunchKernelGGL(( kernResetIntBuffer), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer2 failed!");
hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices
);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// The fun part! Calculate velocity changes
hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos,
dev_vel1,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// Update positions
hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
numObjects,
dt,
dev_pos,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Swap buffers and you're done!
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Run the index labeling kernel
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices
);
checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Use Thrust API to sort the indices...
thrust::sort_by_key(
dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices
);
  // Locate start and stop indices
kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer1 failed!");
kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer2 failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices
);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // Difference is here. We want the values in gridCellStart and gridCellEnd to be the exact indices of each
  // boid in the pos/vel arrays. This means we must rearrange the pos/vel arrays appropriately.
  // We can do this in a kernel! Create additional coherent pos and vel1 buffers, then copy the old pos
  // and vel1 arrays into them based on the values in dev_particleArrayIndices.
kernRearrangeCoherentData << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dev_particleArrayIndices,
dev_pos,
dev_coherent_pos,
dev_vel1,
dev_coherent_vel1
);
  // Data has been sorted coherently, copy it back to the original buffers.
hipMemcpy(dev_pos, dev_coherent_pos, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
hipMemcpy(dev_vel1, dev_coherent_vel1, numObjects * sizeof(glm::vec3), hipMemcpyDeviceToDevice);
// The fun part! Calculate velocity changes
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_pos,
dev_vel1,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dt,
dev_pos,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Swap buffers and you're done!
std::swap(dev_vel1, dev_vel2);
}
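/**
 * A possible refinement of the coherent step above (an illustrative note, not the
 * implementation used here): instead of copying dev_coherent_pos / dev_coherent_vel1 back
 * into dev_pos / dev_vel1 with two device-to-device copies, the coherent buffers could be
 * passed straight into kernUpdateVelNeighborSearchCoherent and kernUpdatePos, with the
 * raw pointers swapped afterwards, e.g. std::swap(dev_pos, dev_coherent_pos) and
 * std::swap(dev_vel1, dev_coherent_vel1). That trades two copies per step for a little
 * pointer bookkeeping.
 */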
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 6cdbabe56de6b1817138dd2ba5578e80508049f0.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
//Buffer containing a pointer for each boid to its data in dev_pos and dev_vel1 and dev_vel2
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
// Buffer containing the grid index for each boid.
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
// Buffer containing a pointer for each cell to the begining of dev_particleArrayIndices
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
// Buffer containing a pointer for each cell to the end of dev_particleArrayIndices
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_coherent_pos;
glm::vec3 *dev_coherent_vel1;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
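/**
 * A minimal host-side sanity check (an illustrative sketch; printSampleRandomVec3 is not
 * part of the original code). Since generateRandomVec3 is __host__ __device__, the same
 * hash-seeded distribution can be inspected on the CPU before launching any kernels.
 */
inline void printSampleRandomVec3(float time, int index) {
  glm::vec3 v = generateRandomVec3(time, index);
  printf("rand[%d] = (%f, %f, %f)\n", index, v.x, v.y, v.z);
}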
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
//Uniform Grid Buffers
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
// Coherent Buffers
cudaMalloc((void**)&dev_coherent_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_coherent_pos failed!");
cudaMalloc((void**)&dev_coherent_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_coherent_vel1 failed!");
  // Thrust buffers, used for parallel sorting
dev_thrust_particleArrayIndices = thrust::device_pointer_cast<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_pointer_cast<int>(dev_particleGridIndices);
cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* boidCohesionRuleNaive()
* boids move towards the perceived center of mass of their neighbors
*
 * Cohesion depends solely on the position of each boid, so we want to calculate the center of mass.
* Assuming each boid weighs the same, the center of mass is simply the average position.
* Therefore, we add each component of each boid and divide.
* NOTE: For the Naive implementation, this means each thread is going to be doing the same exact work.
* That is super bad and goes against the idea of distributing work. This becomes a non-issue
* when each boid looks only at their local neighbors, as each boid will have a different subset
* to look at.
*/
__device__ glm::vec3 boidCohesionRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_center(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule1Distance)) {
perceived_center += pos[i];
neighbors++;
}
}
if (neighbors) {
perceived_center /= neighbors;
result = (perceived_center - selfPos) * rule1Scale;
}
return result;
}
/**
* boidCohesionRuleGrid()
* boids move towards the perceived center of mass of their neighbors
*
 * Cohesion depends solely on the position of each boid, so we want to calculate the center of mass.
* Assuming each boid weighs the same, the center of mass is simply the average position.
* Therefore, we add each component of each boid and divide.
*/
__device__ glm::vec3 boidCohesionRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_center(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule1Distance)) {
perceived_center += pos[boid_idx];
neighbors++;
}
}
if (neighbors) {
perceived_center /= neighbors;
result = (perceived_center - selfPos) * rule1Scale;
}
return result;
}
/**
* boidSeperationRuleNaive()
 * boids avoid getting too close to their neighbors
*
* In this rule, the boid is repulsed by nearby boids. To represent that, we take the distance
 * between the boid and the neighbor boids and add the distance between the two as a scaled negative velocity.
* This has the effect of pushing each boid away from his neighbors. Note that a boid on either side will contribute
* to opposite directions.
*/
__device__ glm::vec3 boidSeperationRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 seperation(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule2Distance)) {
seperation -= pos[i] - selfPos;
neighbors++;
}
}
if (neighbors) {
result = seperation * rule2Scale;
}
return result;
}
/**
* boidSeperationRuleGrid()
 * boids avoid getting too close to their neighbors
*
* In this rule, the boid is repulsed by nearby boids. To represent that, we take the distance
 * between the boid and the neighbor boids and add the distance between the two as a scaled negative velocity.
* This has the effect of pushing each boid away from his neighbors. Note that a boid on either side will contribute
* to opposite directions.
*/
__device__ glm::vec3 boidSeperationRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 seperation(0.0f);
glm::vec3 selfPos = pos[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule2Distance)) {
seperation -= pos[boid_idx] - selfPos;
neighbors++;
}
}
if (neighbors) {
result = seperation * rule2Scale;
}
return result;
}
/**
* boidAlignmentRuleNaive()
* boids generally try to move with the same direction and speed as their neighbors
*
 * Boids want to match the velocity of their neighbors at t=a, so they will adjust their velocity accordingly.
* After each round, at t=a+dt, each boid will apply their change.
*/
__device__ glm::vec3 boidAlignmentRuleNaive(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_velocity(0.0f);
glm::vec3 selfPos = pos[iSelf];
glm::vec3 selfVelocity = vel[iSelf];
float neighbors = 0.0f;
for (int i = 0; i < N; i++) {
if ((i != iSelf) && (glm::distance(selfPos, pos[i]) < rule3Distance)) {
perceived_velocity += vel[i];
neighbors++;
}
}
if (neighbors) {
perceived_velocity /= neighbors;
result = perceived_velocity * rule3Scale;
}
return result;
}
/**
* boidAlignmentRuleGrid()
* boids generally try to move with the same direction and speed as their neighbors
*
 * Boids want to match the velocity of their neighbors at t=a, so they will adjust their velocity accordingly.
* After each round, at t=a+dt, each boid will apply their change.
*/
__device__ glm::vec3 boidAlignmentRuleGrid(int N, int iSelf, const int *boidIndices, int b_start, int b_end, glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 result(0.0f);
glm::vec3 perceived_velocity(0.0f);
glm::vec3 selfPos = pos[iSelf];
glm::vec3 selfVelocity = vel[iSelf];
float neighbors = 0.0f;
for (int i = b_start; i < b_end; i++) {
int boid_idx = boidIndices[i];
if ((boid_idx != iSelf) && (glm::distance(selfPos, pos[boid_idx]) < rule3Distance)) {
perceived_velocity += vel[boid_idx];
neighbors++;
}
}
if (neighbors) {
perceived_velocity /= neighbors;
result = perceived_velocity * rule3Scale;
}
return result;
}
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
glm::vec3 delta(0.0f);
// Apply each rule.
delta += boidCohesionRuleNaive(N, iSelf, pos, vel);
delta += boidSeperationRuleNaive(N, iSelf, pos, vel);
delta += boidAlignmentRuleNaive(N, iSelf, pos, vel);
return delta;
}
/**
* TODO-1.2 implement basic flocking
 * For each of the `N` bodies, compute its new velocity based on the current positions and velocities of its neighbors.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
// Answer: Other threads may still be reading vel1!!
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 curntV = vel1[index];
glm::vec3 deltaV = computeVelocityChange(N, index, pos, vel1);
glm::vec3 newV = curntV + deltaV;
// Clamp the speed. We do it this way to ensure that the total velocity is clamped,
// not just the velocity in each direction (otherwise glm::clamp would be nice).
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 myGridPos = pos[index];
int myGridIdx = 0;
    // Subtract the grid minimum to move the +/- halfWidth range into the 0 - fullWidth range.
myGridPos -= gridMin;
// Cells are cubes, so all dimensions are identical, divide each pos by cell width
myGridPos *= inverseCellWidth;
// Round down to throw away float garbage!
myGridPos = glm::floor(myGridPos);
// Compute a 1D index from the 3D index
myGridIdx = gridIndex3Dto1D(myGridPos.x, myGridPos.y, myGridPos.z, gridResolution);
// Store the grid index in the indices buffer using the boid IDX as the key
// and the index in and index buffer. These two will be sorted in parallel.
// The end result will be that indices will be sorted by myGridIdx so consecutive
// boids will have their indices colocated in memory.
gridIndices[index] = myGridIdx;
indices[index] = index;
}
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Collect some information
int myGridIdx = particleGridIndices[index];
// Always Start if first cell
if ((index == 0) || particleGridIndices[index] != particleGridIndices[index - 1]) {
gridCellStartIndices[myGridIdx] = index; // Start of grid myGridIdx is boid at index
}
// Always End if last cell
if ((index == (N - 1)) || (particleGridIndices[index] != particleGridIndices[index + 1])) {
gridCellEndIndices[myGridIdx] = index;
}
}
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Identify the grid cell that this particle is in.
glm::vec3 myPos = pos[index];
glm::vec3 myGridPos = myPos - gridMin;
myGridPos *= inverseCellWidth; // Just like in kernComputeIndices()
myGridPos = glm::floor(myGridPos); // Just like in kernComputeIndices()
// Identify which cells contain neighbors
    // Want to find each grid cell where one of our rules can apply. Therefore, we need the maximum distance over our rules.
float neighbor_distance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
// Create a vector with this value.
glm::vec3 neighbor_distance_vec(neighbor_distance);
// Remember, everything is a cube! If we check the corners of the cube, we will know
// what the rest of the cube looks like. ie, if min is {0, 0, 0} and max is {1, 1, 1},
    // we know where each of the other corners lie. (Also true for rectangular boxes)
glm::vec3 min_neighbor_cell = glm::floor((myPos - gridMin - neighbor_distance) * inverseCellWidth);
glm::vec3 max_neighbor_cell = glm::floor((myPos - gridMin + neighbor_distance) * inverseCellWidth);
    // Now, if myPos is situated on any of the axes of the cell, then min and/or max may not change.
    // This is clear from the case where myPos is in the middle of the cell. In that case, if the cellWidth
    // is equal to the neighbor_distance, then the cube will consist of only the cell.
    // So we don't need to check the cell interior edge conditions, those are covered already.
    // Issue: What about the edge of the grid? The computed cells could fall outside of it.
    // We handle that here by clamping the search range to valid cell indices, so cells beyond
    // the grid boundary are simply never visited.
    glm::vec3 min_cell_search = glm::clamp(min_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
    glm::vec3 max_cell_search = glm::clamp(max_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
// After all that work, we now start applying rules! Instead of searching over N boids, we will search over the boids
// in each cell between min_cell_search and max_cell_search only.
    // I can already see how making the boids coherent in memory helps simplify this, but that's for later.
glm::vec3 velocity_change(0.0f);
glm::vec3 selfPos = pos[index];
glm::vec3 selfVelocity = vel1[index];
glm::vec3 alignment_perceived_velocity(0.0f);
glm::vec3 cohesion_perceived_center(0.0f);
glm::vec3 seperation(0.0f);
    int alignment_neighbors = 0;
    int cohesion_neighbors = 0;
    int seperation_neighbors = 0;
for (float z = min_cell_search.z; z <= max_cell_search.z; z++) {
for (float y = min_cell_search.y; y <= max_cell_search.y; y++) {
for (float x = min_cell_search.x; x <= max_cell_search.x; x++) {
// Yikes! Triple for loop??? Not that bad, min and max differ by at most one.
// Calculate grid index of the grid under inspection
int gridIdx = gridIndex3Dto1D(x, y, z, gridResolution);
// Get the start and end indices for the boids
int b_start = gridCellStartIndices[gridIdx];
int b_end = gridCellEndIndices[gridIdx];
// Check if any values are -1, meaning an empty cell.
          // Also make sure the indices stay below N; we don't want to read out of bounds.
          if (b_start < 0 || b_start >= N || b_end < 0 || b_end >= N) {
continue;
}
// We now have the boids we need. Run each rule over the range of boids.
// WOW: Unrolling these calls gave a 2.27X performance boost!!!! Cache benefits?
//velocity_change += boidAlignmentRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidCohesionRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidSeperationRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
for (int i = b_start; i <= b_end; i++) {
int boid_idx = particleArrayIndices[i];
if (index == boid_idx) { // Dip out early.
continue;
}
// Get relevant data
glm::vec3 boid_pos = pos[boid_idx];
float distance = glm::distance(selfPos, boid_pos);
// Cohesion
if (distance < rule1Distance) {
cohesion_perceived_center += boid_pos;
cohesion_neighbors++;
}
// Seperation
if (distance < rule2Distance) {
seperation -= boid_pos - selfPos;
seperation_neighbors++;
}
// Alignment
if (distance < rule3Distance) {
alignment_perceived_velocity += vel1[boid_idx];
alignment_neighbors++;
}
}
}
}
}
// Finalize Cohesion values
if (cohesion_neighbors) {
cohesion_perceived_center /= cohesion_neighbors;
velocity_change += (cohesion_perceived_center - selfPos) * rule1Scale;
}
// Finalize Seperation Values
if (seperation_neighbors) {
velocity_change += seperation * rule2Scale;
}
// Finalize Alignment Values
if (alignment_neighbors) {
alignment_perceived_velocity /= alignment_neighbors;
velocity_change += alignment_perceived_velocity * rule3Scale;
}
// Calculated total velocity change! Now apply, clamp, and store.
glm::vec3 newV = selfVelocity + velocity_change;
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
}
__global__ void kernRearrangeCoherentData(int N, int* indices, glm::vec3 *scattered_pos, glm::vec3 *coherent_pos, glm::vec3 *scattered_vel1, glm::vec3 *coherent_vel1) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
    int bIdx = indices[index];
coherent_pos[index] = scattered_pos[bIdx];
coherent_vel1[index] = scattered_vel1[bIdx];
}
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
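// Note on cell order: assuming gridIndex3Dto1D lays cells out as x + y * res + z * res^2, looping z on
// the outside and x on the inside (as below) walks the cells in increasing 1D index, so the now-contiguous
// pos/vel reads stay mostly sequential in memory.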
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
// Identify the grid cell that this particle is in.
glm::vec3 myPos = pos[index];
glm::vec3 myGridPos = myPos - gridMin;
myGridPos *= inverseCellWidth; // Just like in kernComputeIndices()
myGridPos = glm::floor(myGridPos); // Just like in kernComputeIndices()
// Identify which cells contain neighbors
// We want to find each grid cell where one of our rules can apply. Therefore, we need to know the largest rule distance.
float neighbor_distance = glm::max(glm::max(rule1Distance, rule2Distance), rule3Distance);
// Create a vector with this value.
glm::vec3 neighbor_distance_vec(neighbor_distance);
// Remember, everything is a cube! If we know the min and max corners of the cube, we know
// where the rest of it lies: e.g. if min is {0, 0, 0} and max is {1, 1, 1}, every other corner
// is determined. (Also true for rectangular boxes.)
glm::vec3 min_neighbor_cell = glm::floor((myPos - gridMin - neighbor_distance) * inverseCellWidth);
glm::vec3 max_neighbor_cell = glm::floor((myPos - gridMin + neighbor_distance) * inverseCellWidth);
// Now, if myPos is situated right on one of the cell's axes, then min and/or max may not change.
// This is clearest when myPos is in the middle of the cell: if the cellWidth equals the
// neighbor_distance, the search cube collapses toward the cell itself.
// So we don't need to check the cell-interior edge conditions; those are covered already.
// Issue: what about the edge of the grid? The neighborhood can extend past the grid bounds,
// so the clamp below keeps the search inside the valid range of cells instead of reading
// outside the grid arrays.
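// Illustrative numbers (not from the simulation config): with gridMin = (-100,-100,-100), cellWidth = 10
// (inverseCellWidth = 0.1) and neighbor_distance = 5, a boid at x = -73 sits in cell floor(27 * 0.1) = 2,
// and the search spans cells floor((27 - 5) * 0.1) = 2 through floor((27 + 5) * 0.1) = 3 along x.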
// Clamp to valid cell indices [0, gridResolution - 1]; gridResolution itself would index past the cell arrays.
glm::vec3 min_cell_search = glm::clamp(min_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
glm::vec3 max_cell_search = glm::clamp(max_neighbor_cell, glm::vec3(0), glm::vec3(gridResolution - 1));
// After all that work, we now start applying rules! Instead of searching over N boids, we will search over the boids
// in each cell between min_cell_search and max_cell_search only.
// I can already see how making the boids coherent in memory helps simplify this, but that's for later.
glm::vec3 velocity_change(0.0f);
glm::vec3 selfPos = pos[index];
glm::vec3 selfVelocity = vel1[index];
glm::vec3 alignment_perceived_velocity(0.0f);
glm::vec3 cohesion_perceived_center(0.0f);
glm::vec3 seperation(0.0f);
int alignment_neighbors = 0;
int cohesion_neighbors = 0;
int seperation_neighbors = 0;
for (float z = min_cell_search.z; z <= max_cell_search.z; z++) {
for (float y = min_cell_search.y; y <= max_cell_search.y; y++) {
for (float x = min_cell_search.x; x <= max_cell_search.x; x++) {
// Yikes! Triple for loop??? Not that bad, min and max differ by at most one.
// Calculate grid index of the grid under inspection
int gridIdx = gridIndex3Dto1D(x, y, z, gridResolution);
// Get the start and end indices for the boids
int b_start = gridCellStartIndices[gridIdx];
int b_end = gridCellEndIndices[gridIdx];
// Check if any values are -1, meaning an empty cell.
// Also make sure the indices stay below N, so we never read past the end of the boid arrays.
if (b_start < 0 || b_start >= N || b_end < 0 || b_end >= N) {
continue;
}
// We now have the boids we need. Run each rule over the range of boids.
// WOW: Unrolling these calls gave a 2.27X performance boost!!!! Cache benefits?
//velocity_change += boidAlignmentRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidCohesionRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
//velocity_change += boidSeperationRuleGrid(N, index, particleArrayIndices, b_start, b_end, pos, vel1);
for (int i = b_start; i <= b_end; i++) {
if (index == i) { // Dip out early.
continue;
}
// Get relevant data
glm::vec3 boid_pos = pos[i];
float distance = glm::distance(selfPos, boid_pos);
// Cohesion
if (distance < rule1Distance) {
cohesion_perceived_center += boid_pos;
cohesion_neighbors++;
}
// Separation
if (distance < rule2Distance) {
seperation -= boid_pos - selfPos;
seperation_neighbors++;
}
// Alignment
if (distance < rule3Distance) {
alignment_perceived_velocity += vel1[i];
alignment_neighbors++;
}
}
}
}
}
// Finalize Cohesion values
if (cohesion_neighbors) {
cohesion_perceived_center /= cohesion_neighbors;
velocity_change += (cohesion_perceived_center - selfPos) * rule1Scale;
}
// Finalize Separation Values
if (seperation_neighbors) {
velocity_change += seperation * rule2Scale;
}
// Finalize Alignment Values
if (alignment_neighbors) {
alignment_perceived_velocity /= alignment_neighbors;
velocity_change += alignment_perceived_velocity * rule3Scale;
}
// Calculated total velocity change! Now apply, clamp, and store.
glm::vec3 newV = selfVelocity + velocity_change;
if (glm::length(newV) > maxSpeed) {
newV = glm::normalize(newV) * maxSpeed;
}
vel2[index] = newV;
}
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
// TODO-1.2 ping-pong the velocity buffers
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
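// (numObjects + blockSize - 1) / blockSize is an integer ceiling divide, so every boid gets a thread:
// e.g. 5000 boids at blockSize = 128 gives 40 blocks = 5120 threads; the extra 120 fail the index < N check.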
kernUpdateVelocityBruteForce<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2); // Use new velocity!
// Swap buffers, ping pong!
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Run the index labeling kernel
kernComputeIndices<<<fullBlocksPerGrid, blockSize >>>(
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices
);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Use the Thrust API to sort the indices...
thrust::sort_by_key(
dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices
);
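// After the sort, dev_particleGridIndices is in ascending cell order and dev_particleArrayIndices[i]
// holds the original boid index that now lives at sorted slot i, so all boids of a cell are contiguous.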
// Locate start and stop indices
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize >>>(gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer1 failed!");
kernResetIntBuffer<<<fullBlocksPerGrid, blockSize >>>(gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer2 failed!");
kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize >>> (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices
);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// The fun part! Calculate velocity changes
kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid, blockSize >>>(
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_particleArrayIndices,
dev_pos,
dev_vel1,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// Update positions
kernUpdatePos<<<fullBlocksPerGrid, blockSize >>>(
numObjects,
dt,
dev_pos,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Swap buffers and you're done!
std::swap(dev_vel1, dev_vel2);
}
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
// Run the index labeling kernel
kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
dev_pos,
dev_particleArrayIndices,
dev_particleGridIndices
);
checkCUDAErrorWithLine("kernComputeIndices failed!");
// Use the Thrust API to sort the indices...
thrust::sort_by_key(
dev_thrust_particleGridIndices,
dev_thrust_particleGridIndices + numObjects,
dev_thrust_particleArrayIndices
);
// Locate start and stop indices
kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer1 failed!");
kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
checkCUDAErrorWithLine("kernResetIntBuffer2 failed!");
kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dev_particleGridIndices,
dev_gridCellStartIndices,
dev_gridCellEndIndices
);
checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
// The difference is here. We want the values in gridCellStart and gridCellEnd to be the exact indices of each
// boid in the pos/vel arrays. This means we must rearrange the pos/vel arrays appropriately.
// We can do this in a kernel! Create additional coherent pos and vel1 arrays, then gather the old pos
// and vel1 data into them based on the sorted values in dev_particleArrayIndices.
kernRearrangeCoherentData << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dev_particleArrayIndices,
dev_pos,
dev_coherent_pos,
dev_vel1,
dev_coherent_vel1
);
// Data has been sorted coherently, copy it back to the original buffers.
cudaMemcpy(dev_pos, dev_coherent_pos, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vel1, dev_coherent_vel1, numObjects * sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
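// Optional optimization (not done here): instead of copying the reordered data back, the coherent and
// original buffers could simply be pointer-swapped (std::swap(dev_pos, dev_coherent_pos), and likewise
// for vel1), saving two device-to-device copies per frame.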
// The fun part! Calculate velocity changes
kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (
numObjects,
gridSideCount,
gridMinimum,
gridInverseCellWidth,
gridCellWidth,
dev_gridCellStartIndices,
dev_gridCellEndIndices,
dev_pos,
dev_vel1,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
// Update positions
kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (
numObjects,
dt,
dev_pos,
dev_vel2
);
checkCUDAErrorWithLine("kernUpdatePos failed!");
// Swap buffers and you're done!
std::swap(dev_vel1, dev_vel2);
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
// Also release the coherent-grid buffers used in stepSimulationCoherentGrid.
cudaFree(dev_coherent_pos);
cudaFree(dev_coherent_vel1);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
std::unique_ptr<int[]>intKeys{ new int[N] };
std::unique_ptr<int[]>intValues{ new int[N] };
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
1ec2e0f9261caf986d5ed6ef8cd5356d4e31ffc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "DecompressionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int dimensionalityd = 1;
unsigned char *compressed_data_buffer_in = NULL;
hipMalloc(&compressed_data_buffer_in, XSIZE*YSIZE);
int *chunk_boundaries_buffer_in = NULL;
hipMalloc(&chunk_boundaries_buffer_in, XSIZE*YSIZE);
unsigned long long *uncompressed_data_buffer_out = NULL;
hipMalloc(&uncompressed_data_buffer_out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
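// These loops round iXSIZE/iYSIZE up to the next multiple of the block dimensions, which is just a
// ceiling divide: e.g. XSIZE = 240 with BLOCKX = 32 becomes 256, giving 256 / 32 = 8 blocks along x.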
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
DecompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
DecompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
DecompressionKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
}
auto end = steady_clock::now();
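// Note: there is no hipDeviceSynchronize() between the timed launch loop and this timestamp, so the
// measured interval covers kernel launch/queueing plus however much of the 1000 launches the device
// has already drained, rather than a guaranteed end-to-end execution time.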
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1ec2e0f9261caf986d5ed6ef8cd5356d4e31ffc0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "DecompressionKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int dimensionalityd = 1;
unsigned char *compressed_data_buffer_in = NULL;
cudaMalloc(&compressed_data_buffer_in, XSIZE*YSIZE);
int *chunk_boundaries_buffer_in = NULL;
cudaMalloc(&chunk_boundaries_buffer_in, XSIZE*YSIZE);
unsigned long long *uncompressed_data_buffer_out = NULL;
cudaMalloc(&uncompressed_data_buffer_out, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
DecompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
DecompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
DecompressionKernel<<<gridBlock,threadBlock>>>(dimensionalityd,compressed_data_buffer_in,chunk_boundaries_buffer_in,uncompressed_data_buffer_out);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
54cff7d35551f707a105b34c44ee5537c19ce81e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RenderScene.h"
#include "ParallelScan.h"
struct Fusion {
DeviceMap map;
float invfx, invfy;
float fx, fy, cx, cy;
float minDepth, maxDepth;
int cols, rows;
Matrix3f Rview;
Matrix3f RviewInv;
float3 tview;
uint* noVisibleBlocks;
PtrStep<float> depth;
PtrStep<uchar3> rgb;
PtrStep<float4> bundle;
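// project()/unproject() below implement a standard pinhole camera model with intrinsics fx, fy, cx, cy:
// project maps a camera-space point to pixel coordinates, and unproject lifts a pixel plus depth back to
// a camera-space point and then into world space via the view rotation Rview and translation tview.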
__device__ inline float2 project(float3& pt3d) {
float2 pt2d;
pt2d.x = fx * pt3d.x / pt3d.z + cx;
pt2d.y = fy * pt3d.y / pt3d.z + cy;
return pt2d;
}
__device__ inline float3 unproject(int& x, int& y, float& z) {
float3 pt3d;
pt3d.z = z;
pt3d.x = z * (x - cx) * invfx;
pt3d.y = z * (y - cy) * invfy;
return Rview * pt3d + tview;
}
__device__ inline bool CheckVertexVisibility(float3 pt3d) {
pt3d = RviewInv * (pt3d - tview);
if (pt3d.z < 1e-3f)
return false;
float2 pt2d = project(pt3d);
return pt2d.x >= 0 && pt2d.y >= 0 &&
pt2d.x < cols && pt2d.y < rows &&
pt3d.z >= minDepth && pt3d.z <= maxDepth;
}
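// CheckBlockVisibility tests the eight corners of a voxel block's bounding cube with the per-vertex
// visibility check above. This is a cheap approximation: a block whose corners all land outside the
// image could in principle still overlap the view, but for blockWidth-sized blocks that case is rare.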
__device__ inline bool CheckBlockVisibility(const int3& pos) {
float scale = DeviceMap::blockWidth;
float3 corner = pos * scale;
if (CheckVertexVisibility(corner))
return true;
corner.z += scale;
if (CheckVertexVisibility(corner))
return true;
corner.y += scale;
if (CheckVertexVisibility(corner))
return true;
corner.x += scale;
if (CheckVertexVisibility(corner))
return true;
corner.z -= scale;
if (CheckVertexVisibility(corner))
return true;
corner.y -= scale;
if (CheckVertexVisibility(corner))
return true;
corner.x -= scale;
corner.y += scale;
if (CheckVertexVisibility(corner))
return true;
corner.x += scale;
corner.y -= scale;
corner.z += scale;
if (CheckVertexVisibility(corner))
return true;
return false;
}
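// CreateBlocks marches each pixel's view ray through the SDF truncation band around the measured depth,
// stepping roughly half a voxel at a time (nSteps = ceil(2 * length)) and allocating the hash block that
// contains each sample.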
__device__ inline void CreateBlocks() {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols && y >= rows)
return;
float z = depth.ptr(y)[x];
if (isnan(z) || z < DeviceMap::DepthMin ||
z > DeviceMap::DepthMax)
return;
float thresh = DeviceMap::TruncateDist / 2;
float z_near = min(DeviceMap::DepthMax, z - thresh);
float z_far = min(DeviceMap::DepthMax, z + thresh);
if (z_near >= z_far)
return;
float3 pt_near = unproject(x, y, z_near) * DeviceMap::voxelSizeInv;
float3 pt_far = unproject(x, y, z_far) * DeviceMap::voxelSizeInv;
float3 dir = pt_far - pt_near;
float length = norm(dir);
int nSteps = (int) ceil(2.0 * length);
dir = dir / (float) (nSteps - 1);
for (int i = 0; i < nSteps; ++i) {
int3 blockPos = map.voxelPosToBlockPos(make_int3(pt_near));
map.CreateBlock(blockPos);
pt_near += dir;
}
}
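// CheckFullVisibility compacts the visible hash entries: each thread flags its entry with val = 1 if
// visible, and ComputeOffset<1024> (assumed to be a block-wide prefix sum combined with an atomic on
// noVisibleBlocks) turns those flags into dense output slots in map.visibleEntries.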
__device__ inline void CheckFullVisibility() {
__shared__ bool bScan;
if (threadIdx.x == 0)
bScan = false;
__syncthreads();
uint val = 0;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x < map.hashEntries.size) {
HashEntry& e = map.hashEntries[x];
if (e.ptr != EntryAvailable) {
if (CheckBlockVisibility(e.pos)) {
bScan = true;
val = 1;
}
}
}
__syncthreads();
if (bScan) {
int offset = ComputeOffset<1024>(val, noVisibleBlocks);
if (offset != -1 && offset < map.visibleEntries.size
&& x < map.hashEntries.size)
map.visibleEntries[offset] = map.hashEntries[x];
}
}
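// integrateColor runs one 8x8 thread block per visible voxel block; each thread walks 8 voxels along z
// (the i loop), covering all 8x8x8 = 512 voxels. The update itself is the usual TSDF running average:
// sdf_new = (sdf_old * w + sdf) / (w + 1), with the weight w capped at 255 and the color blended 80/20
// toward the accumulated value.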
__device__ inline void integrateColor() {
if(blockIdx.x >= map.visibleEntries.size ||
blockIdx.x >= *noVisibleBlocks)
return;
HashEntry& entry = map.visibleEntries[blockIdx.x];
if (entry.ptr == EntryAvailable)
return;
int3 block_pos = map.blockPosToVoxelPos(entry.pos);
#pragma unroll
for(int i = 0; i < 8; ++i) {
int3 localPos = make_int3(threadIdx.x, threadIdx.y, i);
int locId = map.localPosToLocalIdx(localPos);
float3 pos = map.voxelPosToWorldPos(block_pos + localPos);
pos = RviewInv * (pos - tview);
int2 uv = make_int2(project(pos));
if (uv.x < 0 || uv.y < 0 || uv.x >= cols || uv.y >= rows)
continue;
float dp = depth.ptr(uv.y)[uv.x];
if (isnan(dp) || dp > maxDepth || dp < minDepth)
continue;
float thresh = DeviceMap::TruncateDist;
float sdf = dp - pos.z;
if (sdf >= -thresh) {
sdf = fmin(1.0f, sdf / thresh);
uchar3 color = rgb.ptr(uv.y)[uv.x];
Voxel & prev = map.voxelBlocks[entry.ptr + locId];
if(prev.weight == 0) {
prev = Voxel(sdf, 1, color);
}
else {
float3 res = 0.2f * make_float3(color) + 0.8f * make_float3(prev.color);
prev.sdf = (prev.sdf * prev.weight + sdf) / (prev.weight + 1);
prev.weight = min(255, prev.weight + 1);
prev.color = make_uchar3(res);
}
}
}
}
};
__global__ void CreateBlocksKernel(Fusion fuse) {
fuse.CreateBlocks();
}
__global__ void FuseColorKernal(Fusion fuse) {
fuse.integrateColor();
}
__global__ void CheckVisibleBlockKernel(Fusion fuse) {
fuse.CheckFullVisibility();
}
void CheckBlockVisibility(DeviceMap map,
DeviceArray<uint> & noVisibleBlocks,
Matrix3f Rview,
Matrix3f RviewInv,
float3 tview,
int cols,
int rows,
float fx,
float fy,
float cx,
float cy,
float depthMax,
float depthMin,
uint * host_data) {
noVisibleBlocks.clear();
Fusion fuse;
fuse.map = map;
fuse.Rview = Rview;
fuse.RviewInv = RviewInv;
fuse.tview = tview;
fuse.fx = fx;
fuse.fy = fy;
fuse.cx = cx;
fuse.cy = cy;
fuse.invfx = 1.0 / fx;
fuse.invfy = 1.0 / fy;
fuse.rows = rows;
fuse.cols = cols;
fuse.noVisibleBlocks = noVisibleBlocks;
fuse.maxDepth = depthMax;
fuse.minDepth = depthMin;
dim3 thread = dim3(1024);
dim3 block = dim3(DivUp((int) DeviceMap::NumEntries, thread.x));
hipLaunchKernelGGL(( CheckVisibleBlockKernel), dim3(block), dim3(thread), 0, 0, fuse);
host_data[0] = 0;
noVisibleBlocks.download((void*) host_data);
if (host_data[0] == 0)
return;
}
void FuseMapColor(const DeviceArray2D<float> & depth,
const DeviceArray2D<uchar3> & color,
DeviceArray<uint> & noVisibleBlocks,
Matrix3f Rview,
Matrix3f RviewInv,
float3 tview,
DeviceMap map,
float fx,
float fy,
float cx,
float cy,
float depthMax,
float depthMin,
uint * host_data) {
int cols = depth.cols;
int rows = depth.rows;
noVisibleBlocks.clear();
Fusion fuse;
fuse.map = map;
fuse.Rview = Rview;
fuse.RviewInv = RviewInv;
fuse.tview = tview;
fuse.fx = fx;
fuse.fy = fy;
fuse.cx = cx;
fuse.cy = cy;
fuse.invfx = 1.0 / fx;
fuse.invfy = 1.0 / fy;
fuse.depth = depth;
fuse.rgb = color;
fuse.rows = rows;
fuse.cols = cols;
fuse.noVisibleBlocks = noVisibleBlocks;
fuse.maxDepth = DeviceMap::DepthMax;
fuse.minDepth = DeviceMap::DepthMin;
dim3 thread(16, 8);
dim3 block(DivUp(cols, thread.x), DivUp(rows, thread.y));
hipLaunchKernelGGL(( CreateBlocksKernel), dim3(block), dim3(thread), 0, 0, fuse);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
thread = dim3(1024);
block = dim3(DivUp((int) DeviceMap::NumEntries, thread.x));
hipLaunchKernelGGL(( CheckVisibleBlockKernel), dim3(block), dim3(thread), 0, 0, fuse);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
host_data[0] = 0;
noVisibleBlocks.download((void*) host_data);
if (host_data[0] == 0)
return;
thread = dim3(8, 8);
block = dim3(host_data[0]);
hipLaunchKernelGGL(( FuseColorKernal), dim3(block), dim3(thread), 0, 0, fuse);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
}
__global__ void ResetHashKernel(DeviceMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < map.hashEntries.size) {
map.hashEntries[x].release();
map.visibleEntries[x].release();
}
if (x < DeviceMap::NumBuckets) {
map.bucketMutex[x] = EntryAvailable;
}
}
__global__ void ResetSdfBlockKernel(DeviceMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < DeviceMap::NumSdfBlocks) {
map.heapMem[x] = DeviceMap::NumSdfBlocks - x - 1;
}
int blockIdx = x * DeviceMap::BlockSize3;
for(int i = 0; i < DeviceMap::BlockSize3; ++i, ++blockIdx) {
map.voxelBlocks[blockIdx].release();
}
if(x == 0) {
map.heapCounter[0] = DeviceMap::NumSdfBlocks - 1;
map.entryPtr[0] = 1;
}
}
void ResetMap(DeviceMap map) {
dim3 thread(1024);
dim3 block(DivUp((int) DeviceMap::NumEntries, thread.x));
hipLaunchKernelGGL(( ResetHashKernel), dim3(block), dim3(thread), 0, 0, map);
block = dim3(DivUp((int) DeviceMap::NumSdfBlocks, thread.x));
hipLaunchKernelGGL(( ResetSdfBlockKernel), dim3(block), dim3(thread), 0, 0, map);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
}
__global__ void ResetKeyPointsKernel(KeyMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < KeyMap::maxEntries) {
map.Keys[x].valid = false;
}
if(x < KeyMap::nBuckets) {
map.Mutex[x] = EntryAvailable;
}
}
void ResetKeyPoints(KeyMap map) {
dim3 thread(1024);
dim3 block(DivUp((int) KeyMap::maxEntries, thread.x));
hipLaunchKernelGGL(( ResetKeyPointsKernel), dim3(block), dim3(thread), 0, 0, map);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
}
struct KeyFusion {
__device__ __forceinline__ void CollectKeys() {
__shared__ bool scan;
if(threadIdx.x == 0)
scan = false;
__syncthreads();
uint val = 0;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < map.Keys.size) {
SurfKey * key = &map.Keys[x];
if(key->valid) {
scan = true;
val = 1;
}
}
__syncthreads();
if(scan) {
int offset = ComputeOffset<1024>(val, nokeys);
if(offset > 0 && x < map.Keys.size) {
memcpy(&keys[offset], &map.Keys[x], sizeof(SurfKey));
}
}
}
__device__ __forceinline__ void InsertKeys() {
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x < size) {
map.InsertKey(&keys[x]);
}
}
KeyMap map;
uint * nokeys;
PtrSz<SurfKey> keys;
size_t size;
};
__global__ void CollectKeyPointsKernel(KeyFusion fuse) {
fuse.CollectKeys();
}
__global__ void InsertKeyPointsKernel(KeyFusion fuse) {
fuse.InsertKeys();
}
void CollectKeyPoints(KeyMap map, DeviceArray<SurfKey> & keys, DeviceArray<uint> & noKeys) {
KeyFusion fuse;
fuse.map = map;
fuse.keys = keys;
fuse.nokeys = noKeys;
dim3 thread(1024);
dim3 block(DivUp(map.Keys.size, thread.x));
hipLaunchKernelGGL(( CollectKeyPointsKernel), dim3(block), dim3(thread), 0, 0, fuse);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
}
void InsertKeyPoints(KeyMap map, DeviceArray<SurfKey> & keys, size_t size) {
if(size == 0)
return;
KeyFusion fuse;
fuse.map = map;
fuse.keys = keys;
fuse.size = size;
dim3 thread(1024);
dim3 block(DivUp(size, thread.x));
hipLaunchKernelGGL(( InsertKeyPointsKernel), dim3(block), dim3(thread), 0, 0, fuse);
SafeCall(hipDeviceSynchronize());
SafeCall(hipGetLastError());
}
| 54cff7d35551f707a105b34c44ee5537c19ce81e.cu | #include "RenderScene.h"
#include "ParallelScan.h"
struct Fusion {
DeviceMap map;
float invfx, invfy;
float fx, fy, cx, cy;
float minDepth, maxDepth;
int cols, rows;
Matrix3f Rview;
Matrix3f RviewInv;
float3 tview;
uint* noVisibleBlocks;
PtrStep<float> depth;
PtrStep<uchar3> rgb;
PtrStep<float4> bundle;
__device__ inline float2 project(float3& pt3d) {
float2 pt2d;
pt2d.x = fx * pt3d.x / pt3d.z + cx;
pt2d.y = fy * pt3d.y / pt3d.z + cy;
return pt2d;
}
__device__ inline float3 unproject(int& x, int& y, float& z) {
float3 pt3d;
pt3d.z = z;
pt3d.x = z * (x - cx) * invfx;
pt3d.y = z * (y - cy) * invfy;
return Rview * pt3d + tview;
}
__device__ inline bool CheckVertexVisibility(float3 pt3d) {
pt3d = RviewInv * (pt3d - tview);
if (pt3d.z < 1e-3f)
return false;
float2 pt2d = project(pt3d);
return pt2d.x >= 0 && pt2d.y >= 0 &&
pt2d.x < cols && pt2d.y < rows &&
pt3d.z >= minDepth && pt3d.z <= maxDepth;
}
__device__ inline bool CheckBlockVisibility(const int3& pos) {
float scale = DeviceMap::blockWidth;
float3 corner = pos * scale;
if (CheckVertexVisibility(corner))
return true;
corner.z += scale;
if (CheckVertexVisibility(corner))
return true;
corner.y += scale;
if (CheckVertexVisibility(corner))
return true;
corner.x += scale;
if (CheckVertexVisibility(corner))
return true;
corner.z -= scale;
if (CheckVertexVisibility(corner))
return true;
corner.y -= scale;
if (CheckVertexVisibility(corner))
return true;
corner.x -= scale;
corner.y += scale;
if (CheckVertexVisibility(corner))
return true;
corner.x += scale;
corner.y -= scale;
corner.z += scale;
if (CheckVertexVisibility(corner))
return true;
return false;
}
__device__ inline void CreateBlocks() {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cols && y >= rows)
return;
float z = depth.ptr(y)[x];
if (isnan(z) || z < DeviceMap::DepthMin ||
z > DeviceMap::DepthMax)
return;
float thresh = DeviceMap::TruncateDist / 2;
float z_near = min(DeviceMap::DepthMax, z - thresh);
float z_far = min(DeviceMap::DepthMax, z + thresh);
if (z_near >= z_far)
return;
float3 pt_near = unproject(x, y, z_near) * DeviceMap::voxelSizeInv;
float3 pt_far = unproject(x, y, z_far) * DeviceMap::voxelSizeInv;
float3 dir = pt_far - pt_near;
float length = norm(dir);
int nSteps = (int) ceil(2.0 * length);
dir = dir / (float) (nSteps - 1);
for (int i = 0; i < nSteps; ++i) {
int3 blockPos = map.voxelPosToBlockPos(make_int3(pt_near));
map.CreateBlock(blockPos);
pt_near += dir;
}
}
__device__ inline void CheckFullVisibility() {
__shared__ bool bScan;
if (threadIdx.x == 0)
bScan = false;
__syncthreads();
uint val = 0;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x < map.hashEntries.size) {
HashEntry& e = map.hashEntries[x];
if (e.ptr != EntryAvailable) {
if (CheckBlockVisibility(e.pos)) {
bScan = true;
val = 1;
}
}
}
__syncthreads();
if (bScan) {
int offset = ComputeOffset<1024>(val, noVisibleBlocks);
if (offset != -1 && offset < map.visibleEntries.size
&& x < map.hashEntries.size)
map.visibleEntries[offset] = map.hashEntries[x];
}
}
__device__ inline void integrateColor() {
if(blockIdx.x >= map.visibleEntries.size ||
blockIdx.x >= *noVisibleBlocks)
return;
HashEntry& entry = map.visibleEntries[blockIdx.x];
if (entry.ptr == EntryAvailable)
return;
int3 block_pos = map.blockPosToVoxelPos(entry.pos);
#pragma unroll
for(int i = 0; i < 8; ++i) {
int3 localPos = make_int3(threadIdx.x, threadIdx.y, i);
int locId = map.localPosToLocalIdx(localPos);
float3 pos = map.voxelPosToWorldPos(block_pos + localPos);
pos = RviewInv * (pos - tview);
int2 uv = make_int2(project(pos));
if (uv.x < 0 || uv.y < 0 || uv.x >= cols || uv.y >= rows)
continue;
float dp = depth.ptr(uv.y)[uv.x];
if (isnan(dp) || dp > maxDepth || dp < minDepth)
continue;
float thresh = DeviceMap::TruncateDist;
float sdf = dp - pos.z;
if (sdf >= -thresh) {
sdf = fmin(1.0f, sdf / thresh);
uchar3 color = rgb.ptr(uv.y)[uv.x];
Voxel & prev = map.voxelBlocks[entry.ptr + locId];
if(prev.weight == 0) {
prev = Voxel(sdf, 1, color);
}
else {
float3 res = 0.2f * make_float3(color) + 0.8f * make_float3(prev.color);
prev.sdf = (prev.sdf * prev.weight + sdf) / (prev.weight + 1);
prev.weight = min(255, prev.weight + 1);
prev.color = make_uchar3(res);
}
}
}
}
};
__global__ void CreateBlocksKernel(Fusion fuse) {
fuse.CreateBlocks();
}
__global__ void FuseColorKernal(Fusion fuse) {
fuse.integrateColor();
}
__global__ void CheckVisibleBlockKernel(Fusion fuse) {
fuse.CheckFullVisibility();
}
void CheckBlockVisibility(DeviceMap map,
DeviceArray<uint> & noVisibleBlocks,
Matrix3f Rview,
Matrix3f RviewInv,
float3 tview,
int cols,
int rows,
float fx,
float fy,
float cx,
float cy,
float depthMax,
float depthMin,
uint * host_data) {
noVisibleBlocks.clear();
Fusion fuse;
fuse.map = map;
fuse.Rview = Rview;
fuse.RviewInv = RviewInv;
fuse.tview = tview;
fuse.fx = fx;
fuse.fy = fy;
fuse.cx = cx;
fuse.cy = cy;
fuse.invfx = 1.0 / fx;
fuse.invfy = 1.0 / fy;
fuse.rows = rows;
fuse.cols = cols;
fuse.noVisibleBlocks = noVisibleBlocks;
fuse.maxDepth = depthMax;
fuse.minDepth = depthMin;
dim3 thread = dim3(1024);
dim3 block = dim3(DivUp((int) DeviceMap::NumEntries, thread.x));
CheckVisibleBlockKernel<<<block, thread>>>(fuse);
host_data[0] = 0;
noVisibleBlocks.download((void*) host_data);
if (host_data[0] == 0)
return;
}
void FuseMapColor(const DeviceArray2D<float> & depth,
const DeviceArray2D<uchar3> & color,
DeviceArray<uint> & noVisibleBlocks,
Matrix3f Rview,
Matrix3f RviewInv,
float3 tview,
DeviceMap map,
float fx,
float fy,
float cx,
float cy,
float depthMax,
float depthMin,
uint * host_data) {
int cols = depth.cols;
int rows = depth.rows;
noVisibleBlocks.clear();
Fusion fuse;
fuse.map = map;
fuse.Rview = Rview;
fuse.RviewInv = RviewInv;
fuse.tview = tview;
fuse.fx = fx;
fuse.fy = fy;
fuse.cx = cx;
fuse.cy = cy;
fuse.invfx = 1.0 / fx;
fuse.invfy = 1.0 / fy;
fuse.depth = depth;
fuse.rgb = color;
fuse.rows = rows;
fuse.cols = cols;
fuse.noVisibleBlocks = noVisibleBlocks;
fuse.maxDepth = DeviceMap::DepthMax;
fuse.minDepth = DeviceMap::DepthMin;
dim3 thread(16, 8);
dim3 block(DivUp(cols, thread.x), DivUp(rows, thread.y));
CreateBlocksKernel<<<block, thread>>>(fuse);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
thread = dim3(1024);
block = dim3(DivUp((int) DeviceMap::NumEntries, thread.x));
CheckVisibleBlockKernel<<<block, thread>>>(fuse);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
host_data[0] = 0;
noVisibleBlocks.download((void*) host_data);
if (host_data[0] == 0)
return;
thread = dim3(8, 8);
block = dim3(host_data[0]);
FuseColorKernal<<<block, thread>>>(fuse);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
}
__global__ void ResetHashKernel(DeviceMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < map.hashEntries.size) {
map.hashEntries[x].release();
map.visibleEntries[x].release();
}
if (x < DeviceMap::NumBuckets) {
map.bucketMutex[x] = EntryAvailable;
}
}
__global__ void ResetSdfBlockKernel(DeviceMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < DeviceMap::NumSdfBlocks) {
map.heapMem[x] = DeviceMap::NumSdfBlocks - x - 1;
}
int blockIdx = x * DeviceMap::BlockSize3;
for(int i = 0; i < DeviceMap::BlockSize3; ++i, ++blockIdx) {
map.voxelBlocks[blockIdx].release();
}
if(x == 0) {
map.heapCounter[0] = DeviceMap::NumSdfBlocks - 1;
map.entryPtr[0] = 1;
}
}
void ResetMap(DeviceMap map) {
dim3 thread(1024);
dim3 block(DivUp((int) DeviceMap::NumEntries, thread.x));
ResetHashKernel<<<block, thread>>>(map);
block = dim3(DivUp((int) DeviceMap::NumSdfBlocks, thread.x));
ResetSdfBlockKernel<<<block, thread>>>(map);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
}
__global__ void ResetKeyPointsKernel(KeyMap map) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(x < KeyMap::maxEntries) {
map.Keys[x].valid = false;
}
if(x < KeyMap::nBuckets) {
map.Mutex[x] = EntryAvailable;
}
}
void ResetKeyPoints(KeyMap map) {
dim3 thread(1024);
dim3 block(DivUp((int) KeyMap::maxEntries, thread.x));
ResetKeyPointsKernel<<<block, thread>>>(map);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
}
struct KeyFusion {
__device__ __forceinline__ void CollectKeys() {
__shared__ bool scan;
if(threadIdx.x == 0)
scan = false;
__syncthreads();
uint val = 0;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if(x < map.Keys.size) {
SurfKey * key = &map.Keys[x];
if(key->valid) {
scan = true;
val = 1;
}
}
__syncthreads();
if(scan) {
int offset = ComputeOffset<1024>(val, nokeys);
if(offset > 0 && x < map.Keys.size) {
memcpy(&keys[offset], &map.Keys[x], sizeof(SurfKey));
}
}
}
__device__ __forceinline__ void InsertKeys() {
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x < size) {
map.InsertKey(&keys[x]);
}
}
KeyMap map;
uint * nokeys;
PtrSz<SurfKey> keys;
size_t size;
};
__global__ void CollectKeyPointsKernel(KeyFusion fuse) {
fuse.CollectKeys();
}
__global__ void InsertKeyPointsKernel(KeyFusion fuse) {
fuse.InsertKeys();
}
void CollectKeyPoints(KeyMap map, DeviceArray<SurfKey> & keys, DeviceArray<uint> & noKeys) {
KeyFusion fuse;
fuse.map = map;
fuse.keys = keys;
fuse.nokeys = noKeys;
dim3 thread(1024);
dim3 block(DivUp(map.Keys.size, thread.x));
CollectKeyPointsKernel<<<block, thread>>>(fuse);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
}
void InsertKeyPoints(KeyMap map, DeviceArray<SurfKey> & keys, size_t size) {
if(size == 0)
return;
KeyFusion fuse;
fuse.map = map;
fuse.keys = keys;
fuse.size = size;
dim3 thread(1024);
dim3 block(DivUp(size, thread.x));
InsertKeyPointsKernel<<<block, thread>>>(fuse);
SafeCall(cudaDeviceSynchronize());
SafeCall(cudaGetLastError());
}
|
4dbf13378d6f15243f66d65a75b6523bb41f4095.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "adagrad_update_1D_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float *d = NULL;
hipMalloc(&d, XSIZE*YSIZE);
float *m = NULL;
hipMalloc(&m, XSIZE*YSIZE);
float clip = 1;
float lr = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
adagrad_update_1D_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, x,d,m,clip,lr,size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
adagrad_update_1D_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, x,d,m,clip,lr,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
adagrad_update_1D_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, x,d,m,clip,lr,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 4dbf13378d6f15243f66d65a75b6523bb41f4095.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "adagrad_update_1D_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float *d = NULL;
cudaMalloc(&d, XSIZE*YSIZE);
float *m = NULL;
cudaMalloc(&m, XSIZE*YSIZE);
float clip = 1;
float lr = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
adagrad_update_1D_1D<<<gridBlock,threadBlock>>>(x,d,m,clip,lr,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
adagrad_update_1D_1D<<<gridBlock,threadBlock>>>(x,d,m,clip,lr,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
adagrad_update_1D_1D<<<gridBlock,threadBlock>>>(x,d,m,clip,lr,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7a44918d12a3a4e29b62afbf57646725e7ad3507.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ==========================================================================
// Version 1.0
// ==========================================================================
// (C)opyright: 2010
//
// Ulm University
//
// Creator: Ferdinand Deger, <Firstname>.<Lastname>@gmail.com
// Creator: Helmut Sedding, <Firstname>@<Lastname>.net
// ==========================================================================
// Contains the CUDA kernel for the interpolated resize to create the pyramid
// The file also contains the C++ Wrapper code to access the GPU
// ==========================================================================
#include <cstdio>
#include <cstring>
#include <cmath>
#include <cutil_inline.h>
#include <cutil_math.h>
#include "../util/io_tga.h"
#include "../util/io_matlab.h"
#include "pyramid.h"
//const float pyr_factor = powf(2, 1/4.0);
// easier...:
#define PYR_FACTOR 1.18920711500272f
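// PYR_FACTOR is 2^(1/4) (~1.18921), so each level shrinks both dimensions by that factor and every four
// levels halve the resolution; MAX_NLEVELS and MIN_SIDELENGTH (defined elsewhere, presumably in pyramid.h)
// bound how far the pyramid is built.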
/*
== PYRAMID GPU FUNCTIONS
*/
// <<<<<<<<<<<<<<<<<<<
// CUBIC INTERPOLATION, source: nvidia examples
// w0, w1, w2, and w3 are the four cubic B-spline basis functions
__host__ __device__
float w0(float a) {
return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); // optimized
}
__host__ __device__
float w1(float a) {
// return (1.0f/6.0f)*(3.0f*a*a*a - 6.0f*a*a + 4.0f);
return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f);
}
__host__ __device__
float w2(float a) {
// return (1.0f/6.0f)*(-3.0f*a*a*a + 3.0f*a*a + 3.0f*a + 1.0f);
return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f);
}
__host__ __device__
float w3(float a) {
return (1.0f/6.0f)*(a*a*a);
}
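// For any a in [0,1] these four basis functions sum to 1 (w0 + w1 + w2 + w3 = 1), so the cubic filter
// below is a weighted average and needs no renormalization.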
// g0 and g1 are the two amplitude functions
__device__ float g0(float a) {
return w0(a) + w1(a);
}
__device__ float g1(float a) {
return w2(a) + w3(a);
}
// h0 and h1 are the two offset functions
__device__ float h0(float a) {
// note +0.5 offset to compensate for CUDA linear filtering convention
return -1.0f + w1(a) / (w0(a) + w1(a)) + 0.5f;
}
__device__ float h1(float a) {
return 1.0f + w3(a) / (w2(a) + w3(a)) + 0.5f;
}
// filter 4 values using cubic splines
template<class T>
__device__
T cubicFilter(float x, T c0, T c1, T c2, T c3) {
T r;
r = c0 * w0(x);
r += c1 * w1(x);
r += c2 * w2(x);
r += c3 * w3(x);
return r;
}
// slow but precise bicubic lookup using 16 texture lookups
template<class T, class R> // return type, texture type
__device__
R interp2DBicubic(float x, float y) {
x -= 0.5f;
y -= 0.5f;
float px = floor(x);
float py = floor(y);
float fx = x - px;
float fy = y - py;
return cubicFilter<R>(fy,
cubicFilter<R>(fx,p2ReadP(0,px-1,py-1),p2ReadP(0,px,py-1),p2ReadP(0,px+1,py-1),p2ReadP(0,px+2,py-1)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py), p2ReadP(0,px,py), p2ReadP(0,px+1,py), p2ReadP(0,px+2,py)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py+1),p2ReadP(0,px,py+1),p2ReadP(0,px+1,py+1),p2ReadP(0,px+2,py+1)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py+2),p2ReadP(0,px,py+2),p2ReadP(0,px+1,py+2),p2ReadP(0,px+2,py+2))
);
}
// >>>>>>>>>>>>>>>>>>>
// Empty warm-up kernel so one-time CUDA initialization cost is not charged to the later timings; it is executed first.
bool pseudoKernel_executed = false;
__global__
void pseudoKernel() {
}
// main pyramid resize kernel
__global__
void resizeKernel(float* img, size_t pitch, int width, int height, int lvl) {
// coords
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(x < width && y < height) {
// interpolate color at float-coordinates on lower layer
float color = interp2DBicubic<float, float>(x * PYR_FACTOR, y * PYR_FACTOR);
// write color
p2Write(img, pitch, x, y, color);
}
}
/*
== PYRAMID RESIZE FUNCTIONS
*/
// main resize method
void pyramidResizeImg2gpuPyr(float*** gpu_pyr, t_pyrprops* _pyrprops, float* _img, uint width, uint height) {
// run pseudo kernel
if(!pseudoKernel_executed) {
pseudoKernel_executed = true;
#ifdef DTIME
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
#endif
#ifdef MACTIME
timeval t1, t2;
double elapsedTime;
gettimeofday(&t1, NULL);
#endif
hipLaunchKernelGGL(( pseudoKernel), dim3(dim3(1,1,1)), dim3(dim3(1,1,1)) , 0, 0, );
#ifdef DTIME
clock_gettime(CLOCK_MONOTONIC, &ts_end);
printElapsedTime("Cuda Init", ts_start, ts_end);
#endif
#ifdef MACTIME
gettimeofday(&t2, NULL);
// compute and print the elapsed time in millisec
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
printf("Cuda Init %f ms\n",elapsedTime);// elapsedTime << " ms.\n";
#endif
}
#ifdef DTIME
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
#endif
#ifdef MACTIME
timeval t1, t2;
double elapsedTime;
gettimeofday(&t1, NULL);
#endif
#ifdef MEASURETIME
clock_gettime(CLOCK_MONOTONIC, &measure_ts_start);
#endif
// init pyramid properties
_pyrprops->init(MAX_NLEVELS, 1);
float currentfactor = 1;
for(int i=0; i<_pyrprops->nlevels; ++i) {
// calc height/width
int h = currentfactor * height;
int w = currentfactor * width;
// limit min size
if(i>0 && (h < MIN_SIDELENGTH || w < MIN_SIDELENGTH)) {
_pyrprops->nlevels = i;
break;
}
if(DEBUG) printf("i f w h pfx %d %f %d %d %d\n",i,currentfactor,w,h, _pyrprops->pfxsize[i] + h*w);
// save
_pyrprops->width[i] = w;
_pyrprops->height[i] = h;
_pyrprops->pfxsize[i+1] = _pyrprops->pfxsize[i] + h*w;
currentfactor /= PYR_FACTOR;
}
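// Example of the resulting level sizes (illustrative, for a 1024-pixel side): roughly 1024, 861, 724, 608,
// 512, ... each level is the previous one divided by PYR_FACTOR, until a side would drop below MIN_SIDELENGTH.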
if(DEBUG) printf("pyr size %d\n",_pyrprops->size());
unsigned int timer;
if(DEBUG) {
(cutCreateTimer(&timer));
}
if(DEBUG) {
cutResetTimer(timer);
(cutStartTimer(timer));
}
// alloc mem for pyramid
float** gpu_img = new float*[_pyrprops->nlevels];
// each level
for(int a=0; a < _pyrprops->nlevels; a++) {
cutilSafeCall(hipMallocPitch((void**)&(gpu_img[a]), &(_pyrprops->pitch[a]),
_pyrprops->width[a] * sizeof(float), _pyrprops->height[a]));
}
// copy first layer image
cutilSafeCall(hipMemcpy2D(gpu_img[0], _pyrprops->pitch[0],
_img, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice));
// copy constants, the pyramid properties
cutilSafeCall(hipMemcpyToSymbol(gpu_pyr_nlevels, &_pyrprops->nlevels, sizeof(int)));
cutilSafeCall(hipMemcpyToSymbol(gpu_pyr_npyramids, &_pyrprops->npyramids, sizeof(int)));
cutilSafeCall(hipMemcpyToSymbol(gpu_pyr_pfxsize, _pyrprops->pfxsize, sizeof(int) * (_pyrprops->nlevels+1)));
cutilSafeCall(hipMemcpyToSymbol(gpu_pyr_width, _pyrprops->width, sizeof(int) * (_pyrprops->nlevels)));
cutilSafeCall(hipMemcpyToSymbol(gpu_pyr_height, _pyrprops->height, sizeof(int) * (_pyrprops->nlevels)));
// texture descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
CUT_CHECK_ERROR("Pyramid Resize Kernel init failed");
if(DEBUG) {
(cutStopTimer(timer));
printf( "Pyramid Init time: %f (ms)\n", cutGetTimerValue(timer));
}
for(int ii=0; ii<DEBUG*9+1; ii++) //performance test
{
if(DEBUG) {
cutResetTimer(timer);
cutStartTimer(timer);
}
// process each level separately, starting from lvl=1
for(int lvl=1; lvl < _pyrprops->nlevels; ++lvl) {
dim3 blockSize(16, 16);
dim3 gridSize((_pyrprops->width[lvl] / blockSize.x) + 1,
(_pyrprops->height[lvl] / blockSize.y) + 1);
// bind texture of previous level
hipBindTexture2D(0, &gpu_pyr_tex_0, gpu_img[lvl-1], &channelDesc,
_pyrprops->width[lvl-1], _pyrprops->height[lvl-1], (_pyrprops->pitch)[lvl-1]);
gpu_pyr_tex_0.filterMode = hipFilterModePoint;
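// Point filtering is deliberate here: interp2DBicubic does its own cubic weighting from exact texel
// values (via p2ReadP, presumably a plain tex2D fetch), so the fetches must return unfiltered texels;
// hardware linear filtering would interpolate them a second time.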
// run kernel
hipLaunchKernelGGL(( resizeKernel), dim3(gridSize),dim3(blockSize) , 0, 0, gpu_img[lvl], _pyrprops->pitch[lvl],
_pyrprops->width[lvl], _pyrprops->height[lvl], lvl);
CUT_CHECK_ERROR("Pyramid Resize Kernel execution failed");
hipUnbindTexture(&gpu_pyr_tex_0);
}
cutilSafeCall(hipDeviceSynchronize());
if(DEBUG) {
(cutStopTimer(timer));
printf( "Pyramid Processing time: %f (ms)\n", cutGetTimerValue(timer));
}
}
CUT_CHECK_ERROR("Pyramid Resize Kernel finalization failed");
if(DEBUG) {
// Delete the timer
cutDeleteTimer(timer);
}
#ifdef DTIME
clock_gettime(CLOCK_MONOTONIC, &ts_end);
printElapsedTime("Pyramid Resize", ts_start, ts_end);
#endif
#ifdef MACTIME
gettimeofday(&t2, NULL);
// compute and print the elapsed time in millisec
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
printf("Pyramid Resize %f ms\n",elapsedTime);// elapsedTime << " ms.\n";
#endif
*gpu_pyr = gpu_img;
}
void pyramidResizeFreeGpu(float** gpu_pyr, t_pyrprops* pyrprops) {
for (int i = pyrprops->nlevels; i--; ) {
cutilSafeCall(hipFree(gpu_pyr[i]));
}
CUT_CHECK_ERROR("Pyramid Resize cleanup/free failed");
}
// create image pyramid out of file
// stores result in device at gpu_pyr
// pyramid properties are in _pyrprops, and as constants in the device
void pyramidResizeImgFile2gpuPyr(float*** gpu_pyr, t_pyrprops* pyrprops, char* _filename) {
// read input img
float* img;
uint width, height, channels;
img = loadTGAImage(_filename, &width, &height, &channels);
pyramidResizeImg2gpuPyr(gpu_pyr, pyrprops, img, width, height);
delete[] img;
}
void pyramidResizeImgFile2PyrFile(char* _filename, char* _outfile) {
float** gpu_pyr;
t_pyrprops pyrprops;
pyramidResizeImgFile2gpuPyr(&gpu_pyr, &pyrprops, _filename);
savePyr_gpu2file(gpu_pyr, &pyrprops, _outfile);
pyramidResizeFreeGpu(gpu_pyr, &pyrprops);
}
| 7a44918d12a3a4e29b62afbf57646725e7ad3507.cu | // ==========================================================================
// Version 1.0
// ==========================================================================
// (C)opyright: 2010
//
// Ulm University
//
// Creator: Ferdinand Deger, <Firstname>.<Lastname>@gmail.com
// Creator: Helmut Sedding, <Firstname>@<Lastname>.net
// ==========================================================================
// Contains the CUDA kernel for the interpolated resize to create the pyramid
// The file also contains the C++ Wrapper code to access the GPU
// ==========================================================================
#include <cstdio>
#include <cstring>
#include <cmath>
#include <cutil_inline.h>
#include <cutil_math.h>
#include "../util/io_tga.h"
#include "../util/io_matlab.h"
#include "pyramid.h"
//const float pyr_factor = powf(2, 1/4.0);
// easier...:
#define PYR_FACTOR 1.18920711500272f
/*
== PYRAMID GPU FUNCTIONS
*/
// <<<<<<<<<<<<<<<<<<<
// CUBIC INTERPOLATION, source: nvidia examples
// w0, w1, w2, and w3 are the four cubic B-spline basis functions
__host__ __device__
float w0(float a) {
return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); // optimized
}
__host__ __device__
float w1(float a) {
// return (1.0f/6.0f)*(3.0f*a*a*a - 6.0f*a*a + 4.0f);
return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f);
}
__host__ __device__
float w2(float a) {
// return (1.0f/6.0f)*(-3.0f*a*a*a + 3.0f*a*a + 3.0f*a + 1.0f);
return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f);
}
__host__ __device__
float w3(float a) {
return (1.0f/6.0f)*(a*a*a);
}
// g0 and g1 are the two amplitude functions
__device__ float g0(float a) {
return w0(a) + w1(a);
}
__device__ float g1(float a) {
return w2(a) + w3(a);
}
// h0 and h1 are the two offset functions
__device__ float h0(float a) {
// note +0.5 offset to compensate for CUDA linear filtering convention
return -1.0f + w1(a) / (w0(a) + w1(a)) + 0.5f;
}
__device__ float h1(float a) {
return 1.0f + w3(a) / (w2(a) + w3(a)) + 0.5f;
}
// filter 4 values using cubic splines
template<class T>
__device__
T cubicFilter(float x, T c0, T c1, T c2, T c3) {
T r;
r = c0 * w0(x);
r += c1 * w1(x);
r += c2 * w2(x);
r += c3 * w3(x);
return r;
}
// slow but precise bicubic lookup using 16 texture lookups
template<class T, class R> // return type, texture type
__device__
R interp2DBicubic(float x, float y) {
x -= 0.5f;
y -= 0.5f;
float px = floor(x);
float py = floor(y);
float fx = x - px;
float fy = y - py;
return cubicFilter<R>(fy,
cubicFilter<R>(fx,p2ReadP(0,px-1,py-1),p2ReadP(0,px,py-1),p2ReadP(0,px+1,py-1),p2ReadP(0,px+2,py-1)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py), p2ReadP(0,px,py), p2ReadP(0,px+1,py), p2ReadP(0,px+2,py)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py+1),p2ReadP(0,px,py+1),p2ReadP(0,px+1,py+1),p2ReadP(0,px+2,py+1)),
cubicFilter<R>(fx,p2ReadP(0,px-1,py+2),p2ReadP(0,px,py+2),p2ReadP(0,px+1,py+2),p2ReadP(0,px+2,py+2))
);
}
// >>>>>>>>>>>>>>>>>>>
// Empty warm-up kernel so one-time CUDA initialization cost is not charged to the later timings; it is executed first.
bool pseudoKernel_executed = false;
__global__
void pseudoKernel() {
}
// main pyramid resize kernel
__global__
void resizeKernel(float* img, size_t pitch, int width, int height, int lvl) {
// coords
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if(x < width && y < height) {
// interpolate color at float-coordinates on lower layer
float color = interp2DBicubic<float, float>(x * PYR_FACTOR, y * PYR_FACTOR);
// write color
p2Write(img, pitch, x, y, color);
}
}
/*
== PYRAMID RESIZE FUNCTIONS
*/
// main resize method
void pyramidResizeImg2gpuPyr(float*** gpu_pyr, t_pyrprops* _pyrprops, float* _img, uint width, uint height) {
// run pseudo kernel
if(!pseudoKernel_executed) {
pseudoKernel_executed = true;
#ifdef DTIME
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
#endif
#ifdef MACTIME
timeval t1, t2;
double elapsedTime;
gettimeofday(&t1, NULL);
#endif
pseudoKernel<<< dim3(1,1,1), dim3(1,1,1) >>>();
#ifdef DTIME
clock_gettime(CLOCK_MONOTONIC, &ts_end);
printElapsedTime("Cuda Init", ts_start, ts_end);
#endif
#ifdef MACTIME
gettimeofday(&t2, NULL);
// compute and print the elapsed time in millisec
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
printf("Cuda Init %f ms\n",elapsedTime);// elapsedTime << " ms.\n";
#endif
}
#ifdef DTIME
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
#endif
#ifdef MACTIME
timeval t1, t2;
double elapsedTime;
gettimeofday(&t1, NULL);
#endif
#ifdef MEASURETIME
clock_gettime(CLOCK_MONOTONIC, &measure_ts_start);
#endif
// init pyramid properties
_pyrprops->init(MAX_NLEVELS, 1);
float currentfactor = 1;
for(int i=0; i<_pyrprops->nlevels; ++i) {
// calc height/width
int h = currentfactor * height;
int w = currentfactor * width;
// limit min size
if(i>0 && (h < MIN_SIDELENGTH || w < MIN_SIDELENGTH)) {
_pyrprops->nlevels = i;
break;
}
if(DEBUG) printf("i f w h pfx %d %f %d %d %d\n",i,currentfactor,w,h, _pyrprops->pfxsize[i] + h*w);
// save
_pyrprops->width[i] = w;
_pyrprops->height[i] = h;
_pyrprops->pfxsize[i+1] = _pyrprops->pfxsize[i] + h*w;
currentfactor /= PYR_FACTOR;
}
if(DEBUG) printf("pyr size %d\n",_pyrprops->size());
unsigned int timer;
if(DEBUG) {
(cutCreateTimer(&timer));
}
if(DEBUG) {
cutResetTimer(timer);
(cutStartTimer(timer));
}
// alloc mem for pyramid
float** gpu_img = new float*[_pyrprops->nlevels];
// each level
for(int a=0; a < _pyrprops->nlevels; a++) {
cutilSafeCall(cudaMallocPitch((void**)&(gpu_img[a]), &(_pyrprops->pitch[a]),
_pyrprops->width[a] * sizeof(float), _pyrprops->height[a]));
}
// copy first layer image
cutilSafeCall(cudaMemcpy2D(gpu_img[0], _pyrprops->pitch[0],
_img, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice));
// copy constants, the pyramid properties
cutilSafeCall(cudaMemcpyToSymbol(gpu_pyr_nlevels, &_pyrprops->nlevels, sizeof(int)));
cutilSafeCall(cudaMemcpyToSymbol(gpu_pyr_npyramids, &_pyrprops->npyramids, sizeof(int)));
cutilSafeCall(cudaMemcpyToSymbol(gpu_pyr_pfxsize, _pyrprops->pfxsize, sizeof(int) * (_pyrprops->nlevels+1)));
cutilSafeCall(cudaMemcpyToSymbol(gpu_pyr_width, _pyrprops->width, sizeof(int) * (_pyrprops->nlevels)));
cutilSafeCall(cudaMemcpyToSymbol(gpu_pyr_height, _pyrprops->height, sizeof(int) * (_pyrprops->nlevels)));
// texture descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
CUT_CHECK_ERROR("Pyramid Resize Kernel init failed");
if(DEBUG) {
(cutStopTimer(timer));
printf( "Pyramid Init time: %f (ms)\n", cutGetTimerValue(timer));
}
for(int ii=0; ii<DEBUG*9+1; ii++) //performance test
{
if(DEBUG) {
cutResetTimer(timer);
cutStartTimer(timer);
}
// process each level separately, starting from lvl=1
for(int lvl=1; lvl < _pyrprops->nlevels; ++lvl) {
dim3 blockSize(16, 16);
dim3 gridSize((_pyrprops->width[lvl] / blockSize.x) + 1,
(_pyrprops->height[lvl] / blockSize.y) + 1);
// bind texture of previous level
cudaBindTexture2D(0, &gpu_pyr_tex_0, gpu_img[lvl-1], &channelDesc,
_pyrprops->width[lvl-1], _pyrprops->height[lvl-1], (_pyrprops->pitch)[lvl-1]);
gpu_pyr_tex_0.filterMode = cudaFilterModePoint;
// run kernel
resizeKernel<<< gridSize,blockSize >>>(gpu_img[lvl], _pyrprops->pitch[lvl],
_pyrprops->width[lvl], _pyrprops->height[lvl], lvl);
CUT_CHECK_ERROR("Pyramid Resize Kernel execution failed");
cudaUnbindTexture(&gpu_pyr_tex_0);
}
cutilSafeCall(cudaThreadSynchronize());
if(DEBUG) {
(cutStopTimer(timer));
printf( "Pyramid Processing time: %f (ms)\n", cutGetTimerValue(timer));
}
}
CUT_CHECK_ERROR("Pyramid Resize Kernel finalization failed");
if(DEBUG) {
// Delete the timer
cutDeleteTimer(timer);
}
#ifdef DTIME
clock_gettime(CLOCK_MONOTONIC, &ts_end);
printElapsedTime("Pyramid Resize", ts_start, ts_end);
#endif
#ifdef MACTIME
gettimeofday(&t2, NULL);
// compute and print the elapsed time in millisec
elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms
elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
printf("Pyramid Resize %f ms\n",elapsedTime);// elapsedTime << " ms.\n";
#endif
*gpu_pyr = gpu_img;
}
void pyramidResizeFreeGpu(float** gpu_pyr, t_pyrprops* pyrprops) {
for (int i = pyrprops->nlevels; i--; ) {
cutilSafeCall(cudaFree(gpu_pyr[i]));
}
CUT_CHECK_ERROR("Pyramid Resize cleanup/free failed");
}
// create image pyramid out of a file
// stores the result on the device at gpu_pyr
// pyramid properties are stored in _pyrprops and as constants on the device
void pyramidResizeImgFile2gpuPyr(float*** gpu_pyr, t_pyrprops* pyrprops, char* _filename) {
// read input img
float* img;
uint width, height, channels;
img = loadTGAImage(_filename, &width, &height, &channels);
pyramidResizeImg2gpuPyr(gpu_pyr, pyrprops, img, width, height);
delete[] img;
}
void pyramidResizeImgFile2PyrFile(char* _filename, char* _outfile) {
float** gpu_pyr;
t_pyrprops pyrprops;
pyramidResizeImgFile2gpuPyr(&gpu_pyr, &pyrprops, _filename);
savePyr_gpu2file(gpu_pyr, &pyrprops, _outfile);
pyramidResizeFreeGpu(gpu_pyr, &pyrprops);
}
|
6104c048cc740d921d88794728270d308257d27f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"random_kernel_initializers.cuh"
namespace random_kernel_initializers {
__global__
void initialiseRandomKernel1D(unsigned int seed, hiprandState_t* states,
unsigned int nPaths) {
// Thread Id (corresponds to path number)
const unsigned int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Initialise a RNG for every Path
if (t_idx < nPaths)
hiprand_init(seed, t_idx, 0, &states[t_idx]);
}
__global__
void initialiseRandomKernel2D(unsigned int seed, hiprandState_t* states,
unsigned int nPathsWidth, unsigned int nPathsHeight) {
// Thread t_idx (corresponds to path number)
const unsigned int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int r_idx = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int t_idx = c_idx + nPathsWidth * r_idx;
// Initialise a RNG for every Path
const unsigned int nPaths = nPathsWidth * nPathsHeight;
if (t_idx < nPaths)
hiprand_init(seed, t_idx, 0, &states[t_idx]);
}
__global__
void initialiseRandomKernel3D(unsigned int seed, hiprandState_t* states,
unsigned int nPathsWidth, unsigned int nPathsHeight, unsigned int nPathsDepth) {
// Thread t_idx (corresponds to path number)
const unsigned int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int r_idx = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int l_idx = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int t_idx = c_idx + nPathsWidth * r_idx + nPathsWidth * nPathsHeight*l_idx;
// Initialise a RNG for every Path
const unsigned int nPaths = nPathsWidth * nPathsHeight * nPathsDepth;
if (t_idx < nPaths)
hiprand_init(seed, t_idx, 0, &states[t_idx]);
}
}
| 6104c048cc740d921d88794728270d308257d27f.cu | #include"random_kernel_initializers.cuh"
namespace random_kernel_initializers {
__global__
void initialiseRandomKernel1D(unsigned int seed, curandState_t* states,
unsigned int nPaths) {
// Thread Id (corresponds to path number)
const unsigned int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Initialise a RNG for every Path
if (t_idx < nPaths)
curand_init(seed, t_idx, 0, &states[t_idx]);
}
__global__
void initialiseRandomKernel2D(unsigned int seed, curandState_t* states,
unsigned int nPathsWidth, unsigned int nPathsHeight) {
// Thread t_idx (corresponds to path number)
const unsigned int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int r_idx = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int t_idx = c_idx + nPathsWidth * r_idx;
// Initialise a RNG for every Path
const unsigned int nPaths = nPathsWidth * nPathsHeight;
if (t_idx < nPaths)
curand_init(seed, t_idx, 0, &states[t_idx]);
}
__global__
void initialiseRandomKernel3D(unsigned int seed, curandState_t* states,
unsigned int nPathsWidth, unsigned int nPathsHeight, unsigned int nPathsDepth) {
// Thread t_idx (corresponds to path number)
const unsigned int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int r_idx = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int l_idx = blockIdx.z * blockDim.z + threadIdx.z;
const unsigned int t_idx = c_idx + nPathsWidth * r_idx + nPathsWidth * nPathsHeight*l_idx;
// Initialise a RNG for every Path
const unsigned int nPaths = nPathsWidth * nPathsHeight * nPathsDepth;
if (t_idx < nPaths)
curand_init(seed, t_idx, 0, &states[t_idx]);
}
}
|
62904e2d1b6e76a98f7564e619849451eba41eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Raster.cuh"
#include "TensorflowOp_generated.h"
#include <hip/hip_fp16.h>
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
// Blit kernels do not handle the Region offset themselves
template <typename T>
__global__ void blitRegion(const T *inputO, T *outputO,
int loopCount,
const int32_t* dstIndice, const int32_t* srcIndice,
int dstUseIndice, int srcUseIndice,
int dstStep, int srcStep,int srcLimit,
int sizeZ, int sizeY, int sizeX,
int strideZ, int strideY, int strideX,
int dstStrideZ, int dstStrideY, int dstStrideX
) {
int total = loopCount;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int srcOffsetO = i * srcStep;
if (srcUseIndice >= 0) {
srcOffsetO = srcIndice[i] * srcStep;
}
int dstOffsetO = i * dstStep;
if (dstUseIndice >= 0) {
dstOffsetO = dstIndice[i] * dstStep;
}
if (srcOffsetO >= 0 && srcOffsetO < srcLimit) {
const T* input = inputO + srcOffsetO;
T* output = outputO + dstOffsetO;
for (int z=0; z<sizeZ; ++z) {
for (int y=0; y<sizeY; ++y) {
for (int x=0; x<sizeX; ++x) {
int srcOffset = z * strideZ + y * strideY + x * strideX;
int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
output[dstOffset] = input[srcOffset];
}
}
}
} else {
T* output = outputO + dstOffsetO;
for (int z=0; z<sizeZ; ++z) {
for (int y=0; y<sizeY; ++y) {
for (int x=0; x<sizeX; ++x) {
int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
output[dstOffset] = (T)0;
}
}
}
}
}
}
void BlitWithIndice(uint8_t* output, const uint8_t* input, const int32_t* dstIndices, const int32_t* srcIndices, int dstUseIndice, int srcUseIndice, int loopCount, int dstStep, int srcStep, int srcLimit, const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) {
int count = loopCount;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 4:
hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
case 2:
hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
case 1:
hipLaunchKernelGGL(( blitRegion), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
default:
break;
}
}
#define UNARY_FUNC(Name, Func)\
template<typename T>\
__global__ void Name(const T *input, T *output,\
int count,\
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
int strideZ, int strideY, int strideX,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
int ix, tmp, iy, iz;\
sizeX.divmod(i, tmp, ix);\
sizeY.divmod(tmp, iz, iy);\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
T x = input[srcOffset];\
output[dstOffset] = Func;\
}\
}\
template<typename T>\
__global__ void FLOAT##Name(const T *input, T *output,\
int count,\
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
int strideZ, int strideY, int strideX,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
int ix, tmp, iy, iz;\
sizeX.divmod(i, tmp, ix);\
sizeY.divmod(tmp, iz, iy);\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
float x = (float)input[srcOffset];\
output[dstOffset] = (float)(Func);\
}\
}\
template<typename T>
__global__ void blit_2(const T *input, T *output,
int count,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int ix, tmp, iy, iz;
sizeX.divmod(i, tmp, ix);
sizeY.divmod(tmp, iz, iy);
int srcOffset = iz * strideZ + iy * strideY + (ix << 1);
int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1);
int2 * dstF = (int2 *)(output+dstOffset);
dstF[0] = ((int2 *)(input+srcOffset))[0];
}
}
struct Bytes512 {
int4 x[4];
};
UNARY_FUNC(blit, x);
UNARY_FUNC(ABS, abs(x));
UNARY_FUNC(EXP, exp(x));
UNARY_FUNC(NEG, -x);
UNARY_FUNC(RECIPROCAL, (1.0)/x);
UNARY_FUNC(FLOOR, floor(x));
UNARY_FUNC(CEIL, ceil(x));
UNARY_FUNC(SQUARE, x*x);
UNARY_FUNC(SQRT, (T)(sqrt((float)x)));
UNARY_FUNC(RSQRT, (T)(rsqrt((float)x)));
UNARY_FUNC(LOG, (T)(log((float)x)));
UNARY_FUNC(SIN, (T)(sin((float)x)));
UNARY_FUNC(COS, (T)(cos((float)x)));
UNARY_FUNC(TAN, (T)(tan((float)x)));
UNARY_FUNC(ASIN, (T)(asin((float)x)));
UNARY_FUNC(ACOS, (T)(acos((float)x)));
UNARY_FUNC(ATAN, (T)(atan((float)x)));
UNARY_FUNC(LOG1P, log(1+x));
UNARY_FUNC(TANH, tanh(x));
UNARY_FUNC(SIGMOID, 1./(1.+exp(-x)));
UNARY_FUNC(EXPM1, exp(x)-1);
UNARY_FUNC(ATANH, atanh(x));
UNARY_FUNC(ACOSH, acosh(x));
UNARY_FUNC(COSH, cosh(x));
UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? -1 : 0));
UNARY_FUNC(ROUND, round(x));
UNARY_FUNC(SINH, sinh(x));
UNARY_FUNC(ASINH, asinh(x));
UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0));
UNARY_FUNC(ERF, erf(x));
UNARY_FUNC(ERFC, erfc(x));
UNARY_FUNC(ERFINV, erfinv(x));
UNARY_FUNC(GELU, (1.0f + tanh(0.79788458f * (0.044715f * x * x * x + x))) * x * 0.5f);
UNARY_FUNC(GELU_STANDARD, (erf(x*0.7071067932881648f)+1.f)*x*0.5);
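// GELU above uses the tanh approximation (0.79788458 ~= sqrt(2/pi), 0.044715 the cubic correction term);
// GELU_STANDARD uses the erf-based definition with the 1/sqrt(2) scaling constant.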
void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) {
int count = size[0] * size[1] * size[2];
DivModFast sz(size[0]);
DivModFast sy(size[1]);
DivModFast sx(size[2]);
//printf("%d-%d-%d, %d-%d-%d,-%d-%d-%d\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]);
if(bytes == 4 && count > 16384 && size[2] % 2 == 0 && srcStride[2] == 1 && dstStride[2] == 1) {
//printf("%d-%d-%d, %d-%d-%d,-%d-%d-%d\n\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]);
count /= 2;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
DivModFast sx_2((size[2]/2));
hipLaunchKernelGGL(( blit_2), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
count,
sz, sy, sx_2,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
}
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 64:
hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const Bytes512*)input, (Bytes512*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 32:
hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const double4*)input, (double4*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 4:
hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 2:
hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 1:
hipLaunchKernelGGL(( blit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
default:
break;
}
}
template<typename T0, typename T1>
__global__ void fuseblit(const T0 *input, T1 *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY, int strideX,
int dstStrideZ, int dstStrideY, int dstStrideX
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX;
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
output[dst_offset] = input[src_offset];
}
}
__global__ void fuseblit_4(const int32_t *input, int32_t *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
int4* srcF = (int4 *)(input + src_offset);
int4* dstF = (int4 *)(output + dst_offset);
dstF[0] = srcF[0];
}
}
__global__ void fuseblit_half_4(const int16_t *input, int16_t *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
int2* srcF = (int2 *)(input + src_offset);
int2* dstF = (int2 *)(output + dst_offset);
dstF[0] = srcF[0];
}
}
void FuseRasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime, int unit) {
DivModFast sz(size[0]);
DivModFast sy(size[1]);
int count = fuseNum * size[0] * size[1] * size[2];
bool strideC4Support = srcStride[0] % 4 == 0 && srcStride[1] % 4 == 0 && dstStride[0] % 4 == 0 && dstStride[1] % 4 == 0;
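// Vectorized path: the innermost dimension must be contiguous and a multiple of 4, and all outer
// strides multiples of 4, so each thread can move 4 elements at once (one int4 for 4-byte types,
// one int2 for 2-byte types).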
if(size[2] % 4 == 0 && count > 16384 && srcStride[2] == 1 && dstStride[2] == 1 && unit == 4 && strideC4Support) {
int xL4 = size[2] / 4;
int countC4 = fuseNum * size[0] * size[1] * xL4;
int numBlocks = runtime->blocks_num(countC4);
int threadsPerBlock = runtime->threads_num();
DivModFast sx_4(xL4);
if(bytes == 4) {
hipLaunchKernelGGL(( fuseblit_4), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const int32_t*)input, (int32_t*)output,
fuseNum, countC4, (const int32_t*)sliceOffset,
sz, sy, sx_4,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
} else if(bytes == 2){
hipLaunchKernelGGL(( fuseblit_half_4), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, (const int16_t*)input, (int16_t*)output,
fuseNum, countC4, (const int32_t*)sliceOffset,
sz, sy, sx_4,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
}
}
DivModFast sx(size[2]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 64:
hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const Bytes512*)input, (Bytes512*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 16:
hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int4*)input, (int4*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 4:
hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 2:
hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int16_t*)input, (int16_t*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 1:
hipLaunchKernelGGL(( fuseblit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
default:
break;
}
//printf("%s, %d-%d-%d-%d\n", hipGetErrorString(hipGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y);
}
template<typename T0, typename T1>
__global__ void fuseblitLimit(const T0 *input, T1 *output,
const FuseRegion* info, const int32_t* sliceOffset
) {
int sizeZ = info->size[0];
int sizeY = info->size[1];
int sizeX = info->size[2];
int strideZ = info->srcStride[0];
int strideY = info->srcStride[1];
int strideX = info->srcStride[2];
int dstStrideZ = info->dstStride[0];
int dstStrideY = info->dstStride[1];
int dstStrideX = info->dstStride[2];
int fuseNum = info->fuseNumber;
int count = fuseNum*sizeZ * sizeY * sizeX;
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) {
int j = c / (sizeZ * sizeY * sizeX);
int i = c % (sizeZ * sizeY * sizeX);
int ix = i % sizeX;
int tmp = i / sizeX;
int iy = tmp % sizeY;
int iz = tmp / sizeY;
const int* srcOffsetPtr = sliceOffset + 8 * j;
const int* dstOffsetPtr = sliceOffset + 8 * j + 4;
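// sliceOffset packs 8 ints per fused region: [0..2] = valid source extent (z,y,x), [3] = source base
// offset, [4..6] = valid destination extent, [7] = destination base offset. Reads outside the source
// extent yield 0; writes outside the destination extent are skipped.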
T0 srcValue = (T0)0;
int src_offset = srcOffsetPtr[3] + iz * strideZ + iy * strideY + ix * strideX;
if (srcOffsetPtr[0] > iz && srcOffsetPtr[1] > iy && srcOffsetPtr[2] > ix) {
srcValue = input[src_offset];
}
int dst_offset = dstOffsetPtr[3] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
//printf("%d -> %d - %f\n", src_offset, dst_offset, srcValue);
if (dstOffsetPtr[0] > iz && dstOffsetPtr[1] > iy && dstOffsetPtr[2] > ix) {
output[dst_offset] = srcValue;
}
}
}
void FuseRasterBlitFloatToHalf(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (half*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitHalfToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (float*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitFloatToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitCommon(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime, int bytes) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
switch (bytes) {
case 4:
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,
info, (const int32_t*)sliceOffset);
break;
case 2:
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output,
info, (const int32_t*)sliceOffset);
break;
case 1:
hipLaunchKernelGGL(( fuseblitLimit), dim3(block_num), dim3(threads_num), 0, 0, (const int8_t*)input, (int8_t*)output,
info, (const int32_t*)sliceOffset);
break;
default:
break;
}
}
void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
DivModFast sz(size[0]);
DivModFast sy(size[1]);
DivModFast sx(size[2]);
// TODO: Support FP16
#define COMPUTE(TYPE)\
if (opType == MNN::UnaryOpOperation_##TYPE ) {\
if(bytes==2) {\
hipLaunchKernelGGL(( FLOAT##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const half*)input, (half*)output,\
count, \
sz, sy, sx,\
srcStride[0], srcStride[1], srcStride[2],\
dstStride[0], dstStride[1], dstStride[2]);\
} else {\
hipLaunchKernelGGL(( TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const float*)input, (float*)output,\
count, \
sz, sy, sx,\
srcStride[0], srcStride[1], srcStride[2],\
dstStride[0], dstStride[1], dstStride[2]);\
}\
return;\
}\
COMPUTE(ABS);
COMPUTE(NEG);
COMPUTE(FLOOR);
COMPUTE(CEIL);
COMPUTE(SQUARE);
COMPUTE(SQRT);
COMPUTE(RSQRT);
COMPUTE(EXP);
COMPUTE(LOG);
COMPUTE(SIN);
COMPUTE(COS);
COMPUTE(TAN);
COMPUTE(GELU);
COMPUTE(GELU_STANDARD);
COMPUTE(ASIN);
COMPUTE(ACOS);
COMPUTE(ATAN);
COMPUTE(RECIPROCAL);
COMPUTE(LOG1P);
COMPUTE(TANH);
COMPUTE(SIGMOID);
COMPUTE(EXPM1);
COMPUTE(ACOSH);
COMPUTE(ATANH);
COMPUTE(SIGN);
COMPUTE(COSH);
COMPUTE(ROUND);
COMPUTE(SINH);
COMPUTE(ASINH);
COMPUTE(HARDSWISH);
COMPUTE(ERF);
COMPUTE(ERFC);
COMPUTE(ERFINV);
#undef COMPUTE
}
#define BINARY_FUNC(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void Binary##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ, int sizeY, int sizeX,\
int strideZ, int strideY, int strideX,\
int strideZ1, int strideY1, int strideX1,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
int count = sizeZ * sizeY * sizeX;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int total = sizeZ * sizeY * sizeX;\
int ix = i % sizeX;\
int tmp = i / sizeX;\
int iy = tmp % sizeY;\
int iz = tmp / sizeY;\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
TIn x = input0[srcOffset];\
TIn y = input1[srcOffset1];\
output[dstOffset] = (TOut)Func;\
}\
}\
#define BINARY_FUNC_FLOATMID(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void BinaryMid##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ, int sizeY, int sizeX,\
int strideZ, int strideY, int strideX,\
int strideZ1, int strideY1, int strideX1,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
int count = sizeZ * sizeY * sizeX;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int total = sizeZ * sizeY * sizeX;\
int ix = i % sizeX;\
int tmp = i / sizeX;\
int iy = tmp % sizeY;\
int iz = tmp / sizeY;\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
float x = input0[srcOffset];\
float y = input1[srcOffset1];\
output[dstOffset] = (TOut)(Func);\
}\
}\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinear##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ,\
int strideZ,\
int strideZ1,\
int dstStrideZ\
) { \
int count = sizeZ;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz * strideZ;\
int srcOffset1 = iz * strideZ1;\
int dstOffset = iz * dstStrideZ;\
float x = input0[srcOffset];\
float y = input1[srcOffset1];\
output[dstOffset] = (TOut)(Func);\
}\
}\
#define BINARY_FUNC_FLOATMID4(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinear4_##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int count_4\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz << 2;\
int srcOffset1 = iz << 2;\
int dstOffset = iz << 2;\
float4 xx = ((float4 *)(input0+srcOffset))[0];\
float4 yy = ((float4 *)(input1+srcOffset1))[0];\
float x = xx.x;\
float y = yy.x;\
output[dstOffset] = (TOut)(Func);\
x = xx.y;\
y = yy.y;\
output[dstOffset+1] = (TOut)(Func);\
x = xx.z;\
y = yy.z;\
output[dstOffset+2] = (TOut)(Func);\
x = xx.w;\
y = yy.w;\
output[dstOffset+3] = (TOut)(Func);\
}\
}\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinearHalf4_##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int count_4\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz << 2;\
int srcOffset1 = iz << 2;\
int dstOffset = iz << 2;\
half2 xx = ((half2 *)(input0+srcOffset))[0];\
half2 yy = ((half2 *)(input1+srcOffset1))[0];\
float x = (float)xx.x;\
float y = (float)yy.x;\
output[dstOffset] = (TOut)(Func);\
x = (float)xx.y;\
y = (float)yy.y;\
output[dstOffset+1] = (TOut)(Func);\
xx = ((half2 *)(input0+srcOffset))[1];\
yy = ((half2 *)(input1+srcOffset1))[1];\
x = (float)xx.x;\
y = (float)yy.x;\
output[dstOffset+2] = (TOut)(Func);\
x = (float)xx.y;\
y = (float)yy.y;\
output[dstOffset+3] = (TOut)(Func);\
}\
}\
#define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? -1 : 0))
BINARY_FUNC(ADD, x+y);
BINARY_FUNC(SUB, x-y);
BINARY_FUNC(MUL, x*y);
BINARY_FUNC(DIV, x/y);
BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC(MINIMUM, min(x, y));
BINARY_FUNC(MAXIMUM, max(x, y));
BINARY_FUNC(GREATER, x > y ? 1 : 0);
BINARY_FUNC(LESS, x < y ? 1 : 0);
BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC(EQUAL, x == y ? 1 : 0);
BINARY_FUNC(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC(FLOORDIV, floor(x / y));
BINARY_FUNC(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC(POW, pow(x, y));
BINARY_FUNC(ATAN2, atan2(x, y));
BINARY_FUNC(MOD, (x % y));
BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0);
BINARY_FUNC_FLOATMID(ADD, x+y);
BINARY_FUNC_FLOATMID(SUB, x-y);
BINARY_FUNC_FLOATMID(MUL, x*y);
BINARY_FUNC_FLOATMID(DIV, x/y);
BINARY_FUNC_FLOATMID(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC_FLOATMID(MINIMUM, min(x, y));
BINARY_FUNC_FLOATMID(MAXIMUM, max(x, y));
BINARY_FUNC_FLOATMID(GREATER, x > y ? 1 : 0);
BINARY_FUNC_FLOATMID(LESS, x < y ? 1 : 0);
BINARY_FUNC_FLOATMID(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC_FLOATMID(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC_FLOATMID(EQUAL, x == y ? 1 : 0);
BINARY_FUNC_FLOATMID(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC_FLOATMID(FLOORDIV, floor(x / y));
BINARY_FUNC_FLOATMID(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC_FLOATMID(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC_FLOATMID(POW, pow(x, y));
BINARY_FUNC_FLOATMID(ATAN2, atan2(x, y));
BINARY_FUNC_FLOATMID(MOD, fmod(x, y));
BINARY_FUNC_FLOATMID(LOGICALOR, (x || y) ? 1 : 0);
BINARY_FUNC_FLOATMID4(ADD, x+y);
BINARY_FUNC_FLOATMID4(SUB, x-y);
BINARY_FUNC_FLOATMID4(MUL, x*y);
BINARY_FUNC_FLOATMID4(DIV, x/y);
BINARY_FUNC_FLOATMID4(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC_FLOATMID4(MINIMUM, min(x, y));
BINARY_FUNC_FLOATMID4(MAXIMUM, max(x, y));
BINARY_FUNC_FLOATMID4(GREATER, x > y ? 1 : 0);
BINARY_FUNC_FLOATMID4(LESS, x < y ? 1 : 0);
BINARY_FUNC_FLOATMID4(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC_FLOATMID4(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC_FLOATMID4(EQUAL, x == y ? 1 : 0);
BINARY_FUNC_FLOATMID4(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC_FLOATMID4(FLOORDIV, floor(x / y));
BINARY_FUNC_FLOATMID4(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC_FLOATMID4(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC_FLOATMID4(POW, pow(x, y));
BINARY_FUNC_FLOATMID4(ATAN2, atan2(x, y));
BINARY_FUNC_FLOATMID4(MOD, fmod(x, y));
BINARY_FUNC_FLOATMID4(LOGICALOR, (x || y) ? 1 : 0);
template<typename T>
void BinaryBlitTemplateFloat(T* output, const T* input, const T* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
#define COMPUTE_FLOAT(TYPE, TOut)\
if (opType == MNN::BinaryOpOperation_##TYPE ) {\
if (size[2] == count) {\
if(count % 4 == 0 && count > 16384 && srcStride[2] == 1 && srcStride1[2] == 1 && dstStride[2] == 1) {\
block_num = runtime->blocks_num(count/4);\
threads_num = runtime->threads_num();\
if(bytes == 4) {\
hipLaunchKernelGGL(( BinaryMidLinear4_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\
count/4);\
} else {\
hipLaunchKernelGGL(( BinaryMidLinearHalf4_##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\
count/4);\
}\
} else {\
hipLaunchKernelGGL(( BinaryMidLinear##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\
size[2],\
srcStride[2],\
srcStride1[2],\
dstStride[2]);\
}\
} else {\
hipLaunchKernelGGL(( BinaryMid##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const T*)input, (const T*)(input1), (TOut*)output,\
size[0], size[1], size[2],\
srcStride[0], srcStride[1], srcStride[2],\
srcStride1[0], srcStride1[1], srcStride1[2],\
dstStride[0], dstStride[1], dstStride[2]);\
}\
return;\
}\
COMPUTE_FLOAT(ADD, T);
COMPUTE_FLOAT(SUB, T);
COMPUTE_FLOAT(MUL, T);
COMPUTE_FLOAT(DIV, T);
COMPUTE_FLOAT(REALDIV, T);
COMPUTE_FLOAT(MINIMUM, T);
COMPUTE_FLOAT(MAXIMUM, T);
COMPUTE_FLOAT(GREATER, int);
COMPUTE_FLOAT(LESS, int);
COMPUTE_FLOAT(LESS_EQUAL, int);
COMPUTE_FLOAT(GREATER_EQUAL, int);
COMPUTE_FLOAT(EQUAL, int);
COMPUTE_FLOAT(NOTEQUAL, int);
COMPUTE_FLOAT(FLOORDIV, T);
COMPUTE_FLOAT(FLOORMOD, T);
COMPUTE_FLOAT(POW, T);
COMPUTE_FLOAT(SquaredDifference, T);
COMPUTE_FLOAT(ATAN2, T);
COMPUTE_FLOAT(MOD, T);
#undef COMPUTE_FLOAT
}
void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
#define COMPUTE_INT(TYPE, TOut)\
if (opType == MNN::BinaryOpOperation_##TYPE ) {\
hipLaunchKernelGGL(( Binary##TYPE), dim3(block_num), dim3(threads_num), 0, 0, (const int*)input, (const int*)(input1), (TOut*)output,\
size[0], size[1], size[2],\
srcStride[0], srcStride[1], srcStride[2],\
srcStride1[0], srcStride1[1], srcStride1[2],\
dstStride[0], dstStride[1], dstStride[2]);\
return;\
}\
COMPUTE_INT(ADD, int);
COMPUTE_INT(SUB, int);
COMPUTE_INT(MUL, int);
COMPUTE_INT(DIV, int);
COMPUTE_INT(MINIMUM, int);
COMPUTE_INT(MAXIMUM, int);
COMPUTE_INT(GREATER, int);
COMPUTE_INT(LESS, int);
COMPUTE_INT(LESS_EQUAL, int);
COMPUTE_INT(GREATER_EQUAL, int);
COMPUTE_INT(EQUAL, int);
COMPUTE_INT(NOTEQUAL, int);
COMPUTE_INT(SquaredDifference, int);
COMPUTE_INT(MOD, int);
COMPUTE_INT(LOGICALOR, int);
}
void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType) {
if (type.code == halide_type_float) {
if (type.bits == 32) {
BinaryBlitTemplateFloat((float*)output, (float*)input, (float*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
} else if (type.bits == 16) {
BinaryBlitTemplateFloat((half*)output, (half*)input, (half*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
}
} else if (type.code == halide_type_int) {
BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
}
}
}// namespace CUDA
}// namespace MNN
| 62904e2d1b6e76a98f7564e619849451eba41eb0.cu | #include "Raster.cuh"
#include "TensorflowOp_generated.h"
#include <cuda_fp16.h>
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
// Blit kernels do not handle the Region offset themselves
template <typename T>
__global__ void blitRegion(const T *inputO, T *outputO,
int loopCount,
const int32_t* dstIndice, const int32_t* srcIndice,
int dstUseIndice, int srcUseIndice,
int dstStep, int srcStep,int srcLimit,
int sizeZ, int sizeY, int sizeX,
int strideZ, int strideY, int strideX,
int dstStrideZ, int dstStrideY, int dstStrideX
) {
int total = loopCount;
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) {
int srcOffsetO = i * srcStep;
if (srcUseIndice >= 0) {
srcOffsetO = srcIndice[i] * srcStep;
}
int dstOffsetO = i * dstStep;
if (dstUseIndice >= 0) {
dstOffsetO = dstIndice[i] * dstStep;
}
if (srcOffsetO >= 0 && srcOffsetO < srcLimit) {
const T* input = inputO + srcOffsetO;
T* output = outputO + dstOffsetO;
for (int z=0; z<sizeZ; ++z) {
for (int y=0; y<sizeY; ++y) {
for (int x=0; x<sizeX; ++x) {
int srcOffset = z * strideZ + y * strideY + x * strideX;
int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
output[dstOffset] = input[srcOffset];
}
}
}
} else {
T* output = outputO + dstOffsetO;
for (int z=0; z<sizeZ; ++z) {
for (int y=0; y<sizeY; ++y) {
for (int x=0; x<sizeX; ++x) {
int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX;
output[dstOffset] = (T)0;
}
}
}
}
}
}
void BlitWithIndice(uint8_t* output, const uint8_t* input, const int32_t* dstIndices, const int32_t* srcIndices, int dstUseIndice, int srcUseIndice, int loopCount, int dstStep, int srcStep, int srcLimit, const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) {
int count = loopCount;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 4:
blitRegion<<<block_num, threads_num>>>((const float*)input, (float*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
case 2:
blitRegion<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
case 1:
blitRegion<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
loopCount,
dstIndices, srcIndices,
dstUseIndice, srcUseIndice,
dstStep, srcStep, srcLimit,
reg.size[0], reg.size[1], reg.size[2],
reg.src.stride[0], reg.src.stride[1], reg.src.stride[2],
reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]);
break;
default:
break;
}
}
#define UNARY_FUNC(Name, Func)\
template<typename T>\
__global__ void Name(const T *input, T *output,\
int count,\
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
int strideZ, int strideY, int strideX,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
int ix, tmp, iy, iz;\
sizeX.divmod(i, tmp, ix);\
sizeY.divmod(tmp, iz, iy);\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
T x = input[srcOffset];\
output[dstOffset] = Func;\
}\
}\
template<typename T>\
__global__ void FLOAT##Name(const T *input, T *output,\
int count,\
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,\
int strideZ, int strideY, int strideX,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {\
int ix, tmp, iy, iz;\
sizeX.divmod(i, tmp, ix);\
sizeY.divmod(tmp, iz, iy);\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
float x = (float)input[srcOffset];\
output[dstOffset] = (float)(Func);\
}\
}\
template<typename T>
__global__ void blit_2(const T *input, T *output,
int count,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int ix, tmp, iy, iz;
sizeX.divmod(i, tmp, ix);
sizeY.divmod(tmp, iz, iy);
int srcOffset = iz * strideZ + iy * strideY + (ix << 1);
int dstOffset = iz * dstStrideZ + iy * dstStrideY + (ix << 1);
int2 * dstF = (int2 *)(output+dstOffset);
dstF[0] = ((int2 *)(input+srcOffset))[0];
}
}
struct Bytes512 {
int4 x[4];
};
UNARY_FUNC(blit, x);
UNARY_FUNC(ABS, abs(x));
UNARY_FUNC(EXP, exp(x));
UNARY_FUNC(NEG, -x);
UNARY_FUNC(RECIPROCAL, (1.0)/x);
UNARY_FUNC(FLOOR, floor(x));
UNARY_FUNC(CEIL, ceil(x));
UNARY_FUNC(SQUARE, x*x);
UNARY_FUNC(SQRT, (T)(sqrt((float)x)));
UNARY_FUNC(RSQRT, (T)(rsqrt((float)x)));
UNARY_FUNC(LOG, (T)(log((float)x)));
UNARY_FUNC(SIN, (T)(sin((float)x)));
UNARY_FUNC(COS, (T)(cos((float)x)));
UNARY_FUNC(TAN, (T)(tan((float)x)));
UNARY_FUNC(ASIN, (T)(asin((float)x)));
UNARY_FUNC(ACOS, (T)(acos((float)x)));
UNARY_FUNC(ATAN, (T)(atan((float)x)));
UNARY_FUNC(LOG1P, log(1+x));
UNARY_FUNC(TANH, tanh(x));
UNARY_FUNC(SIGMOID, 1./(1.+exp(-x)));
UNARY_FUNC(EXPM1, exp(x)-1);
UNARY_FUNC(ATANH, atanh(x));
UNARY_FUNC(ACOSH, acosh(x));
UNARY_FUNC(COSH, cosh(x));
UNARY_FUNC(SIGN, x > 0 ? 1 : (x<0 ? -1 : 0));
UNARY_FUNC(ROUND, round(x));
UNARY_FUNC(SINH, sinh(x));
UNARY_FUNC(ASINH, asinh(x));
UNARY_FUNC(HARDSWISH, 1.0/6.0 * x * min(max(x+3.0, 0.0), 6.0));
UNARY_FUNC(ERF, erf(x));
UNARY_FUNC(ERFC, erfc(x));
UNARY_FUNC(ERFINV, erfinv(x));
UNARY_FUNC(GELU, (1.0f + tanh(0.79788458f * (0.044715f * x * x * x + x))) * x * 0.5f);
UNARY_FUNC(GELU_STANDARD, (erf(x*0.7071067932881648f)+1.f)*x*0.5);
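// GELU above uses the tanh approximation (0.79788458 ~= sqrt(2/pi), 0.044715 the cubic correction term);
// GELU_STANDARD uses the erf-based definition with the 1/sqrt(2) scaling constant.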
void RasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime) {
int count = size[0] * size[1] * size[2];
DivModFast sz(size[0]);
DivModFast sy(size[1]);
DivModFast sx(size[2]);
//printf("%d-%d-%d, %d-%d-%d,-%d-%d-%d\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]);
if(bytes == 4 && count > 16384 && size[2] % 2 == 0 && srcStride[2] == 1 && dstStride[2] == 1) {
//printf("%d-%d-%d, %d-%d-%d,-%d-%d-%d\n\n", size[0], size[1], size[2], srcStride[0], srcStride[1], srcStride[2], dstStride[0], dstStride[1], dstStride[2]);
count /= 2;
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
DivModFast sx_2((size[2]/2));
blit_2<<<block_num, threads_num>>>((const float*)input, (float*)output,
count,
sz, sy, sx_2,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
}
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 64:
blit<<<block_num, threads_num>>>((const Bytes512*)input, (Bytes512*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 32:
blit<<<block_num, threads_num>>>((const double4*)input, (double4*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 4:
blit<<<block_num, threads_num>>>((const float*)input, (float*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 2:
blit<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 1:
blit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
count,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
default:
break;
}
}
template<typename T0, typename T1>
__global__ void fuseblit(const T0 *input, T1 *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY, int strideX,
int dstStrideZ, int dstStrideY, int dstStrideX
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + ix * strideX;
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
output[dst_offset] = input[src_offset];
}
}
__global__ void fuseblit_4(const int32_t *input, int32_t *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
int4* srcF = (int4 *)(input + src_offset);
int4* dstF = (int4 *)(output + dst_offset);
dstF[0] = srcF[0];
}
}
__global__ void fuseblit_half_4(const int16_t *input, int16_t *output,
int fuseNum, int count, const int32_t* sliceOffset,
DivModFast sizeZ, DivModFast sizeY, DivModFast sizeX,
int strideZ, int strideY,
int dstStrideZ, int dstStrideY
) {
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < count; c += blockDim.x * gridDim.x) {
int ix, tmp, iy, tmp2, iz, j;
sizeX.divmod(c, tmp, ix);
sizeY.divmod(tmp, tmp2, iy);
sizeZ.divmod(tmp2, j, iz);
int src_offset = sliceOffset[j] + iz * strideZ + iy * strideY + (ix << 2);
int dst_offset = sliceOffset[fuseNum+j] + iz * dstStrideZ + iy * dstStrideY + (ix << 2);
int2* srcF = (int2 *)(input + src_offset);
int2* dstF = (int2 *)(output + dst_offset);
dstF[0] = srcF[0];
}
}
void FuseRasterBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int fuseNum, void* sliceOffset, int bytes, CUDARuntime* runtime, int unit) {
DivModFast sz(size[0]);
DivModFast sy(size[1]);
int count = fuseNum * size[0] * size[1] * size[2];
bool strideC4Support = srcStride[0] % 4 == 0 && srcStride[1] % 4 == 0 && dstStride[0] % 4 == 0 && dstStride[1] % 4 == 0;
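// Vectorized path: the innermost dimension must be contiguous and a multiple of 4, and all outer
// strides multiples of 4, so each thread can move 4 elements at once (one int4 for 4-byte types,
// one int2 for 2-byte types).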
if(size[2] % 4 == 0 && count > 16384 && srcStride[2] == 1 && dstStride[2] == 1 && unit == 4 && strideC4Support) {
int xL4 = size[2] / 4;
int countC4 = fuseNum * size[0] * size[1] * xL4;
int numBlocks = runtime->blocks_num(countC4);
int threadsPerBlock = runtime->threads_num();
DivModFast sx_4(xL4);
if(bytes == 4) {
fuseblit_4<<<numBlocks, threadsPerBlock>>>((const int32_t*)input, (int32_t*)output,
fuseNum, countC4, (const int32_t*)sliceOffset,
sz, sy, sx_4,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
} else if(bytes == 2){
fuseblit_half_4<<<numBlocks, threadsPerBlock>>>((const int16_t*)input, (int16_t*)output,
fuseNum, countC4, (const int32_t*)sliceOffset,
sz, sy, sx_4,
srcStride[0], srcStride[1],
dstStride[0], dstStride[1]);
return;
}
}
DivModFast sx(size[2]);
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
switch (bytes) {
case 64:
fuseblit<<<block_num, threads_num>>>((const Bytes512*)input, (Bytes512*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 16:
fuseblit<<<block_num, threads_num>>>((const int4*)input, (int4*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 4:
fuseblit<<<block_num, threads_num>>>((const float*)input, (float*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 2:
fuseblit<<<block_num, threads_num>>>((const int16_t*)input, (int16_t*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
case 1:
fuseblit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
fuseNum, count, (const int32_t*)sliceOffset,
sz, sy, sx,
srcStride[0], srcStride[1], srcStride[2],
dstStride[0], dstStride[1], dstStride[2]);
break;
default:
break;
}
//printf("%s, %d-%d-%d-%d\n", cudaGetErrorString(cudaGetLastError()), numBlocks.x, numBlocks.y, threadsPerBlock.x, threadsPerBlock.y);
}
template<typename T0, typename T1>
__global__ void fuseblitLimit(const T0 *input, T1 *output,
const FuseRegion* info, const int32_t* sliceOffset
) {
int sizeZ = info->size[0];
int sizeY = info->size[1];
int sizeX = info->size[2];
int strideZ = info->srcStride[0];
int strideY = info->srcStride[1];
int strideX = info->srcStride[2];
int dstStrideZ = info->dstStride[0];
int dstStrideY = info->dstStride[1];
int dstStrideX = info->dstStride[2];
int fuseNum = info->fuseNumber;
int count = fuseNum*sizeZ * sizeY * sizeX;
for (size_t c = blockIdx.x * blockDim.x + threadIdx.x; c < (count); c += blockDim.x * gridDim.x) {
int j = c / (sizeZ * sizeY * sizeX);
int i = c % (sizeZ * sizeY * sizeX);
int ix = i % sizeX;
int tmp = i / sizeX;
int iy = tmp % sizeY;
int iz = tmp / sizeY;
const int* srcOffsetPtr = sliceOffset + 8 * j;
const int* dstOffsetPtr = sliceOffset + 8 * j + 4;
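// sliceOffset packs 8 ints per fused region: [0..2] = valid source extent (z,y,x), [3] = source base
// offset, [4..6] = valid destination extent, [7] = destination base offset. Reads outside the source
// extent yield 0; writes outside the destination extent are skipped.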
T0 srcValue = (T0)0;
int src_offset = srcOffsetPtr[3] + iz * strideZ + iy * strideY + ix * strideX;
if (srcOffsetPtr[0] > iz && srcOffsetPtr[1] > iy && srcOffsetPtr[2] > ix) {
srcValue = input[src_offset];
}
int dst_offset = dstOffsetPtr[3] + iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;
//printf("%d -> %d - %f\n", src_offset, dst_offset, srcValue);
if (dstOffsetPtr[0] > iz && dstOffsetPtr[1] > iy && dstOffsetPtr[2] > ix) {
output[dst_offset] = srcValue;
}
}
}
void FuseRasterBlitFloatToHalf(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (half*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitHalfToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
fuseblitLimit<<<block_num, threads_num>>>((const half*)input, (float*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitFloatToFloat(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (float*)output,
info, (const int32_t*)sliceOffset);
}
void FuseRasterBlitCommon(uint8_t* output, const uint8_t* input, const FuseRegion* info, void* sliceOffset, CUDARuntime* runtime, int bytes) {
auto& prop = runtime->prop();
int threads_num = prop.maxThreadsPerBlock;
int block_num = prop.multiProcessorCount;
switch (bytes) {
case 4:
fuseblitLimit<<<block_num, threads_num>>>((const float*)input, (float*)output,
info, (const int32_t*)sliceOffset);
break;
case 2:
fuseblitLimit<<<block_num, threads_num>>>((const half*)input, (half*)output,
info, (const int32_t*)sliceOffset);
break;
case 1:
fuseblitLimit<<<block_num, threads_num>>>((const int8_t*)input, (int8_t*)output,
info, (const int32_t*)sliceOffset);
break;
default:
break;
}
}
void UnaryBlit(uint8_t* output, const uint8_t* input, const int32_t* size, const int32_t* srcStride, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
DivModFast sz(size[0]);
DivModFast sy(size[1]);
DivModFast sx(size[2]);
// TODO: Support FP16
#define COMPUTE(TYPE)\
if (opType == MNN::UnaryOpOperation_##TYPE ) {\
if(bytes==2) {\
FLOAT##TYPE<<<block_num, threads_num>>>((const half*)input, (half*)output,\
count, \
sz, sy, sx,\
srcStride[0], srcStride[1], srcStride[2],\
dstStride[0], dstStride[1], dstStride[2]);\
} else {\
TYPE<<<block_num, threads_num>>>((const float*)input, (float*)output,\
count, \
sz, sy, sx,\
srcStride[0], srcStride[1], srcStride[2],\
dstStride[0], dstStride[1], dstStride[2]);\
}\
return;\
}\
COMPUTE(ABS);
COMPUTE(NEG);
COMPUTE(FLOOR);
COMPUTE(CEIL);
COMPUTE(SQUARE);
COMPUTE(SQRT);
COMPUTE(RSQRT);
COMPUTE(EXP);
COMPUTE(LOG);
COMPUTE(SIN);
COMPUTE(COS);
COMPUTE(TAN);
COMPUTE(GELU);
COMPUTE(GELU_STANDARD);
COMPUTE(ASIN);
COMPUTE(ACOS);
COMPUTE(ATAN);
COMPUTE(RECIPROCAL);
COMPUTE(LOG1P);
COMPUTE(TANH);
COMPUTE(SIGMOID);
COMPUTE(EXPM1);
COMPUTE(ACOSH);
COMPUTE(ATANH);
COMPUTE(SIGN);
COMPUTE(COSH);
COMPUTE(ROUND);
COMPUTE(SINH);
COMPUTE(ASINH);
COMPUTE(HARDSWISH);
COMPUTE(ERF);
COMPUTE(ERFC);
COMPUTE(ERFINV);
#undef COMPUTE
}
#define BINARY_FUNC(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void Binary##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ, int sizeY, int sizeX,\
int strideZ, int strideY, int strideX,\
int strideZ1, int strideY1, int strideX1,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
int count = sizeZ * sizeY * sizeX;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int total = sizeZ * sizeY * sizeX;\
int ix = i % sizeX;\
int tmp = i / sizeX;\
int iy = tmp % sizeY;\
int iz = tmp / sizeY;\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
TIn x = input0[srcOffset];\
TIn y = input1[srcOffset1];\
output[dstOffset] = (TOut)Func;\
}\
}\
#define BINARY_FUNC_FLOATMID(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void BinaryMid##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ, int sizeY, int sizeX,\
int strideZ, int strideY, int strideX,\
int strideZ1, int strideY1, int strideX1,\
int dstStrideZ, int dstStrideY, int dstStrideX\
) { \
int count = sizeZ * sizeY * sizeX;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int total = sizeZ * sizeY * sizeX;\
int ix = i % sizeX;\
int tmp = i / sizeX;\
int iy = tmp % sizeY;\
int iz = tmp / sizeY;\
int srcOffset = iz * strideZ + iy * strideY + ix * strideX;\
int srcOffset1 = iz * strideZ1 + iy * strideY1 + ix * strideX1;\
int dstOffset = iz * dstStrideZ + iy * dstStrideY + ix * dstStrideX;\
float x = input0[srcOffset];\
float y = input1[srcOffset1];\
output[dstOffset] = (TOut)(Func);\
}\
}\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinear##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int sizeZ,\
int strideZ,\
int strideZ1,\
int dstStrideZ\
) { \
int count = sizeZ;\
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz * strideZ;\
int srcOffset1 = iz * strideZ1;\
int dstOffset = iz * dstStrideZ;\
float x = input0[srcOffset];\
float y = input1[srcOffset1];\
output[dstOffset] = (TOut)(Func);\
}\
}\
#define BINARY_FUNC_FLOATMID4(Name, Func)\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinear4_##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int count_4\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz << 2;\
int srcOffset1 = iz << 2;\
int dstOffset = iz << 2;\
float4 xx = ((float4 *)(input0+srcOffset))[0];\
float4 yy = ((float4 *)(input1+srcOffset1))[0];\
float x = xx.x;\
float y = yy.x;\
output[dstOffset] = (TOut)(Func);\
x = xx.y;\
y = yy.y;\
output[dstOffset+1] = (TOut)(Func);\
x = xx.z;\
y = yy.z;\
output[dstOffset+2] = (TOut)(Func);\
x = xx.w;\
y = yy.w;\
output[dstOffset+3] = (TOut)(Func);\
}\
}\
template<typename TIn, typename TOut>\
__global__ void BinaryMidLinearHalf4_##Name(\
const TIn *input0, const TIn* input1, TOut *output,\
int count_4\
) { \
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count_4); i += blockDim.x * gridDim.x) {\
int iz = i;\
int srcOffset = iz << 2;\
int srcOffset1 = iz << 2;\
int dstOffset = iz << 2;\
half2 xx = ((half2 *)(input0+srcOffset))[0];\
half2 yy = ((half2 *)(input1+srcOffset1))[0];\
float x = (float)xx.x;\
float y = (float)yy.x;\
output[dstOffset] = (TOut)(Func);\
x = (float)xx.y;\
y = (float)yy.y;\
output[dstOffset+1] = (TOut)(Func);\
xx = ((half2 *)(input0+srcOffset))[1];\
yy = ((half2 *)(input1+srcOffset1))[1];\
x = (float)xx.x;\
y = (float)yy.x;\
output[dstOffset+2] = (TOut)(Func);\
x = (float)xx.y;\
y = (float)yy.y;\
output[dstOffset+3] = (TOut)(Func);\
}\
}\
#define sign(y) ((y) > 0 ? 1 : ((y) < 0 ? -1 : 0))
BINARY_FUNC(ADD, x+y);
BINARY_FUNC(SUB, x-y);
BINARY_FUNC(MUL, x*y);
BINARY_FUNC(DIV, x/y);
BINARY_FUNC(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC(MINIMUM, min(x, y));
BINARY_FUNC(MAXIMUM, max(x, y));
BINARY_FUNC(GREATER, x > y ? 1 : 0);
BINARY_FUNC(LESS, x < y ? 1 : 0);
BINARY_FUNC(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC(EQUAL, x == y ? 1 : 0);
BINARY_FUNC(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC(FLOORDIV, floor(x / y));
BINARY_FUNC(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC(POW, pow(x, y));
BINARY_FUNC(ATAN2, atan2(x, y));
BINARY_FUNC(MOD, (x % y));
BINARY_FUNC(LOGICALOR, (x || y) ? 1 : 0);
BINARY_FUNC_FLOATMID(ADD, x+y);
BINARY_FUNC_FLOATMID(SUB, x-y);
BINARY_FUNC_FLOATMID(MUL, x*y);
BINARY_FUNC_FLOATMID(DIV, x/y);
BINARY_FUNC_FLOATMID(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC_FLOATMID(MINIMUM, min(x, y));
BINARY_FUNC_FLOATMID(MAXIMUM, max(x, y));
BINARY_FUNC_FLOATMID(GREATER, x > y ? 1 : 0);
BINARY_FUNC_FLOATMID(LESS, x < y ? 1 : 0);
BINARY_FUNC_FLOATMID(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC_FLOATMID(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC_FLOATMID(EQUAL, x == y ? 1 : 0);
BINARY_FUNC_FLOATMID(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC_FLOATMID(FLOORDIV, floor(x / y));
BINARY_FUNC_FLOATMID(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC_FLOATMID(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC_FLOATMID(POW, pow(x, y));
BINARY_FUNC_FLOATMID(ATAN2, atan2(x, y));
BINARY_FUNC_FLOATMID(MOD, fmod(x, y));
BINARY_FUNC_FLOATMID(LOGICALOR, (x || y) ? 1 : 0);
BINARY_FUNC_FLOATMID4(ADD, x+y);
BINARY_FUNC_FLOATMID4(SUB, x-y);
BINARY_FUNC_FLOATMID4(MUL, x*y);
BINARY_FUNC_FLOATMID4(DIV, x/y);
BINARY_FUNC_FLOATMID4(REALDIV, (float)sign(y) * x / max(abs(y), 0.0000001));
BINARY_FUNC_FLOATMID4(MINIMUM, min(x, y));
BINARY_FUNC_FLOATMID4(MAXIMUM, max(x, y));
BINARY_FUNC_FLOATMID4(GREATER, x > y ? 1 : 0);
BINARY_FUNC_FLOATMID4(LESS, x < y ? 1 : 0);
BINARY_FUNC_FLOATMID4(LESS_EQUAL, x <= y ? 1 : 0);
BINARY_FUNC_FLOATMID4(GREATER_EQUAL, x >= y ? 1 : 0);
BINARY_FUNC_FLOATMID4(EQUAL, x == y ? 1 : 0);
BINARY_FUNC_FLOATMID4(NOTEQUAL, x != y ? 1 : 0);
BINARY_FUNC_FLOATMID4(FLOORDIV, floor(x / y));
BINARY_FUNC_FLOATMID4(FLOORMOD, x - floor(x / y) * y);
BINARY_FUNC_FLOATMID4(SquaredDifference, (x-y)*(x-y));
BINARY_FUNC_FLOATMID4(POW, pow(x, y));
BINARY_FUNC_FLOATMID4(ATAN2, atan2(x, y));
BINARY_FUNC_FLOATMID4(MOD, fmod(x, y));
BINARY_FUNC_FLOATMID4(LOGICALOR, (x || y) ? 1 : 0);
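// Three kernel families are instantiated above for every binary op:
//  * Binary##Name                            - computes in the native TIn type (used by the int32 blit below),
//  * BinaryMid##Name / BinaryMidLinear##Name - loads TIn but computes in float, with full 3-D strides
//                                              or a single linear stride respectively,
//  * BinaryMidLinear4_##Name / BinaryMidLinearHalf4_##Name - vectorized variants that read contiguous
//                                              data as float4 / half2 pairs, four elements per thread iteration.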
template<typename T>
void BinaryBlitTemplateFloat(T* output, const T* input, const T* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
#define COMPUTE_FLOAT(TYPE, TOut)\
if (opType == MNN::BinaryOpOperation_##TYPE ) {\
if (size[2] == count) {\
if(count % 4 == 0 && count > 16384 && srcStride[2] == 1 && srcStride1[2] == 1 && dstStride[2] == 1) {\
block_num = runtime->blocks_num(count/4);\
threads_num = runtime->threads_num();\
if(bytes == 4) {\
BinaryMidLinear4_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\
count/4);\
} else {\
BinaryMidLinearHalf4_##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\
count/4);\
}\
} else {\
BinaryMidLinear##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\
size[2],\
srcStride[2],\
srcStride1[2],\
dstStride[2]);\
}\
} else {\
BinaryMid##TYPE<<<block_num, threads_num>>>((const T*)input, (const T*)(input1), (TOut*)output,\
size[0], size[1], size[2],\
srcStride[0], srcStride[1], srcStride[2],\
srcStride1[0], srcStride1[1], srcStride1[2],\
dstStride[0], dstStride[1], dstStride[2]);\
}\
return;\
}\
COMPUTE_FLOAT(ADD, T);
COMPUTE_FLOAT(SUB, T);
COMPUTE_FLOAT(MUL, T);
COMPUTE_FLOAT(DIV, T);
COMPUTE_FLOAT(REALDIV, T);
COMPUTE_FLOAT(MINIMUM, T);
COMPUTE_FLOAT(MAXIMUM, T);
COMPUTE_FLOAT(GREATER, int);
COMPUTE_FLOAT(LESS, int);
COMPUTE_FLOAT(LESS_EQUAL, int);
COMPUTE_FLOAT(GREATER_EQUAL, int);
COMPUTE_FLOAT(EQUAL, int);
COMPUTE_FLOAT(NOTEQUAL, int);
COMPUTE_FLOAT(FLOORDIV, T);
COMPUTE_FLOAT(FLOORMOD, T);
COMPUTE_FLOAT(POW, T);
COMPUTE_FLOAT(SquaredDifference, T);
COMPUTE_FLOAT(ATAN2, T);
COMPUTE_FLOAT(MOD, T);
#undef COMPUTE_FLOAT
}
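// Minimal (hypothetical) invocation of the float blit above; `out`, `in0`, `in1` and `runtime`
// are assumed, pre-allocated device buffers / runtime handle. The vectorized float4 path is
// taken only when the blit is fully linear (size[2] == count with unit strides), count is a
// multiple of 4, and count > 16384:
//
//   int32_t size[3]   = {1, 1, 1 << 20};   // hypothetical shape
//   int32_t stride[3] = {0, 0, 1};         // contiguous
//   BinaryBlitTemplateFloat<float>((float*)out, (const float*)in0, (const float*)in1,
//                                  size, stride, stride, stride,
//                                  /*bytes=*/4, runtime, MNN::BinaryOpOperation_ADD);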
void BinaryBlitTemplateInt32(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, int bytes, CUDARuntime* runtime, int opType) {
int count = size[0] * size[1] * size[2];
int block_num = runtime->blocks_num(count);
int threads_num = runtime->threads_num();
#define COMPUTE_INT(TYPE, TOut)\
if (opType == MNN::BinaryOpOperation_##TYPE ) {\
Binary##TYPE<<<block_num, threads_num>>>((const int*)input, (const int*)(input1), (TOut*)output,\
size[0], size[1], size[2],\
srcStride[0], srcStride[1], srcStride[2],\
srcStride1[0], srcStride1[1], srcStride1[2],\
dstStride[0], dstStride[1], dstStride[2]);\
return;\
}\
COMPUTE_INT(ADD, int);
COMPUTE_INT(SUB, int);
COMPUTE_INT(MUL, int);
COMPUTE_INT(DIV, int);
COMPUTE_INT(MINIMUM, int);
COMPUTE_INT(MAXIMUM, int);
COMPUTE_INT(GREATER, int);
COMPUTE_INT(LESS, int);
COMPUTE_INT(LESS_EQUAL, int);
COMPUTE_INT(GREATER_EQUAL, int);
COMPUTE_INT(EQUAL, int);
COMPUTE_INT(NOTEQUAL, int);
COMPUTE_INT(SquaredDifference, int);
COMPUTE_INT(MOD, int);
COMPUTE_INT(LOGICALOR, int);
}
void BinaryBlit(uint8_t* output, const uint8_t* input, const uint8_t* input1, const int32_t* size, const int32_t* srcStride, const int32_t* srcStride1, const int32_t* dstStride, halide_type_t type, CUDARuntime* runtime, int opType) {
if (type.code == halide_type_float) {
if (type.bits == 32) {
BinaryBlitTemplateFloat((float*)output, (float*)input, (float*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
} else if (type.bits == 16) {
BinaryBlitTemplateFloat((half*)output, (half*)input, (half*)input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
}
} else if (type.code == halide_type_int) {
BinaryBlitTemplateInt32(output, input, input1, size, srcStride, srcStride1, dstStride, type.bytes(), runtime, opType);
}
}
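// Dispatch summary for BinaryBlit: 32-bit and 16-bit float tensors go through
// BinaryBlitTemplateFloat (the 16-bit path reinterprets the pointers as half*), signed
// integer tensors go through BinaryBlitTemplateInt32, and any other halide type falls
// through without doing anything. A hypothetical call site:
//
//   BinaryBlit(dst, a, b, size, stride, stride, stride,
//              halide_type_t(halide_type_float, 32), runtime, MNN::BinaryOpOperation_MUL);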
}// namespace CUDA
}// namespace MNN
|
fe2561ce7548cf9254f9d0da994075b689df8884.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
// kernels borrowed from Caffe
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
top_mask[index] = maxidx + 1;
}
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] - 1 == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
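// The ph/pw ranges above enumerate exactly the pooling windows that can contain input pixel
// (h, w): window ph covers rows [ph*stride_h - pad_h, ph*stride_h - pad_h + kernel_h).
// Worked example with kernel_h = 3, stride_h = 2, pad_h = 0 and h = 5:
//   phstart = (5 + 0 - 3) / 2 + 1 = 2,  phend = min(5 / 2 + 1, pooled_height) = 3,
// so only window ph = 2 (rows 4..6) is visited, the only window that actually contains row 5.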
void THNN_CudaSpatialMaxPooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, output, indices));
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resizeAs(state, indices, output);
float* indices_data = THCudaTensor_data(state, indices);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
hipLaunchKernelGGL(( MaxPoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data, indices_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_free(state, input);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxPooling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
}
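// Output-size sanity check (e.g. nInputRows = 8, kH = 3, dH = 2, padH = 0):
//   floor mode: floor((8 - 3 + 0) / 2) + 1 = 3 rows (windows starting at 0, 2, 4),
//   ceil mode:  ceil((8 - 3 + 0) / 2) + 1 = 4 rows (an extra, partially overlapping window starting at 6).
// The padding adjustment above then drops a trailing window only when it would start at or
// beyond the unpadded input, i.e. entirely inside the bottom/right padding.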
void THNN_CudaSpatialMaxPooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode)
{
THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, indices, gradInput));
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
hipLaunchKernelGGL(( MaxPoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, gradOutput);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxPooling.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
| fe2561ce7548cf9254f9d0da994075b689df8884.cu | #include "THCUNN.h"
#include "common.h"
// kernels borrowed from Caffe
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
top_mask[index] = maxidx + 1;
}
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] - 1 == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
bottom_diff[index] = gradient;
}
}
void THNN_CudaSpatialMaxPooling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, output, indices));
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCudaTensor_newContiguous(state, input);
float* input_data = THCudaTensor_data(state, input);
THCudaTensor_resize4d(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resizeAs(state, indices, output);
float* indices_data = THCudaTensor_data(state, indices);
float* output_data = THCudaTensor_data(state, output);
int count = THCudaTensor_nElement(state, output);
MaxPoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data, indices_data);
if(input->nDimension == 3)
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_free(state, input);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxPooling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
}
void THNN_CudaSpatialMaxPooling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode)
{
THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, indices, gradInput));
input = THCudaTensor_newContiguous(state, input);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input->nDimension == 3) {
nInputCols = input->size[2];
nInputRows = input->size[1];
nInputPlane = input->size[0];
batchSize = 1;
}
else
{
nInputCols = input->size[3];
nInputRows = input->size[2];
nInputPlane = input->size[1];
batchSize = input->size[0];
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, input);
int count = THCudaTensor_nElement(state, input);
MaxPoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCudaTensor_data(state, gradOutput),
THCudaTensor_data(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCudaTensor_data(state, gradInput));
THCudaTensor_free(state, gradOutput);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxPooling.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
// clean
THCudaTensor_free(state, input);
THCudaTensor_free(state, gradOutput);
}
|
b6267d05b7ee8edcc7b8dc38f673663fc3f59408.hip | // !!! This is a file automatically generated by hipify!!!
#include "FFT.h"
#include "logger.h"
#include "parser.h"
#include <cmath>
#include <crt/host_defines.h>
#include <cstdlib>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
#include <chrono>
#include <fstream>
// #include <opencv2/core/core.hpp>
// #include <opencv2/highgui/highgui.hpp>
using namespace std;
namespace CUDA {
template <typename T>
ostream &operator<<(ostream &o, vector<T> v) {
if (v.size() > 0) {
o << v[0];
}
for (unsigned i = 1; i < v.size(); i++) {
o << " " << v[i];
}
return o << endl;
}
static __device__ __host__ inline cmplx_struct add(cmplx_struct a, cmplx_struct b) {
cmplx_struct num;
num.x = a.x + b.x;
num.y = a.y + b.y;
return num;
}
static __device__ __host__ inline cmplx_struct inverse(cmplx_struct number) {
cmplx_struct inverse;
inverse.x = -number.x;
inverse.y = -number.y;
return inverse;
}
static __device__ __host__ inline cmplx_struct multiply(cmplx_struct first, cmplx_struct second) {
cmplx_struct mult;
mult.x = first.x * second.x - first.y * second.y;
mult.y = first.y * second.x + first.x * second.y;
return mult;
}
__global__ void inverse_divide(cmplx_struct* numbers, int n, int threads) {
int index = blockIdx.x * threads + threadIdx.x;
// bounds check
if (index < n) {
numbers[index].x /= n;
numbers[index].y /= n;
}
}
// TODO pointer aliasing optimization must go in the report
// https://developer.nvidia.com/blog/cuda-pro-tip-optimize-pointer-aliasing/
__global__ void reorder_array(cmplx_struct* __restrict__ rev, cmplx_struct* __restrict__ orig, int s, int threads, int n) {
unsigned int index = blockIdx.x * threads + threadIdx.x;
if (index < n && (__brev(index) >> (32 - s)) < n) {
// reversed
rev[__brev(index) >> (32 - s)] = orig[index];
}
}
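// Bit-reversal permutation: __brev reverses all 32 bits, so shifting right by (32 - s) keeps
// just the reversed low s bits. Example with n = 8 (s = 3): index 3 = 0b011 maps to 0b110 = 6,
// which is the standard input reordering for the iterative radix-2 FFT below.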
__device__ void fft_inner_loop(cmplx_struct* __restrict__ numbers, int row, int col, int len, int n, bool invert) {
if (row + col + len / 2 < n && col < len / 2) {
cmplx_struct first, second;
float angle = (ANGLE_MULT * col) / (len * (invert ? 1 : -1));
second.x = cos(angle);
second.y = sin(angle);
first = numbers[row + col];
second = multiply(numbers[row + col + len / 2], second);
numbers[row + col] = add(first, second);
numbers[row + col + len / 2] = add(first, inverse(second));
}
}
__global__ void compute_fft(cmplx_struct* __restrict__ numbers, int row, int len, int n, int threads, bool invert) {
int col = blockIdx.x * threads + threadIdx.x;
fft_inner_loop(numbers, row, col, len, n, invert);
}
__global__ void fft_outer_loop(cmplx_struct* __restrict__ numbers, int len, int n, int threads, bool invert) {
int row = (blockIdx.x * threads + threadIdx.x) * len;
for (int col = 0; col < len / 2; col++) {
fft_inner_loop(numbers, row, col, len, n, invert);
}
}
void real_fft(int size, int threads, cmplx_struct* reversed_nums, cmplx_struct* nums, int balance, bool invert) {
int power = log2(size);
auto start = chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( reorder_array), dim3(ceil(float(size) / threads)), dim3(threads), 0, 0, reversed_nums, nums, power, threads, size);
// no need to wait for all the bits to be reversed (implicit sync)
// hipDeviceSynchronize();
// parallel fft
for (int len = 2; len <= size; len <<= 1) {
if (size / len > balance) {
hipLaunchKernelGGL(( fft_outer_loop), dim3(ceil((float) size / threads / len)), dim3(threads), 0, 0, reversed_nums, len, size, threads, invert);
} else {
for (int row = 0; row < size; row += len) {
float repeat = (float) len / 2;
hipLaunchKernelGGL(( compute_fft), dim3(ceil(repeat / threads)), dim3(threads), 0, 0, reversed_nums, row, len, size, threads, invert);
}
}
}
if (invert) {
hipLaunchKernelGGL(( inverse_divide), dim3(ceil((float) size / threads)), dim3(threads), 0, 0, reversed_nums, size, threads);
}
auto stop = chrono::high_resolution_clock::now();
auto duration = chrono::duration_cast<chrono::microseconds>(stop - start);
cout << "cuda fft pure algorithmic computation time for " << threads << " threads (micros) - " << duration.count() << endl;
}
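// The `balance` parameter switches between the two launch strategies above: while there are
// still many independent groups of length `len` (size / len > balance), one thread processes a
// whole group (fft_outer_loop); once the groups become long and few, a kernel is launched per
// group with one thread per butterfly (compute_fft).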
void fft(vector<cmplx>& array, bool invert, int balance, int threads) {
int size = (int) array.size();
cmplx_struct* data = (cmplx_struct*)malloc(sizeof(cmplx_struct) * size);
for (int i = 0; i < size; i++) {
data[i].x = array[i].real();
data[i].y = array[i].imag();
}
cmplx_struct *reversed_nums, *nums;
hipMalloc((void **)&reversed_nums, sizeof(cmplx_struct) * size);
hipMalloc((void **)&nums, sizeof(cmplx_struct) * size);
hipMemcpy(nums, data, sizeof(cmplx_struct) * size, hipMemcpyHostToDevice);
real_fft(size, threads, reversed_nums, nums, balance, invert);
cmplx_struct* results;
results = (cmplx_struct*)malloc(sizeof(cmplx_struct) * size);
hipMemcpy(results, reversed_nums, sizeof(cmplx_struct) * size, hipMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
array[i] = cmplx(results[i].x, results[i].y);
}
// cleanup
        free(data);
        free(results);
hipFree(reversed_nums);
hipFree(nums);
}
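// Minimal host-side usage sketch (hypothetical sizes; the transform assumes array.size() is a
// power of two, since the bit-reversal uses log2(size) bits):
//
//   std::vector<cmplx> signal(1 << 10);
//   // ... fill signal ...
//   CUDA::fft(signal, /*invert=*/false, /*balance=*/8, /*threads=*/256);  // forward transform
//   CUDA::fft(signal, /*invert=*/true,  /*balance=*/8, /*threads=*/256);  // inverse, rescaled by 1/n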
void fft_2D(vector<vector<cmplx> >& data, bool invert, int thread_balance, int threads) {
        // copy (not alias) the input so the transposes below don't read and write the same buffer
        vector<vector<cmplx> > matrix = data;
for (int i = 0; i < matrix.size(); i++) {
fft(matrix[i], invert, thread_balance, threads);
}
data = matrix;
matrix.resize(data[0].size());
for (int i = 0; i < matrix.size(); i++) {
matrix[i].resize(data.size());
}
for (int i = 0; i < data.size(); i++) {
for (int j = 0; j < data[0].size(); j++) {
matrix[j][i] = data[i][j];
}
}
for (int i = 0; i < matrix.size(); i++) {
fft(matrix[i], invert, thread_balance, threads);
}
for (int i = 0; i < data.size(); i++) {
for (int j = 0; j < data[0].size(); j++) {
data[j][i] = matrix[i][j];
}
}
}
void compress_image(vector<vector<uint8_t> > &image, double threshold, int balance, int threads) {
//Convert image to complex type
vector<vector<cmplx> > complex_image(image.size(), vector<cmplx>(image[0].size()));
for (int i = 0; i < image.size(); i++) {
for (int j = 0; j < image[0].size(); j++) {
complex_image[i][j] = image[i][j];
}
}
//Perform 2D fft on image
fft_2D(complex_image, false, balance, threads);
//Threshold the fft
double maximum_value = 0.0;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
maximum_value = max(maximum_value, abs(complex_image[i][j]));
}
}
threshold *= maximum_value;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
if (abs(complex_image[i][j]) < threshold) {
complex_image[i][j] = 0;
}
}
}
int zeros_count = 0;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
if (abs(complex_image[i][j]) == 0) {
zeros_count++;
}
}
}
cout << "Components removed: " << ((zeros_count*1.00/(complex_image.size()*complex_image[0].size())))*100 << endl;
// Perform inverse FFT
fft_2D(complex_image, true, balance, threads);
// We will consider only the real part of the image
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
image[i][j] = uint8_t(complex_image[i][j].real() + 0.5);
}
}
}
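// compress_image keeps only the spectral coefficients whose magnitude is at least `threshold`
// times the largest magnitude (so threshold is a relative value in [0, 1]), reports the
// percentage that was zeroed, and reconstructs by rounding the real part of the inverse
// transform. A hypothetical call (load_grayscale is an assumed helper, not part of this file):
//
//   std::vector<std::vector<uint8_t> > img = load_grayscale("input.pgm");
//   CUDA::compress_image(img, /*threshold=*/0.01, /*balance=*/8, /*threads=*/256);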
}
| b6267d05b7ee8edcc7b8dc38f673663fc3f59408.cu | #include "FFT.h"
#include "logger.h"
#include "parser.h"
#include <cmath>
#include <crt/host_defines.h>
#include <cstdlib>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
#include <chrono>
#include <fstream>
// #include <opencv2/core/core.hpp>
// #include <opencv2/highgui/highgui.hpp>
using namespace std;
namespace CUDA {
template <typename T>
ostream &operator<<(ostream &o, vector<T> v) {
if (v.size() > 0) {
o << v[0];
}
for (unsigned i = 1; i < v.size(); i++) {
o << " " << v[i];
}
return o << endl;
}
static __device__ __host__ inline cmplx_struct add(cmplx_struct a, cmplx_struct b) {
cmplx_struct num;
num.x = a.x + b.x;
num.y = a.y + b.y;
return num;
}
static __device__ __host__ inline cmplx_struct inverse(cmplx_struct number) {
cmplx_struct inverse;
inverse.x = -number.x;
inverse.y = -number.y;
return inverse;
}
static __device__ __host__ inline cmplx_struct multiply(cmplx_struct first, cmplx_struct second) {
cmplx_struct mult;
mult.x = first.x * second.x - first.y * second.y;
mult.y = first.y * second.x + first.x * second.y;
return mult;
}
__global__ void inverse_divide(cmplx_struct* numbers, int n, int threads) {
int index = blockIdx.x * threads + threadIdx.x;
// bounds check
if (index < n) {
numbers[index].x /= n;
numbers[index].y /= n;
}
}
// TODO pointer aliasing optimization must go in the report
// https://developer.nvidia.com/blog/cuda-pro-tip-optimize-pointer-aliasing/
__global__ void reorder_array(cmplx_struct* __restrict__ rev, cmplx_struct* __restrict__ orig, int s, int threads, int n) {
unsigned int index = blockIdx.x * threads + threadIdx.x;
if (index < n && (__brev(index) >> (32 - s)) < n) {
// reversed
rev[__brev(index) >> (32 - s)] = orig[index];
}
}
__device__ void fft_inner_loop(cmplx_struct* __restrict__ numbers, int row, int col, int len, int n, bool invert) {
if (row + col + len / 2 < n && col < len / 2) {
cmplx_struct first, second;
float angle = (ANGLE_MULT * col) / (len * (invert ? 1 : -1));
second.x = cos(angle);
second.y = sin(angle);
first = numbers[row + col];
second = multiply(numbers[row + col + len / 2], second);
numbers[row + col] = add(first, second);
numbers[row + col + len / 2] = add(first, inverse(second));
}
}
__global__ void compute_fft(cmplx_struct* __restrict__ numbers, int row, int len, int n, int threads, bool invert) {
int col = blockIdx.x * threads + threadIdx.x;
fft_inner_loop(numbers, row, col, len, n, invert);
}
__global__ void fft_outer_loop(cmplx_struct* __restrict__ numbers, int len, int n, int threads, bool invert) {
int row = (blockIdx.x * threads + threadIdx.x) * len;
for (int col = 0; col < len / 2; col++) {
fft_inner_loop(numbers, row, col, len, n, invert);
}
}
void real_fft(int size, int threads, cmplx_struct* reversed_nums, cmplx_struct* nums, int balance, bool invert) {
int power = log2(size);
auto start = chrono::high_resolution_clock::now();
reorder_array<<<ceil(float(size) / threads), threads>>>(reversed_nums, nums, power, threads, size);
// no need to wait for all the bits to be reversed (implicit sync)
// cudaDeviceSynchronize();
// parallel fft
for (int len = 2; len <= size; len <<= 1) {
if (size / len > balance) {
fft_outer_loop<<<ceil((float) size / threads / len), threads>>>(reversed_nums, len, size, threads, invert);
} else {
for (int row = 0; row < size; row += len) {
float repeat = (float) len / 2;
compute_fft<<<ceil(repeat / threads), threads>>>(reversed_nums, row, len, size, threads, invert);
}
}
}
if (invert) {
inverse_divide<<<ceil((float) size / threads), threads>>>(reversed_nums, size, threads);
}
auto stop = chrono::high_resolution_clock::now();
auto duration = chrono::duration_cast<chrono::microseconds>(stop - start);
cout << "cuda fft pure algorithmic computation time for " << threads << " threads (micros) - " << duration.count() << endl;
}
void fft(vector<cmplx>& array, bool invert, int balance, int threads) {
int size = (int) array.size();
cmplx_struct* data = (cmplx_struct*)malloc(sizeof(cmplx_struct) * size);
for (int i = 0; i < size; i++) {
data[i].x = array[i].real();
data[i].y = array[i].imag();
}
cmplx_struct *reversed_nums, *nums;
cudaMalloc((void **)&reversed_nums, sizeof(cmplx_struct) * size);
cudaMalloc((void **)&nums, sizeof(cmplx_struct) * size);
cudaMemcpy(nums, data, sizeof(cmplx_struct) * size, cudaMemcpyHostToDevice);
real_fft(size, threads, reversed_nums, nums, balance, invert);
cmplx_struct* results;
results = (cmplx_struct*)malloc(sizeof(cmplx_struct) * size);
cudaMemcpy(results, reversed_nums, sizeof(cmplx_struct) * size, cudaMemcpyDeviceToHost);
for (int i = 0; i < size; i++) {
array[i] = cmplx(results[i].x, results[i].y);
}
// cleanup
        free(data);
        free(results);
cudaFree(reversed_nums);
cudaFree(nums);
}
void fft_2D(vector<vector<cmplx> >& data, bool invert, int thread_balance, int threads) {
        // copy (not alias) the input so the transposes below don't read and write the same buffer
        vector<vector<cmplx> > matrix = data;
for (int i = 0; i < matrix.size(); i++) {
fft(matrix[i], invert, thread_balance, threads);
}
data = matrix;
matrix.resize(data[0].size());
for (int i = 0; i < matrix.size(); i++) {
matrix[i].resize(data.size());
}
for (int i = 0; i < data.size(); i++) {
for (int j = 0; j < data[0].size(); j++) {
matrix[j][i] = data[i][j];
}
}
for (int i = 0; i < matrix.size(); i++) {
fft(matrix[i], invert, thread_balance, threads);
}
for (int i = 0; i < data.size(); i++) {
for (int j = 0; j < data[0].size(); j++) {
data[j][i] = matrix[i][j];
}
}
}
void compress_image(vector<vector<uint8_t> > &image, double threshold, int balance, int threads) {
//Convert image to complex type
vector<vector<cmplx> > complex_image(image.size(), vector<cmplx>(image[0].size()));
for (int i = 0; i < image.size(); i++) {
for (int j = 0; j < image[0].size(); j++) {
complex_image[i][j] = image[i][j];
}
}
//Perform 2D fft on image
fft_2D(complex_image, false, balance, threads);
//Threshold the fft
double maximum_value = 0.0;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
maximum_value = max(maximum_value, abs(complex_image[i][j]));
}
}
threshold *= maximum_value;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
if (abs(complex_image[i][j]) < threshold) {
complex_image[i][j] = 0;
}
}
}
int zeros_count = 0;
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
if (abs(complex_image[i][j]) == 0) {
zeros_count++;
}
}
}
cout << "Components removed: " << ((zeros_count*1.00/(complex_image.size()*complex_image[0].size())))*100 << endl;
// Perform inverse FFT
fft_2D(complex_image, true, balance, threads);
// We will consider only the real part of the image
for (int i = 0; i < complex_image.size(); i++) {
for (int j = 0; j < complex_image[0].size(); j++) {
image[i][j] = uint8_t(complex_image[i][j].real() + 0.5);
}
}
}
}
|
894807e4296c61a4241e6dbf3a7365dab6936bd1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "neg_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
float *dy = NULL;
hipMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
neg_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
neg_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
neg_float), dim3(gridBlock),dim3(threadBlock), 0, 0, n,idx,dy,incy,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 894807e4296c61a4241e6dbf3a7365dab6936bd1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "neg_float.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int idx = 1;
float *dy = NULL;
cudaMalloc(&dy, XSIZE*YSIZE);
int incy = 1;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
neg_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
neg_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
neg_float<<<gridBlock,threadBlock>>>(n,idx,dy,incy,result);
}
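// Note: kernel launches are asynchronous and nothing synchronizes with the device before the
// clock is read below, so the measured interval need not include completion of the 1000 queued
// kernels; it largely reflects launch/enqueue overhead (plus any time the host blocks once the
// launch queue fills).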
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2fa3e45041b8ae50f254cb273442cfe954458b4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include "ptx.cuh"
#include "secp256k1.cuh"
#include "sha256.cuh"
#include "ripemd160.cuh"
#include "secp256k1.h"
#include "DeviceContextShared.h"
__constant__ unsigned int _TARGET_HASH[5];
__constant__ unsigned int _INC_X[8];
__constant__ unsigned int _INC_Y[8];
static const unsigned int _RIPEMD160_IV_HOST[5] = {
0x67452301,
0xefcdab89,
0x98badcfe,
0x10325476,
0xc3d2e1f0
};
static unsigned int swp(unsigned int x)
{
return (x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24);
}
hipError_t setTargetHash(const unsigned int hash[5])
{
unsigned int h[5];
// Undo the final round of RIPEMD160 and endian swap to save some computation
for(int i = 0; i < 5; i++) {
h[i] = swp(hash[i]) - _RIPEMD160_IV_HOST[(i + 1) % 5];
}
return hipMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5);
}
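// Optimization note: the target hash is stored byte-swapped with the rotated RIPEMD-160 IV
// words subtracted out. That lets checkHash() compare against the digest *before* the final IV
// addition (ripemd160sha256NoFinal), saving a few adds per candidate key; setResultFound() adds
// _RIPEMD160_IV back in before reporting, so the host still receives a normal RIPEMD-160 digest.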
hipError_t setIncrementorPoint(const secp256k1::uint256 &x, const secp256k1::uint256 &y)
{
unsigned int xWords[8];
unsigned int yWords[8];
x.exportWords(xWords, 8, secp256k1::uint256::BigEndian);
y.exportWords(yWords, 8, secp256k1::uint256::BigEndian);
hipError_t err = hipMemcpyToSymbol(_INC_X, xWords, sizeof(unsigned int) * 8);
if(err) {
return err;
}
return hipMemcpyToSymbol(_INC_Y, yWords, sizeof(unsigned int) * 8);
}
__device__ void hashPublicKey(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKey(x, y, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void hashPublicKeyCompressed(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKeyCompressed(x, y, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void addResult(unsigned int *numResultsPtr, void *results, void *info, int size)
{
grabLock();
unsigned char *ptr = (unsigned char *)results + (*numResultsPtr);
memcpy(ptr, info, size);
(*numResultsPtr)++;
releaseLock();
}
__device__ void setResultFound(unsigned int *numResultsPtr, void *results, int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5])
{
struct KeyFinderDeviceResult r;
r.block = blockIdx.x;
r.thread = threadIdx.x;
r.idx = idx;
r.compressed = compressed;
for(int i = 0; i < 8; i++) {
r.x[i] = x[i];
r.y[i] = y[i];
}
for(int i = 0; i < 5; i++) {
r.digest[i] = endian(digest[i] + _RIPEMD160_IV[(i + 1) % 5]);
}
addResult(numResultsPtr, results, &r, sizeof(r));
}
__device__ bool checkHash(unsigned int hash[5])
{
bool equal = true;
for(int i = 0; i < 5; i++) {
equal &= (hash[i] == _TARGET_HASH[i]);
}
return equal;
}
__device__ void doIteration(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chain, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
for(int i = 0; i < pointsPerThread; i++) {
unsigned int x[8];
unsigned int y[8];
unsigned int digest[5];
readInt(xPtr, i, x);
readInt(yPtr, i, y);
if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
hashPublicKey(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, false, x, y, digest);
}
}
if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
hashPublicKeyCompressed(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, true, x, y, digest);
}
}
beginBatchAdd(_INC_X, xPtr, chain, i, inverse);
}
doBatchInverse(inverse);
for(int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
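// doIteration uses the usual batched-inversion ("Montgomery's trick") pattern: the forward pass
// accumulates the running product of the denominators into `inverse` (parking partial products
// in `chain`), doBatchInverse performs a single field inversion, and the backward pass peels the
// individual inverses back out while completing each point addition, so one inversion is
// amortized over pointsPerThread additions. Sketch of the idea on plain values (not the actual
// secp256k1 field code):
//
//   prefix[i] = d0 * d1 * ... * di;          inv = modInverse(prefix[n-1]);
//   for (i = n-1; i > 0; i--) { inv_di = inv * prefix[i-1];  inv *= d_i; }
//   inv_d0 = inv;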
__device__ void doIterationWithDouble(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chain, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
for(int i = 0; i < pointsPerThread; i++) {
unsigned int x[8];
unsigned int y[8];
unsigned int digest[5];
readInt(xPtr, i, x);
readInt(yPtr, i, y);
// uncompressed
if(compression == 1 || compression == 2) {
hashPublicKey(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, false, x, y, digest);
}
}
// compressed
if(compression == 0 || compression == 2) {
hashPublicKeyCompressed(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, true, x, y, digest);
}
}
beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, inverse);
}
doBatchInverse(inverse);
for(int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
/**
* Performs a single iteration
*/
__global__ void keyFinderKernel(int points, unsigned int *x, unsigned int *y, unsigned int *chain, unsigned int *numResults, void *results, int compression)
{
doIteration(x, y, chain, points, numResults, results, compression);
}
__global__ void keyFinderKernelWithDouble(int points, unsigned int *x, unsigned int *y, unsigned int *chain, unsigned int *numResults, void *results, int compression)
{
doIterationWithDouble(x, y, chain, points, numResults, results, compression);
} | 2fa3e45041b8ae50f254cb273442cfe954458b4f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include "ptx.cuh"
#include "secp256k1.cuh"
#include "sha256.cuh"
#include "ripemd160.cuh"
#include "secp256k1.h"
#include "DeviceContextShared.h"
__constant__ unsigned int _TARGET_HASH[5];
__constant__ unsigned int _INC_X[8];
__constant__ unsigned int _INC_Y[8];
static const unsigned int _RIPEMD160_IV_HOST[5] = {
0x67452301,
0xefcdab89,
0x98badcfe,
0x10325476,
0xc3d2e1f0
};
static unsigned int swp(unsigned int x)
{
return (x << 24) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | (x >> 24);
}
cudaError_t setTargetHash(const unsigned int hash[5])
{
unsigned int h[5];
// Undo the final round of RIPEMD160 and endian swap to save some computation
for(int i = 0; i < 5; i++) {
h[i] = swp(hash[i]) - _RIPEMD160_IV_HOST[(i + 1) % 5];
}
return cudaMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5);
}
cudaError_t setIncrementorPoint(const secp256k1::uint256 &x, const secp256k1::uint256 &y)
{
unsigned int xWords[8];
unsigned int yWords[8];
x.exportWords(xWords, 8, secp256k1::uint256::BigEndian);
y.exportWords(yWords, 8, secp256k1::uint256::BigEndian);
cudaError_t err = cudaMemcpyToSymbol(_INC_X, xWords, sizeof(unsigned int) * 8);
if(err) {
return err;
}
return cudaMemcpyToSymbol(_INC_Y, yWords, sizeof(unsigned int) * 8);
}
__device__ void hashPublicKey(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKey(x, y, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void hashPublicKeyCompressed(const unsigned int *x, const unsigned int *y, unsigned int *digestOut)
{
unsigned int hash[8];
sha256PublicKeyCompressed(x, y, hash);
// Swap to little-endian
for(int i = 0; i < 8; i++) {
hash[i] = endian(hash[i]);
}
ripemd160sha256NoFinal(hash, digestOut);
}
__device__ void addResult(unsigned int *numResultsPtr, void *results, void *info, int size)
{
grabLock();
unsigned char *ptr = (unsigned char *)results + (*numResultsPtr);
memcpy(ptr, info, size);
(*numResultsPtr)++;
releaseLock();
}
__device__ void setResultFound(unsigned int *numResultsPtr, void *results, int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5])
{
struct KeyFinderDeviceResult r;
r.block = blockIdx.x;
r.thread = threadIdx.x;
r.idx = idx;
r.compressed = compressed;
for(int i = 0; i < 8; i++) {
r.x[i] = x[i];
r.y[i] = y[i];
}
for(int i = 0; i < 5; i++) {
r.digest[i] = endian(digest[i] + _RIPEMD160_IV[(i + 1) % 5]);
}
addResult(numResultsPtr, results, &r, sizeof(r));
}
__device__ bool checkHash(unsigned int hash[5])
{
bool equal = true;
for(int i = 0; i < 5; i++) {
equal &= (hash[i] == _TARGET_HASH[i]);
}
return equal;
}
__device__ void doIteration(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chain, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
for(int i = 0; i < pointsPerThread; i++) {
unsigned int x[8];
unsigned int y[8];
unsigned int digest[5];
readInt(xPtr, i, x);
readInt(yPtr, i, y);
if(compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) {
hashPublicKey(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, false, x, y, digest);
}
}
if(compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) {
hashPublicKeyCompressed(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, true, x, y, digest);
}
}
beginBatchAdd(_INC_X, xPtr, chain, i, inverse);
}
doBatchInverse(inverse);
for(int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
__device__ void doIterationWithDouble(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chain, int pointsPerThread, unsigned int *numResults, void *results, int compression)
{
// Multiply together all (_Gx - x) and then invert
unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 };
for(int i = 0; i < pointsPerThread; i++) {
unsigned int x[8];
unsigned int y[8];
unsigned int digest[5];
readInt(xPtr, i, x);
readInt(yPtr, i, y);
// uncompressed
if(compression == 1 || compression == 2) {
hashPublicKey(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, false, x, y, digest);
}
}
// compressed
if(compression == 0 || compression == 2) {
hashPublicKeyCompressed(x, y, digest);
if(checkHash(digest)) {
setResultFound(numResults, results, i, true, x, y, digest);
}
}
beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, inverse);
}
doBatchInverse(inverse);
for(int i = pointsPerThread - 1; i >= 0; i--) {
unsigned int newX[8];
unsigned int newY[8];
completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, chain, inverse, newX, newY);
writeInt(xPtr, i, newX);
writeInt(yPtr, i, newY);
}
}
/**
* Performs a single iteration
*/
__global__ void keyFinderKernel(int points, unsigned int *x, unsigned int *y, unsigned int *chain, unsigned int *numResults, void *results, int compression)
{
doIteration(x, y, chain, points, numResults, results, compression);
}
__global__ void keyFinderKernelWithDouble(int points, unsigned int *x, unsigned int *y, unsigned int *chain, unsigned int *numResults, void *results, int compression)
{
doIterationWithDouble(x, y, chain, points, numResults, results, compression);
} |
a4fc1364ca266756f680fd6818be30dc047e4c3b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#define OPTIX_COMPATIBILITY 7
#include <OSL/oslclosure.h>
#include <hip/hip_runtime.h>
#include "rend_lib.h"
OSL_NAMESPACE_ENTER
namespace pvt {
extern __device__ char* s_color_system;
}
OSL_NAMESPACE_EXIT
// Taken from the SimplePool class
__device__ static inline size_t
alignment_offset_calc(void* ptr, size_t alignment)
{
uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr);
uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits;
return offset;
}
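// Worked example: ptr = 0x1003, alignment = 16  ->  (0x1003 + 15) & ~15 = 0x1010, so the
// returned offset is 0x1010 - 0x1003 = 13 bytes of padding up to the next 16-byte boundary.
// (`& -alignment` equals `& ~(alignment - 1)` for power-of-two alignments.)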
// These functions are declared extern to prevent name mangling.
extern "C" {
__device__ void*
closure_component_allot(void* pool, int id, size_t prim_size,
const OSL::Color3& w)
{
((OSL::ClosureComponent*)pool)->id = id;
((OSL::ClosureComponent*)pool)->w = w;
size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return (void*)&char_ptr[needed];
}
__device__ void*
closure_mul_allot(void* pool, const OSL::Color3& w, OSL::ClosureColor* c)
{
((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL;
((OSL::ClosureMul*)pool)->weight = w;
((OSL::ClosureMul*)pool)->closure = c;
size_t needed = (sizeof(OSL::ClosureMul)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
__device__ void*
closure_mul_float_allot(void* pool, const float& w, OSL::ClosureColor* c)
{
((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL;
((OSL::ClosureMul*)pool)->weight.x = w;
((OSL::ClosureMul*)pool)->weight.y = w;
((OSL::ClosureMul*)pool)->weight.z = w;
((OSL::ClosureMul*)pool)->closure = c;
size_t needed = (sizeof(OSL::ClosureMul)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
__device__ void*
closure_add_allot(void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b)
{
((OSL::ClosureAdd*)pool)->id = OSL::ClosureColor::ADD;
((OSL::ClosureAdd*)pool)->closureA = a;
((OSL::ClosureAdd*)pool)->closureB = b;
size_t needed = (sizeof(OSL::ClosureAdd)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
__device__ void*
osl_allocate_closure_component(void* sg_, int id, int size)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
OSL::Color3 w = OSL::Color3(1, 1, 1);
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
size = max(4, size);
sg_ptr->renderstate = closure_component_allot(ret, id, size, w);
return ret;
}
__device__ void*
osl_allocate_weighted_closure_component(void* sg_, int id, int size,
const OSL::Color3* w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) {
return NULL;
}
size = max(4, size);
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_component_allot(ret, id, size, *w);
return ret;
}
__device__ void*
osl_mul_closure_color(void* sg_, OSL::ClosureColor* a, const OSL::Color3* w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL) {
return NULL;
}
if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) {
return NULL;
}
if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_mul_allot(ret, *w, a);
return ret;
}
__device__ void*
osl_mul_closure_float(void* sg_, OSL::ClosureColor* a, float w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL || w == 0.0f) {
return NULL;
}
if (w == 1.0f) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_mul_float_allot(ret, w, a);
return ret;
}
__device__ void*
osl_add_closure_closure(void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL) {
return b;
}
if (b == NULL) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_add_allot(ret, a, b);
return ret;
}
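// The allot helpers above implement a simple bump allocator over sg->renderstate: each closure
// node (component, weighted multiply, add) is written at the current cursor (realigned to
// alignof(ClosureComponent) as needed) and the cursor is advanced by the rounded-up node size.
// A shader expression such as w1 * diffuse(N) + w2 * reflection(N) therefore becomes a small
// tree of ClosureMul / ClosureAdd nodes whose leaves are ClosureComponent records, all packed
// into that single buffer.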
#define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING)
#define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR)
#define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR)
__device__ bool
rend_get_userdata(OSL::StringParam name, void* data, int data_size,
const OSL::TypeDesc& type, int index)
{
return false;
}
#undef IS_COLOR
#undef IS_STRING
#undef IS_PTR
__device__ int
osl_bind_interpolated_param(void* sg_, OSL::ustring_pod name, long long type,
int userdata_has_derivs, void* userdata_data,
int symbol_has_derivs, void* symbol_data,
int symbol_data_size, char* userdata_initialized,
int userdata_index)
{
char status = *userdata_initialized;
if (status == 0) {
bool ok = rend_get_userdata(HDSTR(name), userdata_data,
symbol_data_size, (*(OSL::TypeDesc*)&type),
userdata_index);
*userdata_initialized = status = 1 + ok;
}
if (status == 2) {
memcpy(symbol_data, userdata_data, symbol_data_size);
return 1;
}
return 0;
}
__device__ int
osl_strlen_is(const char* str)
{
return HDSTR(str).length();
}
__device__ int
osl_hash_is(const char* str)
{
return HDSTR(str).hash();
}
__device__ int
osl_getchar_isi(const char* str, int index)
{
return (str && unsigned(index) < HDSTR(str).length()) ? str[index] : 0;
}
__device__ void
osl_printf(void* sg_, char* fmt_str, void* args)
{
// This can be used to limit printing to one Cuda thread for debugging
// if (launch_index.x == 0 && launch_index.y == 0)
//
// vprintf(fmt_str, (const char*)args);
}
__device__ void*
osl_get_noise_options(void* sg_)
{
ShaderGlobals* sg = ((ShaderGlobals*)sg_);
NoiseOptCUDA* opt
= (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr();
new (opt) NoiseOptCUDA;
return opt;
}
__device__ void*
osl_get_texture_options(void* sg_)
{
return 0;
}
__device__ void
osl_texture_set_interp_code(void* opt, int mode)
{
// ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode;
}
__device__ void
osl_texture_set_stwrap_code(void* opt, int mode)
{
//((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode;
//((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode;
}
__forceinline__ __device__ float3
make_float3(const float4& a)
{
return make_float3(a.x, a.y, a.z);
}
// FIXME:
// clang++ 9.0 seems to have trouble with tex2D<float4>() look-ups,
// so we'll declare this external and implement texture lookups in
// CUDA files compiled by nvcc (optix_grid_renderer.cu and
// optix_raytrace.cu).
// (clang++ 9.0 error 'undefined __nv_tex_surf_handler')
extern __device__ float4
osl_tex2DLookup(void* handle, float s, float t);
__device__ int
osl_texture(void* sg_, const char* name, void* handle, void* opt_, float s,
float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans,
void* result, void* dresultdx, void* dresultdy, void* alpha,
void* dalphadx, void* dalphady, void* ustring_errormessage)
{
if (!handle)
return 0;
// hipTextureObject_t texID = hipTextureObject_t(handle);
float4 fromTexture = osl_tex2DLookup(handle, s, t);
// see note above
// float4 fromTexture = tex2D<float4>(texID, s, t);
*((float3*)result) = make_float3(fromTexture.x, fromTexture.y,
fromTexture.z);
return 1;
}
__device__ int
osl_range_check_err(int indexvalue, int length, OSL::ustring_pod symname,
void* sg, OSL::ustring_pod sourcefile, int sourceline,
OSL::ustring_pod groupname, int layer,
OSL::ustring_pod layername, OSL::ustring_pod shadername)
{
if (indexvalue < 0 || indexvalue >= length) {
return indexvalue < 0 ? 0 : length - 1;
}
return indexvalue;
}
__device__ int
osl_range_check(int indexvalue, int length, OSL::ustring_pod symname, void* sg,
OSL::ustring_pod sourcefile, int sourceline,
OSL::ustring_pod groupname, int layer,
OSL::ustring_pod layername, OSL::ustring_pod shadername)
{
if (indexvalue < 0 || indexvalue >= length) {
indexvalue = osl_range_check_err(indexvalue, length, symname, sg,
sourcefile, sourceline, groupname,
layer, layername, shadername);
}
return indexvalue;
}
#define MAT(m) (*(OSL::Matrix44*)m)
__device__ int
osl_get_matrix(void* sg_, void* r, const char* from)
{
ShaderGlobals* sg = (ShaderGlobals*)sg_;
//ShadingContext *ctx = (ShadingContext *)sg->context;
if (HDSTR(from) == StringParams::common ||
//HDSTR(from) == ctx->shadingsys().commonspace_synonym() ||
HDSTR(from) == StringParams::shader) {
MAT(r).makeIdentity();
return true;
}
if (HDSTR(from) == StringParams::object) {
// TODO: Implement transform
return false;
}
int ok = false; // TODO: Implement transform
if (!ok) {
MAT(r).makeIdentity();
// TBR: OSL would throw an error here, what should we do?
}
return ok;
}
__device__ int
osl_get_inverse_matrix(void* sg_, void* r, const char* to)
{
ShaderGlobals* sg = (ShaderGlobals*)sg_;
if (HDSTR(to) == StringParams::common ||
//HDSTR(to) == ctx->shadingsys().commonspace_synonym() ||
HDSTR(to) == StringParams::shader) {
MAT(r).makeIdentity();
return true;
}
if (HDSTR(to) == StringParams::object) {
// TODO: Implement transform
return false;
}
int ok = false; // TODO: Implement transform
if (!ok) {
MAT(r).makeIdentity();
// TBR: OSL would throw an error here, what should we do?
}
return ok;
}
#undef MAT
}
| a4fc1364ca266756f680fd6818be30dc047e4c3b.cu | // Copyright Contributors to the Open Shading Language project.
// SPDX-License-Identifier: BSD-3-Clause
// https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#define OPTIX_COMPATIBILITY 7
#include <OSL/oslclosure.h>
#include <cuda_runtime.h>
#include "rend_lib.h"
OSL_NAMESPACE_ENTER
namespace pvt {
extern __device__ char* s_color_system;
}
OSL_NAMESPACE_EXIT
// Taken from the SimplePool class
__device__ static inline size_t
alignment_offset_calc(void* ptr, size_t alignment)
{
uintptr_t ptrbits = reinterpret_cast<uintptr_t>(ptr);
uintptr_t offset = ((ptrbits + alignment - 1) & -alignment) - ptrbits;
return offset;
}
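// Worked example: with alignment = 16 and a pointer whose low bits end in 0x4,
// ptrbits + 15 rounds up past the next multiple of 16 and the & -alignment mask
// truncates back down to it, so the returned offset is 12 and ptr + offset is
// 16-byte aligned; an already-aligned pointer yields offset 0.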
// These functions are declared extern to prevent name mangling.
extern "C" {
__device__ void*
closure_component_allot(void* pool, int id, size_t prim_size,
const OSL::Color3& w)
{
((OSL::ClosureComponent*)pool)->id = id;
((OSL::ClosureComponent*)pool)->w = w;
size_t needed = (sizeof(OSL::ClosureComponent) - sizeof(void*) + prim_size
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return (void*)&char_ptr[needed];
}
__device__ void*
closure_mul_allot(void* pool, const OSL::Color3& w, OSL::ClosureColor* c)
{
((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL;
((OSL::ClosureMul*)pool)->weight = w;
((OSL::ClosureMul*)pool)->closure = c;
size_t needed = (sizeof(OSL::ClosureMul)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
__device__ void*
closure_mul_float_allot(void* pool, const float& w, OSL::ClosureColor* c)
{
((OSL::ClosureMul*)pool)->id = OSL::ClosureColor::MUL;
((OSL::ClosureMul*)pool)->weight.x = w;
((OSL::ClosureMul*)pool)->weight.y = w;
((OSL::ClosureMul*)pool)->weight.z = w;
((OSL::ClosureMul*)pool)->closure = c;
size_t needed = (sizeof(OSL::ClosureMul)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
__device__ void*
closure_add_allot(void* pool, OSL::ClosureColor* a, OSL::ClosureColor* b)
{
((OSL::ClosureAdd*)pool)->id = OSL::ClosureColor::ADD;
((OSL::ClosureAdd*)pool)->closureA = a;
((OSL::ClosureAdd*)pool)->closureB = b;
size_t needed = (sizeof(OSL::ClosureAdd)
+ (alignof(OSL::ClosureComponent) - 1))
& ~(alignof(OSL::ClosureComponent) - 1);
char* char_ptr = (char*)pool;
return &char_ptr[needed];
}
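// The *_allot helpers above act as a simple bump allocator: each writes one
// closure node at `pool` and returns the next address rounded up to
// alignof(OSL::ClosureComponent). The osl_* entry points below store that
// return value back into sg->renderstate, so successive closures pack into
// the same render-state buffer.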
__device__ void*
osl_allocate_closure_component(void* sg_, int id, int size)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
OSL::Color3 w = OSL::Color3(1, 1, 1);
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
size = max(4, size);
sg_ptr->renderstate = closure_component_allot(ret, id, size, w);
return ret;
}
__device__ void*
osl_allocate_weighted_closure_component(void* sg_, int id, int size,
const OSL::Color3* w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) {
return NULL;
}
size = max(4, size);
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_component_allot(ret, id, size, *w);
return ret;
}
__device__ void*
osl_mul_closure_color(void* sg_, OSL::ClosureColor* a, const OSL::Color3* w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL) {
return NULL;
}
if (w->x == 0.0f && w->y == 0.0f && w->z == 0.0f) {
return NULL;
}
if (w->x == 1.0f && w->y == 1.0f && w->z == 1.0f) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_mul_allot(ret, *w, a);
return ret;
}
__device__ void*
osl_mul_closure_float(void* sg_, OSL::ClosureColor* a, float w)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL || w == 0.0f) {
return NULL;
}
if (w == 1.0f) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_mul_float_allot(ret, w, a);
return ret;
}
__device__ void*
osl_add_closure_closure(void* sg_, OSL::ClosureColor* a, OSL::ClosureColor* b)
{
ShaderGlobals* sg_ptr = (ShaderGlobals*)sg_;
if (a == NULL) {
return b;
}
if (b == NULL) {
return a;
}
// Fix up the alignment
void* ret = ((char*)sg_ptr->renderstate)
+ alignment_offset_calc(sg_ptr->renderstate,
alignof(OSL::ClosureComponent));
sg_ptr->renderstate = closure_add_allot(ret, a, b);
return ret;
}
#define IS_STRING(type) (type.basetype == OSL::TypeDesc::STRING)
#define IS_PTR(type) (type.basetype == OSL::TypeDesc::PTR)
#define IS_COLOR(type) (type.vecsemantics == OSL::TypeDesc::COLOR)
__device__ bool
rend_get_userdata(OSL::StringParam name, void* data, int data_size,
const OSL::TypeDesc& type, int index)
{
return false;
}
#undef IS_COLOR
#undef IS_STRING
#undef IS_PTR
__device__ int
osl_bind_interpolated_param(void* sg_, OSL::ustring_pod name, long long type,
int userdata_has_derivs, void* userdata_data,
int symbol_has_derivs, void* symbol_data,
int symbol_data_size, char* userdata_initialized,
int userdata_index)
{
char status = *userdata_initialized;
if (status == 0) {
bool ok = rend_get_userdata(HDSTR(name), userdata_data,
symbol_data_size, (*(OSL::TypeDesc*)&type),
userdata_index);
*userdata_initialized = status = 1 + ok;
}
if (status == 2) {
memcpy(symbol_data, userdata_data, symbol_data_size);
return 1;
}
return 0;
}
__device__ int
osl_strlen_is(const char* str)
{
return HDSTR(str).length();
}
__device__ int
osl_hash_is(const char* str)
{
return HDSTR(str).hash();
}
__device__ int
osl_getchar_isi(const char* str, int index)
{
return (str && unsigned(index) < HDSTR(str).length()) ? str[index] : 0;
}
__device__ void
osl_printf(void* sg_, char* fmt_str, void* args)
{
// This can be used to limit printing to one Cuda thread for debugging
// if (launch_index.x == 0 && launch_index.y == 0)
//
// vprintf(fmt_str, (const char*)args);
}
__device__ void*
osl_get_noise_options(void* sg_)
{
ShaderGlobals* sg = ((ShaderGlobals*)sg_);
NoiseOptCUDA* opt
= (NoiseOptCUDA*)((ShadingContextCUDA*)sg->context)->noise_options_ptr();
new (opt) NoiseOptCUDA;
return opt;
}
__device__ void*
osl_get_texture_options(void* sg_)
{
return 0;
}
__device__ void
osl_texture_set_interp_code(void* opt, int mode)
{
// ((TextureOpt *)opt)->interpmode = (TextureOpt::InterpMode)mode;
}
__device__ void
osl_texture_set_stwrap_code(void* opt, int mode)
{
//((TextureOpt *)opt)->swrap = (TextureOpt::Wrap)mode;
//((TextureOpt *)opt)->twrap = (TextureOpt::Wrap)mode;
}
__forceinline__ __device__ float3
make_float3(const float4& a)
{
return make_float3(a.x, a.y, a.z);
}
// FIXME:
// clang++ 9.0 seems to have trouble with tex2d<float4>() look-ups,
// so we'll declare this external and implement texture lookups in
// CUDA files compiled by nvcc (optix_grid_renderer.cu and
// optix_raytrace.cu).
// (clang++ 9.0 error 'undefined __nv_tex_surf_handler')
extern __device__ float4
osl_tex2DLookup(void* handle, float s, float t);
__device__ int
osl_texture(void* sg_, const char* name, void* handle, void* opt_, float s,
float t, float dsdx, float dtdx, float dsdy, float dtdy, int chans,
void* result, void* dresultdx, void* dresultdy, void* alpha,
void* dalphadx, void* dalphady, void* ustring_errormessage)
{
if (!handle)
return 0;
// cudaTextureObject_t texID = cudaTextureObject_t(handle);
float4 fromTexture = osl_tex2DLookup(handle, s, t);
// see note above
// float4 fromTexture = tex2D<float4>(texID, s, t);
*((float3*)result) = make_float3(fromTexture.x, fromTexture.y,
fromTexture.z);
return 1;
}
__device__ int
osl_range_check_err(int indexvalue, int length, OSL::ustring_pod symname,
void* sg, OSL::ustring_pod sourcefile, int sourceline,
OSL::ustring_pod groupname, int layer,
OSL::ustring_pod layername, OSL::ustring_pod shadername)
{
if (indexvalue < 0 || indexvalue >= length) {
return indexvalue < 0 ? 0 : length - 1;
}
return indexvalue;
}
__device__ int
osl_range_check(int indexvalue, int length, OSL::ustring_pod symname, void* sg,
OSL::ustring_pod sourcefile, int sourceline,
OSL::ustring_pod groupname, int layer,
OSL::ustring_pod layername, OSL::ustring_pod shadername)
{
if (indexvalue < 0 || indexvalue >= length) {
indexvalue = osl_range_check_err(indexvalue, length, symname, sg,
sourcefile, sourceline, groupname,
layer, layername, shadername);
}
return indexvalue;
}
#define MAT(m) (*(OSL::Matrix44*)m)
__device__ int
osl_get_matrix(void* sg_, void* r, const char* from)
{
ShaderGlobals* sg = (ShaderGlobals*)sg_;
//ShadingContext *ctx = (ShadingContext *)sg->context;
if (HDSTR(from) == StringParams::common ||
//HDSTR(from) == ctx->shadingsys().commonspace_synonym() ||
HDSTR(from) == StringParams::shader) {
MAT(r).makeIdentity();
return true;
}
if (HDSTR(from) == StringParams::object) {
// TODO: Implement transform
return false;
}
int ok = false; // TODO: Implement transform
if (!ok) {
MAT(r).makeIdentity();
// TBR: OSL would throw an error here, what should we do?
}
return ok;
}
__device__ int
osl_get_inverse_matrix(void* sg_, void* r, const char* to)
{
ShaderGlobals* sg = (ShaderGlobals*)sg_;
if (HDSTR(to) == StringParams::common ||
//HDSTR(to) == ctx->shadingsys().commonspace_synonym() ||
HDSTR(to) == StringParams::shader) {
MAT(r).makeIdentity();
return true;
}
if (HDSTR(to) == StringParams::object) {
// TODO: Implement transform
return false;
}
int ok = false; // TODO: Implement transform
if (!ok) {
MAT(r).makeIdentity();
// TBR: OSL would throw an error here, what should we do?
}
return ok;
}
#undef MAT
}
|
a34b47318a62bb45bd0f14053acc2d7974678ac3.hip | // !!! This is a file automatically generated by hipify!!!
// Run multiple scans in separate streams.
// Example for video 6.2.
#include <assert.h>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
// Standard CUDA API functions
#include <hip/hip_runtime_api.h>
// CUDA cooperative groups API
#include <hip/hip_cooperative_groups.h>
#include "../utils.h"
void scan_reference(const int *source, int *dest, unsigned int count)
{
int sum = 0;
for (int i = 0; i < count; i++) {
sum += source[i];
dest[i] = sum;
}
}
const int BLOCK_SIZE = 1024;
// Scan using shared memory, within a single block.
__device__ int block_scan(int idata, int shared_data[],
cooperative_groups::thread_block block)
{
// Index into shared memory
int si = threadIdx.x;
shared_data[si] = 0;
si += blockDim.x;
shared_data[si] = idata;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
cooperative_groups::sync(block);
int t = shared_data[si] + shared_data[si - offset];
cooperative_groups::sync(block);
shared_data[si] = t;
}
return shared_data[si];
}
// First step of scan: process each block separately
__global__ void scan1(const int *source, int *dest)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
// Index into global memory
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Load data from global memory
int idata = source[index];
// Shared memory scan within this block
int result =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
// Write back to global memory
dest[index] = result;
}
// Second step of scan: compute prefix sums for each block
__global__ void scan2(const int *dest, int *block_sums, unsigned int count)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int idata = (index == 0) ? 0 : dest[index * blockDim.x - 1];
block_sums[index] =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
}
// Final step of scan: add block sums to every result.
__global__ void finish_scan(const int *block_sums, int *dest)
{
__shared__ int block_sum;
if (threadIdx.x == 0) {
block_sum = block_sums[blockIdx.x];
}
cooperative_groups::sync(cooperative_groups::this_thread_block());
int index = blockIdx.x * blockDim.x + threadIdx.x;
dest[index] += block_sum;
}
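// Taken together, the three kernels form a two-level inclusive scan: scan1
// computes an independent prefix sum within each block; scan2 scans the block
// totals, with each thread reading the last element of the previous block's
// result (0 for block 0) so the output is an exclusive prefix of block sums;
// finish_scan then adds that per-block prefix onto every element of its block.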
int main(int argc, char **argv)
{
// Maximum possible size with two-level scan.
const unsigned int COUNT = BLOCK_SIZE * BLOCK_SIZE;
const int N_STREAMS = 2;
int *sources[N_STREAMS], *dests[N_STREAMS];
// Fill source arrays with some arbitrary test values
std::mt19937 rng;
rng.seed(0);
std::uniform_int_distribution<std::mt19937::result_type> dist(0, 9);
for (int i = 0; i < N_STREAMS; i++) {
sources[i] = new int[COUNT];
dests[i] = new int[COUNT];
for (int j = 0; j < COUNT; j++) {
sources[i][j] = dist(rng);
}
}
// Allocate device memory and transfer data
int n_blocks1 = (COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *sources_dev[N_STREAMS], *dests_dev[N_STREAMS], *block_sums[N_STREAMS];
size_t size = COUNT * sizeof(int);
hipStream_t stream[N_STREAMS];
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(hipStreamCreate(&stream[i]));
cudaCheckError(hipMalloc(&sources_dev[i], size));
cudaCheckError(hipMalloc(&dests_dev[i], size));
// Temporary buffer for kernels
cudaCheckError(hipMalloc(&block_sums[i], n_blocks1 * sizeof(int)));
}
// Code in this block will be timed by KernelTimer
{
KernelTimer t;
// Copy data to device
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(
hipMemcpy(sources_dev[i], sources[i], size, hipMemcpyHostToDevice));
}
// Run the scans in separate streams
for (int i = 0; i < N_STREAMS; i++) {
hipLaunchKernelGGL(( scan1), dim3(n_blocks1), dim3(BLOCK_SIZE), 0, stream[i], sources_dev[i],
dests_dev[i]);
int n_blocks2 = (n_blocks1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
assert(n_blocks2 == 1);
hipLaunchKernelGGL(( scan2), dim3(n_blocks2), dim3(BLOCK_SIZE), 0, stream[i], dests_dev[i],
block_sums[i], n_blocks1);
hipLaunchKernelGGL(( finish_scan), dim3(n_blocks1), dim3(BLOCK_SIZE), 0, stream[i], block_sums[i],
dests_dev[i]);
}
// Copy results back to the host
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(
hipMemcpy(dests[i], dests_dev[i], size, hipMemcpyDeviceToHost));
}
}
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(hipFree(sources_dev[i]));
cudaCheckError(hipFree(dests_dev[i]));
cudaCheckError(hipFree(block_sums[i]));
}
// Compare with reference implementation
std::unique_ptr<int[]> dest_reference(new int[COUNT]);
for (int i = 0; i < N_STREAMS; i++) {
scan_reference(sources[i], dest_reference.get(), COUNT);
for (int j = 0; j < COUNT; j++) {
assert(dest_reference.get()[j] == dests[i][j]);
}
}
return 0;
}
| a34b47318a62bb45bd0f14053acc2d7974678ac3.cu | // Run multiple scans in separate streams.
// Example for video 6.2.
#include <assert.h>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
// Standard CUDA API functions
#include <cuda_runtime_api.h>
// CUDA cooperative groups API
#include <cooperative_groups.h>
#include "../utils.h"
void scan_reference(const int *source, int *dest, unsigned int count)
{
int sum = 0;
for (int i = 0; i < count; i++) {
sum += source[i];
dest[i] = sum;
}
}
const int BLOCK_SIZE = 1024;
// Scan using shared memory, within a single block.
__device__ int block_scan(int idata, int shared_data[],
cooperative_groups::thread_block block)
{
// Index into shared memory
int si = threadIdx.x;
shared_data[si] = 0;
si += blockDim.x;
shared_data[si] = idata;
for (int offset = 1; offset < blockDim.x; offset *= 2) {
cooperative_groups::sync(block);
int t = shared_data[si] + shared_data[si - offset];
cooperative_groups::sync(block);
shared_data[si] = t;
}
return shared_data[si];
}
// First step of scan: process each block separately
__global__ void scan1(const int *source, int *dest)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
// Index into global memory
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Load data from global memory
int idata = source[index];
// Shared memory scan within this block
int result =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
// Write back to global memory
dest[index] = result;
}
// Second step of scan: compute prefix sums for each block
__global__ void scan2(const int *dest, int *block_sums, unsigned int count)
{
// Shared memory buffer. By allocating extra elements we avoid bounds
// checks on shared memory access.
__shared__ int shared_data[2 * BLOCK_SIZE];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int idata = (index == 0) ? 0 : dest[index * blockDim.x - 1];
block_sums[index] =
block_scan(idata, shared_data, cooperative_groups::this_thread_block());
}
// Final step of scan: add block sums to every result.
__global__ void finish_scan(const int *block_sums, int *dest)
{
__shared__ int block_sum;
if (threadIdx.x == 0) {
block_sum = block_sums[blockIdx.x];
}
cooperative_groups::sync(cooperative_groups::this_thread_block());
int index = blockIdx.x * blockDim.x + threadIdx.x;
dest[index] += block_sum;
}
int main(int argc, char **argv)
{
// Maximum possible size with two-level scan.
const unsigned int COUNT = BLOCK_SIZE * BLOCK_SIZE;
const int N_STREAMS = 2;
int *sources[N_STREAMS], *dests[N_STREAMS];
// Fill source arrays with some arbitrary test values
std::mt19937 rng;
rng.seed(0);
std::uniform_int_distribution<std::mt19937::result_type> dist(0, 9);
for (int i = 0; i < N_STREAMS; i++) {
sources[i] = new int[COUNT];
dests[i] = new int[COUNT];
for (int j = 0; j < COUNT; j++) {
sources[i][j] = dist(rng);
}
}
// Allocate device memory and transfer data
int n_blocks1 = (COUNT + BLOCK_SIZE - 1) / BLOCK_SIZE;
int *sources_dev[N_STREAMS], *dests_dev[N_STREAMS], *block_sums[N_STREAMS];
size_t size = COUNT * sizeof(int);
cudaStream_t stream[N_STREAMS];
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(cudaStreamCreate(&stream[i]));
cudaCheckError(cudaMalloc(&sources_dev[i], size));
cudaCheckError(cudaMalloc(&dests_dev[i], size));
// Temporary buffer for kernels
cudaCheckError(cudaMalloc(&block_sums[i], n_blocks1 * sizeof(int)));
}
// Code in this block will be timed by KernelTimer
{
KernelTimer t;
// Copy data to device
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(
cudaMemcpy(sources_dev[i], sources[i], size, cudaMemcpyHostToDevice));
}
// Run the scans in separate streams
for (int i = 0; i < N_STREAMS; i++) {
scan1<<<n_blocks1, BLOCK_SIZE, 0, stream[i]>>>(sources_dev[i],
dests_dev[i]);
int n_blocks2 = (n_blocks1 + BLOCK_SIZE - 1) / BLOCK_SIZE;
assert(n_blocks2 == 1);
scan2<<<n_blocks2, BLOCK_SIZE, 0, stream[i]>>>(dests_dev[i],
block_sums[i], n_blocks1);
finish_scan<<<n_blocks1, BLOCK_SIZE, 0, stream[i]>>>(block_sums[i],
dests_dev[i]);
}
// Copy results back to the host
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(
cudaMemcpy(dests[i], dests_dev[i], size, cudaMemcpyDeviceToHost));
}
}
for (int i = 0; i < N_STREAMS; i++) {
cudaCheckError(cudaFree(sources_dev[i]));
cudaCheckError(cudaFree(dests_dev[i]));
cudaCheckError(cudaFree(block_sums[i]));
}
// Compare with reference implementation
std::unique_ptr<int[]> dest_reference(new int[COUNT]);
for (int i = 0; i < N_STREAMS; i++) {
scan_reference(sources[i], dest_reference.get(), COUNT);
for (int j = 0; j < COUNT; j++) {
assert(dest_reference.get()[j] == dests[i][j]);
}
}
return 0;
}
|
3c46550e411a368ab803987851b55a2a1ec2f9bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 1993-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Compile:
nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu
To enable/disable different performance options add the flag -DPERFOPTS=x
Where x is a bitmask defining the options used (see below).
Run:
./LSTM
or
./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>
Example (run on an NVIDIA M40):
> ./LSTM
Running with default settings
seqLength 100, numLayers 4, hiddenSize 512, miniBatch 64
i checksum (example 0) 5.113463E+04
h checksum (example 0) 2.048000E+03
c checksum (example 0) 2.058137E+05
i checksum 3.272639E+06 c checksum 1.317278E+07 h checksum 1.310720E+05
Runtime 27.807743ms
*/
#include <stdio.h>
#include <hipblas.h>
#include <hiprand/hiprand.h>
#define TRAINING (true)
// #define UPDATE
// #define PEEPHOLES
// #define CIFG
#define GATE_NUM (4)
#define PEEP_NUM (3)
#define I_INDEX (0)
#define F_INDEX (1)
#define G_INDEX (2)
#define O_INDEX (3)
#ifndef PERFOPTS
#define PERFOPTS (31)
#endif
#define GROUP_GEMM ((PERFOPTS & 1))
#define USE_STREAMS ((PERFOPTS & 2))
// #define FUSE_PW ((PERFOPTS & 4))
#define FUSE_PW (1)
// #define PRE_TRANSPOSE ((PERFOPTS & 8))
#define PRE_TRANSPOSE (1)
#define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1))
#define RECUR_BATCH_BP_SIZE (((PERFOPTS & 16) ? 5 : 1))
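// Example: compiling with -DPERFOPTS=19 (bits 0, 1 and 4) enables GROUP_GEMM,
// USE_STREAMS and the larger recurrence batch, while FUSE_PW and PRE_TRANSPOSE
// are hard-coded to 1 above regardless of the mask.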
#define HFUNC tanhf
#define DEHFUNC de_tanhf
#define GFUNC tanhf
#define DEGFUNC de_tanhf
#define LOSSFUNC squaree
#define DELOSSFUNC de_squaree
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#define GetOffsetW(x) (GATE_NUM * inputSize * hiddenSize + GATE_NUM * hiddenSize * hiddenSize + (x - 1) * 2 * GATE_NUM * hiddenSize * hiddenSize)
#define GetOffsetR(x) (GetOffsetW(x) + GATE_NUM * hiddenSize * hiddenSize)
// (layer - 1) * 8 * hiddenSize * hiddenSize + 8 * hiddenSize * hiddenSize + 4 * hiddenSize * inputSize// #define GetOffsetR(x)
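// Layout implied by these macros: layer 0 stores GATE_NUM * inputSize * hiddenSize
// input weights (W) followed by GATE_NUM * hiddenSize * hiddenSize recurrent
// weights (R); every later layer adds 2 * GATE_NUM * hiddenSize * hiddenSize for
// its own W and R. Hence GetOffsetW(1) points just past layer 0's W and R blocks,
// and GetOffsetR(x) is always GetOffsetW(x) + GATE_NUM * hiddenSize * hiddenSize.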
// Device functions
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float de_sigmoidf(float out) {
return out * (1-out);
}
__forceinline__ __device__ float linearf(float in) {
return in;
}
__forceinline__ __device__ float de_linearf(float out) {
return 1.f;
}
__forceinline__ __device__ float de_tanhf(float out) {
return 1.f - pow(out, 2);
}
__forceinline__ __device__ float squaree(float output, float target) {
return pow(output - target, 2);
}
__forceinline__ __device__ float de_squaree(float output, float target) {
return 2 * (output - target);
}
__forceinline__ __device__ float entropye(float output, float target) {
return -(target * logf(output) + (1.f - target) * logf(1.f - output));
}
__forceinline__ __device__ float de_entropye(float output, float target) {
return -(target / output + (1.f - target) / (output - 1.f));
}
__global__ void pw_de_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = 1 - pow(a[i], 2);
}
__global__ void pw_de_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * (1 - a[i]);
}
// Pointwise functions
__global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += bias[i % nBias];
}
__global__ void pw_peepsAdd(float *y, float *peeps, float *x, int n, int nPeeps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += peeps[i % nPeeps] * x[i];
}
__global__ void pw_vecAdd(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] + b[i];
}
__global__ void pw_vecMul(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * b[i];
}
__global__ void pw_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = tanh(a[i]);
}
__global__ void pw_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = sigmoidf(a[i]);
}
// Unfused LSTM (calling many pointwise kernels).
int LSTM_elementwise_unfused( int hiddenSize,
int miniBatch,
float * __restrict__ tmp_h,
float * __restrict__ tmp_i,
float * __restrict__ bias,
float * __restrict__ peeps,
// float * __restrict__ linearGates,
// float * __restrict__ h_data,
float * __restrict__ i_data,
float * __restrict__ c_in,
float * __restrict__ c_out,
bool training,
hipStream_t stream) {
dim3 blockDim;
dim3 gridDim;
int numElements = hiddenSize * miniBatch;
blockDim.x = 128;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
for (int i = 0; i < GATE_NUM; i++) {
if (tmp_h != NULL) {
hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, tmp_i + i * numElements,
tmp_h + i * numElements, numElements);
cudaErrCheck(hipGetLastError());
}
hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize);
cudaErrCheck(hipGetLastError());
if (i == 0) {
hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize);
cudaErrCheck(hipGetLastError());
}
if (training) {
printf("LSTM_elementWise_unfused does not support training\n");
return 1;
}
}
float *in_gate = tmp_i +
#ifdef I_INDEX
I_INDEX * numElements;//i
#else
NULL;
#endif
float *forget_gate = tmp_i +
#ifdef F_INDEX
F_INDEX * numElements;//f
#else
NULL;
#endif
float *in_gate2 = tmp_i +
#ifdef G_INDEX
G_INDEX * numElements;//z
#else
NULL;
#endif
float *out_gate = tmp_i +
#ifdef O_INDEX
O_INDEX * numElements;//z
#else
NULL;
#endif
if (c_in != NULL) {
//i_t += p_i * c_t-1
#ifdef I_INDEX
hipLaunchKernelGGL(( pw_peepsAdd) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, peeps + I_INDEX * hiddenSize, c_in, numElements, hiddenSize);
cudaErrCheck(hipGetLastError());
#endif
//f_t += p_f * c_t-1
#ifdef F_INDEX
hipLaunchKernelGGL(( pw_peepsAdd) , dim3(gridDim), dim3(blockDim), 0, stream , forget_gate, peeps + F_INDEX * hiddenSize, c_in, numElements, hiddenSize);
cudaErrCheck(hipGetLastError());
#endif
}
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, tmp_i + 0 * numElements, numElements);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , forget_gate, tmp_i + 1 * numElements, numElements);
cudaErrCheck(hipGetLastError());
//z'
hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate2, tmp_i + 2 * numElements, numElements);
cudaErrCheck(hipGetLastError());
if (c_in == NULL) {
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(hipGetLastError());
} else {
//f_t * c_t-1
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , forget_gate, forget_gate, c_in, numElements);
cudaErrCheck(hipGetLastError());
//i_t * z
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(hipGetLastError());
//c_t = f_t * c_t-1 + i_t * c_t'
hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, forget_gate, numElements);
cudaErrCheck(hipGetLastError());
}
//o_t += p_o * c_t
hipLaunchKernelGGL(( pw_peepsAdd) , dim3(gridDim), dim3(blockDim), 0, stream , out_gate, peeps + 2 * hiddenSize, in_gate, numElements, hiddenSize);
cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , out_gate, tmp_i + 3 * numElements, numElements);
cudaErrCheck(hipGetLastError());
if (c_out != NULL) {
cudaErrCheck(hipMemcpyAsync(c_out, in_gate, numElements * sizeof(float), hipMemcpyDeviceToDevice, stream));
}
//tanh(c_t)
hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, numElements);
cudaErrCheck(hipGetLastError());
//y = o_t * tanh(c_t)
//hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , h_data, out_gate, in_gate, numElements);
// cudaErrCheck(hipGetLastError());
hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , i_data, out_gate, in_gate, numElements);
cudaErrCheck(hipGetLastError());
return 0;
}
// Fused forward kernel
__global__ void elementWise_fp(int hiddenSize, int miniBatch,
float *tmp_h, //hidden_size * mini_batch * 4: W*xt
float *tmp_i, //hidden_size * mini_batch * 4: R*yt
float *bias, //hidden_size * 4: b*
float *peeps,//hidden_size * 3: p*
// float *linearGates,// hidden_size * mini_batch * 4
float *stateGates,
// float *h_out, //h_data
float *i_out,
float *c_in,
float *c_out,
float *label,
float *loss,
float *mask,
float *y_diff,
bool training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int batch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + GATE_NUM * batch * hiddenSize;
float g[GATE_NUM];
for (int i = 0; i < GATE_NUM; i++) {
g[i] = tmp_i[i * hiddenSize + gateIndex] + (tmp_h == NULL? 0:tmp_h[i * hiddenSize + gateIndex]);
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
#ifdef PEEPHOLES
#ifdef I_INDEX
g[I_INDEX] += peeps[I_INDEX * hiddenSize + index % hiddenSize] * c_in[index];//pi * c[t-1]
#endif
#ifdef F_INDEX
g[F_INDEX] += peeps[F_INDEX * hiddenSize + index % hiddenSize] * c_in[index];//pf * c[t-1]
#endif
#endif
float in_gate =
#ifdef I_INDEX
sigmoidf(g[I_INDEX]);//i
#else
1.f;
#endif
float forget_gate =
#ifdef F_INDEX
sigmoidf(g[F_INDEX]);//f
#else
#ifdef CIFG
(1 - in_gate);
#else
1.f;
#endif
#endif
float in_gate2 =
#ifdef G_INDEX
GFUNC(g[G_INDEX]);//z
#else
1.f;
#endif
float val = (forget_gate * c_in[index]) + (in_gate * in_gate2);//c[t] = z*i+c[t-1]*f
c_out[index] = val;
#ifdef PEEPHOLES
#ifdef O_INDEX
g[O_INDEX] += peeps[hiddenSize * (O_INDEX - 1) + index % hiddenSize] * c_out[index];//po * c[t]
#endif
#endif
float out_gate =
#ifdef O_INDEX
sigmoidf(g[O_INDEX]);//o
#else
1.f;
#endif
// #ifdef TRAINING
if(training) {
#ifdef I_INDEX
stateGates[I_INDEX * hiddenSize + gateIndex] = in_gate;
#endif
#ifdef F_INDEX
stateGates[F_INDEX * hiddenSize + gateIndex] = forget_gate;
#endif
#ifdef G_INDEX
stateGates[G_INDEX*hiddenSize + gateIndex] = in_gate2;
#endif
#if O_INDEX
stateGates[O_INDEX*hiddenSize + gateIndex] = out_gate;
#endif
}
// #endif
val = out_gate * HFUNC(val); //h
// h_out[index] = val;
i_out[index] = val;
if (label != NULL) {
if (mask[index % hiddenSize] == 1) {
loss[index] = LOSSFUNC(val, label[index]);
if (training)
y_diff[index] = DELOSSFUNC(val, label[index]);
} else {
if (training)
y_diff[index] = 0;
}
}
}
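// In equation form (all gates and peepholes enabled) the kernel above computes,
// per element: i = sigmoid(W*x + R*y + b_i + p_i*c[t-1]),
// f = sigmoid(... + p_f*c[t-1]), z = GFUNC(...), c[t] = f*c[t-1] + i*z,
// o = sigmoid(... + p_o*c[t]) and y = o * HFUNC(c[t]). With CIFG defined the
// forget gate becomes 1 - i, and on the last layer the masked elements also
// evaluate LOSSFUNC(y, label) and, when training, its derivative into y_diff.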
// Fused backward kernel
__global__ void elementWise_bp(int hiddenSize, int miniBatch,
float *y_diff,
float *stateGates_diff_in,// hidden_size * mini_batch * 4
float *stateGates_diff_out,// hidden_size * mini_batch * 4
float *stateGates,
float *peeps,
float *peeps_diff,
float *c_in,
float *c_out,
float *c_diff,
bool peeps_update) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int batch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + GATE_NUM * batch * hiddenSize;
float in_gate =
#ifdef I_INDEX
stateGates[I_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
float forget_gate =
#ifdef F_INDEX
stateGates[F_INDEX * hiddenSize + gateIndex];
#else
#ifdef CIFG
1 - in_gate;
#else
1.f;
#endif
#endif
float in_gate2 =
#ifdef G_INDEX
stateGates[G_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
float out_gate =
#ifdef O_INDEX
stateGates[O_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
#ifdef O_INDEX
float out_diff = y_diff[index]*HFUNC(c_out[index])*de_sigmoidf(out_gate); //do
#endif
float peep_diff = 0;
if (stateGates_diff_in != NULL) peep_diff += c_diff[index];
#ifdef PEEPHOLES
#ifdef O_INDEX
peep_diff += peeps[(O_INDEX - 1) * hiddenSize + index % hiddenSize] * out_diff;//po * do
#endif
if (stateGates_diff_in != NULL) {
#ifdef I_INDEX
peep_diff += peeps[I_INDEX * hiddenSize + index % hiddenSize] * stateGates_diff_in[I_INDEX * hiddenSize + gateIndex]; //pi * di[t+1]
#endif
#ifdef F_INDEX
peep_diff += peeps[F_INDEX * hiddenSize + index % hiddenSize] * stateGates_diff_in[F_INDEX * hiddenSize + gateIndex];//pf * df[t+1]
#endif
}
#endif
float local_c_diff = y_diff[index]*out_gate*DEHFUNC(HFUNC(c_out[index])) + peep_diff;
float in_diff2 = local_c_diff * in_gate * DEGFUNC(in_gate2);
#ifdef I_INDEX
float in_diff = local_c_diff *
#ifdef CIFG
(in_gate2 - c_in[index])
#else
in_gate2
#endif
* de_sigmoidf(in_gate);
stateGates_diff_out[I_INDEX * hiddenSize + gateIndex] = in_diff;
#endif
#ifdef F_INDEX
float forget_diff = local_c_diff * c_in[index] * de_sigmoidf(forget_gate);
stateGates_diff_out[F_INDEX * hiddenSize + gateIndex] = forget_diff;
#endif
#ifdef G_INDEX
stateGates_diff_out[G_INDEX * hiddenSize + gateIndex] = in_diff2;
#endif
#ifdef O_INDEX
stateGates_diff_out[O_INDEX * hiddenSize + gateIndex] = out_diff;
#endif
#ifdef PEEPHOLES
int peepIndex = (index % hiddenSize) + PEEP_NUM * batch * hiddenSize;
if (peeps_update) {
#ifdef I_INDEX
peeps_diff[I_INDEX * hiddenSize + peepIndex] = in_diff * c_in[index];//p_i
#endif
#ifdef F_INDEX
peeps_diff[F_INDEX * hiddenSize + peepIndex] = forget_diff * c_in[index]; //p_f
#endif
}
#ifdef O_INDEX
peeps_diff[(O_INDEX - 1) * hiddenSize + peepIndex] = out_diff * c_out[index]; //p_o
#endif
#endif
c_diff[index] = local_c_diff * forget_gate;
}
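// The backward pass above mirrors the forward step: the local cell gradient is
// y_diff*out_gate*DEHFUNC(HFUNC(c[t])) plus the carried c_diff and the peephole
// terms; the gate deltas follow as dc*z*de_sigmoidf(i) (z - c[t-1] under CIFG),
// dc*c[t-1]*de_sigmoidf(f), dc*i*DEGFUNC(z) and y_diff*HFUNC(c[t])*de_sigmoidf(o);
// finally c_diff is set to dc*f so the previous time step receives its share of
// the cell-state gradient.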
struct LSTM_scheduler
{
// float *h_data;//y
float *i_data;//x
float *c_data;//c
float *T;
float *T_f;
float *bias;
float *tmp_h;
float *tmp_i;
// float *linearGates;
float *stateGates;
//diff
float *stateGates_diff; //di,df,dz,do
float *y_diff;//dy
// float *T_diff;//dW, dR
float *diff_helper;
#ifdef PEEPHOLES
float *peeps_diff;
float *peeps;
#endif
float *c_diff;//dc*ft
float *label;
float *loss;
float *mask;
hipStream_t *stream_i;
hipStream_t *stream_h;
hipEvent_t **events_i;
hipEvent_t **events_h;
hipblasHandle_t handle;
int hiddenSize;
int miniBatch;
int seqLength;
int numLayers;
int numElements;
int inputSize;
int inputNumElements;
int inputLayerSize;
int weightSize;
hipblasOperation_t transa;
hipblasOperation_t transb;
hiprandGenerator_t rng;
void init_helper(float * device_ptr, float data, int size) {
if (size == 0)
return;
float* host_ptr;
host_ptr = (float *)malloc(size * sizeof(float));
for (int i = 0; i < size; ++i) {
host_ptr[i] = data;
}
cudaErrCheck(hipMemcpy(device_ptr, host_ptr, size * sizeof(float), hipMemcpyHostToDevice));
}
void set_input(float * input) {
cudaErrCheck(hipMemcpy(i_data, input, inputLayerSize * sizeof(float), hipMemcpyHostToDevice));
}
void set_label(float * label_) {
cudaErrCheck(hipMemcpy(label, label_, numElements * seqLength * sizeof(float), hipMemcpyHostToDevice));
}
void set_mask(float * mask_) {
if (mask_ == NULL) {
init_helper(mask, 1, 1);
init_helper(mask + 1, 0, hiddenSize-1);
} else {
cudaErrCheck(hipMemcpy(mask, mask_, numElements * seqLength * sizeof(float), hipMemcpyHostToDevice));
}
}
void set_weight(float T_f_=0.1, float bias_=0.1, float peeps_=0.1, int random=0) {
if (!(random & 1)) {
init_helper(T_f, T_f_, weightSize);
}
else {
curandErrCheck(hiprandGenerateUniform(rng, T_f, weightSize));
}
if (!(random & 2)) {
init_helper(bias, bias_, hiddenSize * GATE_NUM * numLayers);
}
else {
curandErrCheck(hiprandGenerateUniform(rng, bias, hiddenSize * GATE_NUM * numLayers));
}
#ifdef PEEPHOLES
if (!(random & 4)) {
init_helper(peeps, peeps_, hiddenSize * 3 * numLayers);
}
else {
curandErrCheck(hiprandGenerateUniform(rng, bias, hiddenSize * PEEP_NUM * numLayers));
}
#endif
}
LSTM_scheduler(int hiddenSize_, int miniBatch_, int seqLength_, int numLayers_, int inputSize_)
{
    hiddenSize = hiddenSize_;
    miniBatch = miniBatch_;
    seqLength = seqLength_;
    numLayers = numLayers_;
    inputSize = inputSize_;
    // seqLength must be assigned before transa is derived from it.
    transa = (PRE_TRANSPOSE && (seqLength > 1)) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
    transb = HIPBLAS_OP_N;
numElements = hiddenSize * miniBatch;
inputNumElements = inputSize * miniBatch;
inputLayerSize = inputNumElements * seqLength;
weightSize = GetOffsetW(numLayers);
cublasErrCheck(hipblasCreate(&handle));
curandErrCheck(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ull));
stream_i = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t));
stream_h = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t));
// If we don't want to use streams we can launch everything in to the NULL stream
for (int i = 0; i < numLayers; i++) {
if (USE_STREAMS) {
cudaErrCheck(hipStreamCreate(&stream_i[i]));
// Priority is empirical.
cudaErrCheck(hipStreamCreateWithPriority(&stream_h[i], 0, -1));
}
else {
stream_i[i] = NULL;
stream_h[i] = NULL;
}
}
events_i = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*));
events_h = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*));
for (int i = 0; i < numLayers; i++) {
events_i[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t));
events_h[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t));
}
}
void init(float* mask_ = NULL) {
// cudaErrCheck(hipMalloc((void**)&h_data, (seqLength + 1) * (inputNumElements + (numLayers - 1) * numElements) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&i_data, (seqLength * inputNumElements + seqLength * numLayers * numElements) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&T, (inputSize * hiddenSize * GATE_NUM + hiddenSize * hiddenSize * GATE_NUM + (numLayers - 1) * hiddenSize * hiddenSize * 2 * GATE_NUM) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&T_f, (inputSize * hiddenSize * GATE_NUM + hiddenSize * hiddenSize * GATE_NUM + (numLayers - 1) * hiddenSize * hiddenSize * 2 * GATE_NUM) * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&bias, numLayers * hiddenSize * GATE_NUM * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&label, numElements * seqLength * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&loss, numElements * seqLength * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&mask, hiddenSize * sizeof(float)));
set_mask(NULL);
init_helper(loss, 0, numElements * seqLength);
init_helper(label, 1, numElements * seqLength);
#ifdef PEEPHOLES
cudaErrCheck(hipMalloc((void**)&peeps, numLayers * hiddenSize * PEEP_NUM * sizeof(float)));
init_helper(peeps, 0.1, numLayers * hiddenSize * PEEP_NUM);
// curandErrCheck(hiprandGenerateUniform(rng, peeps, numLayers * hiddenSize * 3));
#endif
// Workspace
cudaErrCheck(hipMalloc((void**)&tmp_h, GATE_NUM * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&tmp_i, GATE_NUM * seqLength * numElements * sizeof(float)));
// // Activations
if (TRAINING) {
// cudaErrCheck(hipMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&stateGates, GATE_NUM * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&stateGates_diff, GATE_NUM * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&y_diff, seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_diff, numLayers * numElements * seqLength * sizeof(float)));
#ifdef PEEPHOLES
cudaErrCheck(hipMalloc((void**)&peeps_diff, numLayers * numElements * seqLength * PEEP_NUM * sizeof(float)));
init_helper(peeps_diff, 0, numLayers * numElements * seqLength * PEEP_NUM);
#endif
cudaErrCheck(hipMalloc((void**)&diff_helper, miniBatch * seqLength * sizeof(float)));
init_helper(diff_helper, 1.0, miniBatch * seqLength);
init_helper(c_diff, 0, numLayers * numElements);
init_helper(stateGates_diff, 0, GATE_NUM * seqLength * numLayers * numElements);
}
// Initialise with random values.
// curandErrCheck(hiprandGenerateUniform(rng, h_data, (seqLength + 1) * (numLayers) * numElements));
// curandErrCheck(hiprandGenerateUniform(rng, c_data, (seqLength + 1) * (numLayers) * numElements));
// cudaErrCheck(hipMemset(c_data, 0, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
init_helper(c_data, 0, (seqLength + 1) * (numLayers) * numElements);
// curandErrCheck(hiprandGenerateUniform(rng, i_data, seqLength * (inputNumElements + numLayers * numElements)));
// cudaErrCheck(hipMemset(i_data, 0, seqLength * (inputNumElements + numLayers * numElements) * sizeof(float)));
init_helper(i_data, 0.2, seqLength * (inputNumElements + numLayers * numElements));
// curandErrCheck(hiprandGenerateUniform(rng, T_f, inputSize * hiddenSize * 4 + hiddenSize * hiddenSize * 4+ (numLayers - 1) * hiddenSize * hiddenSize * 8));
// cudaErrCheck(hipMemset(T_f, 0.1, inputSize * hiddenSize * 4 + hiddenSize * hiddenSize * 4+ (numLayers - 1) * hiddenSize * hiddenSize * 8 * sizeof(float)));
// curandErrCheck(hiprandGenerateUniform(rng, bias, numLayers * hiddenSize * 4));
set_weight(0.1,0.1,0.1,0);
// if (TRAINING) {
// init_helper(y_diff, 0, seqLength*numLayers*numElements);
// init_helper(y_diff+seqLength*(numLayers-1)*numElements, 1, seqLength * numElements);
// // curandErrCheck(hiprandGenerateUniform(rng, y_diff+seqLength*(numLayers-1)*numElements, seqLength * numElements));
// }
// curandErrCheck(hiprandDestroyGenerator(rng));
// Make sure everything is done before we start the timers
cudaErrCheck(hipDeviceSynchronize());
// prepare T
// float alpha = 1.f;
// float beta = 0.f;
// for (int layer = 0; layer < numLayers; layer++) {
// float *T_i_in = T + layer * hiddenSize * hiddenSize * 8;
// float *T_i_out = T_f + layer * hiddenSize * hiddenSize * 8;
// float *T_h_in = T + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
// float *T_h_out = T_f + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
// cublasErrCheck(hipblasSetStream(handle, stream_i[layer]));
// cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize));
// cublasErrCheck(hipblasSetStream(handle, stream_h[layer]));
// cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_h_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_h_out, 4 * hiddenSize));
// }
}
void clearStates(float * input=NULL, float * label_=NULL) {
if(TRAINING) {
if(input) {set_input(input);}
else {init_helper(i_data, 0.2, seqLength * (inputNumElements + numLayers * numElements));}
if(label_) set_label(label_);
else {init_helper(label, 1, numElements * seqLength);}
}
// init_helper(y_diff, 0, seqLength*(numLayers-1)*numElements);
// init_helper(y_diff+seqLength*(numLayers-1)*numElements, 1, seqLength * numElements);
// init_helper(peeps_diff, 0, 3 * numElements * numLayers * seqLength);
// curandErrCheck(hiprandGenerateUniform(rng, y_diff+seqLength*(numLayers-1)*numElements, seqLength * numElements));
// init_helper(c_diff, 0, numLayers * numElements );
// init_helper(loss, 0, numElements * seqLength);
// cudaErrCheck(hipDeviceSynchronize());
}
float Forward(float* sumLoss) {
float alpha = 1.f;
float beta = 0.f;
float elapsedTime;
hipEvent_t start, stop;
cudaErrCheck(hipEventCreate(&start));
cudaErrCheck(hipEventCreate(&stop));
cudaErrCheck(hipEventRecord(start));
int lStart = 0;
int lEnd = 0;
int rStart = 0;
int rEnd = 0;
int recurBatchSize = RECUR_BATCH_SIZE;
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
rStart = 0;
}
else {
// Move "up" and "left"
lStart++;
lEnd++;
rStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || rStart < 0) {
rStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (rStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
rStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || rStart < 0) {
break;
}
}
rEnd = rStart + recurBatchSize;
// printf("lStart %d lEnd %d rStart %d rEnd %d\n", lStart, lEnd,
// rStart, rEnd);
if (rEnd > seqLength) rEnd = seqLength;
for (int layer = lStart; layer < lEnd; layer++) {
cublasErrCheck(hipblasSetStream(handle, stream_i[layer]));
//wait for xt to be calculated
for (int i = rStart; i < rEnd; i++) {
if (layer > 0) {
cudaErrCheck(hipStreamWaitEvent(stream_i[layer], events_h[layer - 1][i], 0));
cudaErrCheck(hipEventDestroy(events_h[layer - 1][i]));
}
}
// Optimization 1
if (GROUP_GEMM) {
//[4N * N] x [N * 2m] = [4N * 2m]
cublasErrCheck(hipblasSgemm(handle,
transa, transb,
GATE_NUM * hiddenSize, miniBatch * (rEnd - rStart), layer > 0 ? hiddenSize: inputSize,
&alpha,
layer > 0 ? &T_f[GetOffsetW(layer)]:T_f,
transa == HIPBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
layer > 0 ? i_data + rStart * numElements + (layer - 1) * seqLength * numElements + seqLength * inputNumElements: i_data + rStart * inputNumElements,
layer > 0 ? hiddenSize:inputSize,
&beta,
tmp_i + GATE_NUM * rStart * numElements,
GATE_NUM * hiddenSize));
}
else {
for (int igemm =0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
transa, transb,
hiddenSize, miniBatch * (rEnd - rStart), layer > 0 ? hiddenSize:inputSize,
&alpha,
layer > 0 ? &T_f[GetOffsetW(layer) + igemm * hiddenSize]:&T_f[igemm * hiddenSize],
transa == HIPBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
layer > 0 ? i_data + rStart * numElements + (layer - 1) * seqLength * numElements + seqLength * inputNumElements: i_data + rStart * inputNumElements,
layer > 0 ? hiddenSize:inputSize,
&beta,
tmp_i + GATE_NUM * rStart * numElements + igemm * hiddenSize,
GATE_NUM * hiddenSize));
}
}
for (int i = rStart; i < rEnd; i++) {
cudaErrCheck(hipEventCreateWithFlags(&events_i[layer][i], hipEventDisableTiming));
cudaErrCheck(hipEventRecord(events_i[layer][i], stream_i[layer]));
}
for (int i = rStart; i < rEnd; i++) {
cublasErrCheck(hipblasSetStream(handle, stream_h[layer]));
// Optimization 1
if (GROUP_GEMM) {
//[4N * N] x [N * m] = [4N * m]
if (i > 0) {
cublasErrCheck(hipblasSgemm(handle,
transa, transb,
GATE_NUM * hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * inputSize * hiddenSize],
transa == HIPBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
// h_data + i * numElements + layer * (seqLength + 1) * numElements,
i_data + layer * seqLength * numElements + seqLength * inputNumElements + (i - 1) * numElements,
hiddenSize,
&beta,
tmp_h + GATE_NUM * layer * numElements,
GATE_NUM * hiddenSize));
}
}
else {
if (i > 0) {
for (int igemm =0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
transa, transb,
hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0? &T_f[GetOffsetR(layer) + igemm * hiddenSize]:&T_f[GATE_NUM * inputSize * hiddenSize + igemm * hiddenSize],
transa == HIPBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
i_data + layer * seqLength * numElements + seqLength * inputNumElements + (i - 1) * numElements,
hiddenSize,
&beta,
tmp_h + GATE_NUM * layer * numElements + igemm * hiddenSize,
GATE_NUM * hiddenSize));
}
}
}
cudaErrCheck(hipStreamWaitEvent(stream_h[layer], events_i[layer][i], 0));
cudaErrCheck(hipEventDestroy(events_i[layer][i]));
// Optimization 3
if (FUSE_PW) {
dim3 blockDim;
dim3 gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_fp) , dim3(gridDim), dim3(blockDim) , 0, stream_h[layer] ,
hiddenSize, miniBatch,
i > 0 ? tmp_h + GATE_NUM * layer * numElements:NULL,
tmp_i + GATE_NUM * i * numElements,
bias + GATE_NUM * layer * hiddenSize,
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
#else
NULL,
#endif
// TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
TRAINING ? stateGates + GATE_NUM * (i * numElements + layer * seqLength * numElements) : NULL,
// h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
i_data + i * numElements + layer * seqLength * numElements + seqLength * inputNumElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
layer == numLayers - 1 ? label + i * numElements : NULL,
layer == numLayers - 1 ? loss + i * numElements: NULL,
mask,
y_diff + i * numElements + layer * seqLength * numElements,
TRAINING);
cudaErrCheck(hipGetLastError());
}
else {
LSTM_elementwise_unfused(hiddenSize, miniBatch,
i > 0 ? tmp_h + GATE_NUM * layer * numElements:NULL,
tmp_i + GATE_NUM * i * numElements,
bias + GATE_NUM * layer * hiddenSize,
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
#else
NULL,
#endif
// TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
// h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
i_data + i * numElements + layer * seqLength * numElements + seqLength * inputNumElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
TRAINING,
stream_h[layer]);
}
if (layer != numLayers - 1) {
cudaErrCheck(hipEventCreateWithFlags(&events_h[layer][i], hipEventDisableTiming));
cudaErrCheck(hipEventRecord(events_h[layer][i], stream_h[layer]));
}
}
}
}
cublasErrCheck(hipblasSetStream(handle, stream_h[numLayers - 1]));
cublasErrCheck(hipblasSasum(handle, numElements * seqLength,
loss, 1, sumLoss));
cudaErrCheck(hipEventRecord(stop));
cudaErrCheck(hipEventSynchronize(stop));
cudaErrCheck(hipEventElapsedTime(&elapsedTime, start, stop));
cudaErrCheck(hipDeviceSynchronize());
cudaErrCheck(hipEventDestroy(start));
cudaErrCheck(hipEventDestroy(stop));
return elapsedTime;
}
float Backward(float learningRate) {
float elapsedTime;
hipEvent_t start_bp, stop_bp;
cudaErrCheck(hipEventCreate(&start_bp));
cudaErrCheck(hipEventCreate(&stop_bp));
cudaErrCheck(hipEventRecord(start_bp));
int lStart = 0;
int lEnd = 0;
int rStart = 0;
int rEnd = 0;
int rev_lStart = 0;
int rev_lEnd = 0;
int rev_rStart = 0;
int rev_rEnd = 0;
int recurBatchSize = RECUR_BATCH_BP_SIZE;
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
rStart = 0;
} else {
// Move "up" and "left"
lStart++;
lEnd++;
rStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || rStart < 0) {
rStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (rStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
rStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || rStart < 0) {
break;
}
}
rEnd = rStart + recurBatchSize;
if (rEnd > seqLength) rEnd = seqLength;
rev_lStart = numLayers - lEnd;
rev_lEnd = numLayers - lStart;
rev_rStart = seqLength - rStart - 1;
rev_rEnd = seqLength - rEnd - 1;
// printf("rev_lStart %d rev_lEnd %d rev_rStart %d rev_rEnd %d\n", rev_lStart, rev_lEnd, rev_rStart, rev_rEnd);
for (int layer = rev_lStart; layer < rev_lEnd; layer++) {
for (int i = rev_rStart; i > rev_rEnd; i--) {
// printf("level %d row %d\n", layer, i);
cublasErrCheck(hipblasSetStream(handle, stream_h[layer]));
//wait for the upper layer
// if (layer < numLayers-1) {
// cudaErrCheck(hipStreamWaitEvent(stream_h[layer], events_h[layer + 1][i], 0));
// cudaErrCheck(hipEventDestroy(events_h[layer + 1][i]));
// }
//pointwise operations get diff
// cudaErrCheck(hipDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
hipLaunchKernelGGL(( elementWise_bp) , dim3(gridDim), dim3(blockDim) , 0, stream_h[layer] ,
hiddenSize, miniBatch,
y_diff + i * numElements + layer * numElements * seqLength,
(i == seqLength - 1) ? NULL : stateGates_diff + GATE_NUM * ((i + 1) * numElements + layer * seqLength * numElements),
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements),
stateGates + GATE_NUM * (i * numElements + layer * seqLength * numElements),
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
peeps_diff + PEEP_NUM * (i * numElements + layer * seqLength * numElements),
#else
NULL,
NULL,
#endif
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
c_diff + layer * numElements,
i != 0);
// printWeight();
cudaErrCheck(hipGetLastError());
if (i == 1) {
cudaErrCheck(hipEventCreate(&events_i[layer][i], hipEventDisableTiming));
cudaErrCheck(hipEventRecord(events_i[layer][i], stream_h[layer]));
}
if(layer > 0) {
cudaErrCheck(hipEventCreate(&events_h[layer][i], hipEventDisableTiming));
cudaErrCheck(hipEventRecord(events_h[layer][i], stream_h[layer]));
}
if (i > 0) {
//RT * diff = dy
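// This GEMM back-propagates the gate deltas through the recurrent weights:
// dy[t-1] += R^T * dGates[t] (beta = 1 accumulates into y_diff of the same
// layer at the previous timestep); R starts at GetOffsetR(layer) inside T_f.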
float alpha = 1.f;
float beta = 1.f;
if (GROUP_GEMM) {
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, transb,
hiddenSize, miniBatch, GATE_NUM * hiddenSize,
&alpha,
layer > 0 ? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * hiddenSize * inputSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements),
GATE_NUM * hiddenSize,
&beta,
y_diff + layer * numElements * seqLength + (i - 1) * numElements,
hiddenSize));
}
else {
for (int igemm = 0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, transb,
hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0 ? &T_f[GetOffsetR(layer) + igemm * hiddenSize]:&T_f[GATE_NUM * hiddenSize * inputSize + igemm * hiddenSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements) + igemm * hiddenSize,
GATE_NUM * hiddenSize,
&beta,
y_diff + layer * numElements * seqLength + (i - 1) * numElements,
hiddenSize));
}
}
}
}
// transa = (PRE_TRANSPOSE && (seqLength > 1)) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
//W*diff = dx
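// Propagate the batched gate deltas of timesteps (rev_rEnd, rev_rStart] through
// the input weights: dx = W^T * dGates, written into y_diff of the layer below
// (its "dy"). The stream of layer-1 first waits on this layer's events_h so
// the deltas are ready before the GEMM launches.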
int row = rev_rEnd+1;
if (layer > 0) {
cublasErrCheck(hipblasSetStream(handle, stream_h[layer - 1]));
for (int i = rev_rStart; i > rev_rEnd; i--) {
cudaErrCheck(hipStreamWaitEvent(stream_h[layer-1], events_h[layer][i], 0));
cudaErrCheck(hipEventDestroy(events_h[layer][i]));
}
float alpha = 1.f;
float beta = 0.f;
if (GROUP_GEMM) {
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, transb,
hiddenSize, miniBatch*(rev_rStart - rev_rEnd), GATE_NUM * hiddenSize,
&alpha,
&T_f[GetOffsetW(layer)],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (row * numElements + layer * seqLength * numElements),
GATE_NUM * hiddenSize,
&beta,
y_diff + (layer - 1) * numElements * seqLength + row * numElements,
hiddenSize));
}
else {
for (int igemm = 0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_T, transb,
hiddenSize, miniBatch*(rev_rStart - rev_rEnd), hiddenSize,
&alpha,
&T_f[GetOffsetW(layer) + igemm * hiddenSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (row * numElements + layer * seqLength * numElements) + igemm * hiddenSize,
GATE_NUM * hiddenSize,
&beta,
y_diff + (layer - 1) * numElements * seqLength + row * numElements,
hiddenSize));
beta = 1.f;
}
}
}
if (row == 0) {
cublasErrCheck(hipblasSetStream(handle, stream_h[layer]));
float lr = -learningRate;
float beta = 1.f;
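// Once timestep 0 of this layer is done (row == 0), the SGD step is folded
// directly into the BLAS calls: with alpha = -learningRate and beta = 1 the
// products accumulate straight into the parameters.
// dW = dGates * x^T (all timesteps), dR = dGates[1:] * y[:-1]^T,
// dbias = dGates * 1 (diff_helper is a vector of ones), and likewise for the
// peephole weights when PEEPHOLES is defined.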
//update W
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
GATE_NUM * hiddenSize, layer > 0 ? hiddenSize : inputSize, miniBatch * seqLength,
&lr,
stateGates_diff + GATE_NUM * layer * seqLength * numElements,
GATE_NUM * hiddenSize,
layer > 0 ? i_data + (layer - 1) * seqLength * numElements + seqLength * inputNumElements : i_data,
layer > 0 ? hiddenSize : inputSize,
&beta,
layer > 0 ? &T_f[GetOffsetW(layer)]:&T_f[0],
GATE_NUM * hiddenSize));
cudaErrCheck(hipStreamWaitEvent(stream_i[layer], events_i[layer][1], 0));
cudaErrCheck(hipEventDestroy(events_i[layer][1]));
cublasErrCheck(hipblasSetStream(handle, stream_i[layer]));
//update R
cublasErrCheck(hipblasSgemm(handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
GATE_NUM * hiddenSize, hiddenSize, miniBatch * (seqLength - 1),
&lr,
stateGates_diff + GATE_NUM * (layer * seqLength * numElements + numElements),
GATE_NUM * hiddenSize,
i_data + layer * seqLength * numElements + seqLength * inputNumElements,
hiddenSize,
&beta,
layer > 0 ? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * hiddenSize * inputSize],
GATE_NUM * hiddenSize));
cublasErrCheck(hipblasSetStream(handle, stream_h[layer]));
//update bias
cublasErrCheck(hipblasSgemv(handle,
HIPBLAS_OP_N,
GATE_NUM * hiddenSize, miniBatch * seqLength,
&lr,
stateGates_diff + GATE_NUM * (layer * seqLength * numElements),
GATE_NUM * hiddenSize,
diff_helper,
1,
&beta,
&bias[layer * hiddenSize * GATE_NUM],
1));
#ifdef PEEPHOLES
//update peeps
cublasErrCheck(hipblasSgemv(handle,
HIPBLAS_OP_N,
PEEP_NUM * hiddenSize, miniBatch * seqLength,
&lr,
peeps_diff + PEEP_NUM * (layer * numElements * seqLength),
PEEP_NUM * hiddenSize,
diff_helper,
1,
&beta,
&peeps[layer * hiddenSize * PEEP_NUM],
1));
#endif
}
}
}
cudaErrCheck(hipEventRecord(stop_bp));
cudaErrCheck(hipEventSynchronize(stop_bp));
cudaErrCheck(hipEventElapsedTime(&elapsedTime, start_bp, stop_bp));
cudaErrCheck(hipDeviceSynchronize());
cudaErrCheck(hipEventDestroy(start_bp));
cudaErrCheck(hipEventDestroy(stop_bp));
return elapsedTime;
}
void printChecksum() {
float* testOutputi;
// float* testOutputh;
float* testOutputc;
int numElements = hiddenSize * miniBatch;
testOutputi = (float*)malloc(numElements * seqLength * sizeof(float));
// testOutputh = (float*)malloc(numElements * numLayers * sizeof(float));
testOutputc = (float*)malloc(numElements * numLayers * sizeof(float));
cudaErrCheck(hipMemcpy(testOutputi, i_data + seqLength * inputNumElements + (numLayers-1) * seqLength * numElements, seqLength * numElements * sizeof(float), hipMemcpyDeviceToHost));
for (int layer = 0; layer < numLayers; layer++) {
// cudaErrCheck(hipMemcpy(testOutputh + layer * numElements, h_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(testOutputc + layer * numElements, c_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), hipMemcpyDeviceToHost));
}
double checksumi = 0.;
// double checksumh = 0.;
double checksumc = 0.;
for (int m = 0; m < miniBatch; m++) {
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < hiddenSize; i++) {
checksumi += testOutputi[j * numElements + m * hiddenSize + i];
printf("i: (%d,%d): %f\n", j, i, testOutputi[j * numElements + m * hiddenSize + i]);
}
}
for (int j = 0; j < numLayers; j++) {
for (int i = 0; i < hiddenSize; i++) {
// checksumh += testOutputh[j * numElements + m * hiddenSize + i];
checksumc += testOutputc[j * numElements + m * hiddenSize + i];
}
}
if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi);
// if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh);
if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc);
}
printf("i checksum %f ", checksumi);
printf("c checksum %E \n", checksumc);
// printf("h checksum %E\n", checksumh);
free(testOutputi);
free(testOutputc);
// free(testOutputh);
cudaErrCheck(hipDeviceSynchronize());
}
void printWeight() {
int t_size = (GATE_NUM * inputSize * hiddenSize + GATE_NUM * hiddenSize * hiddenSize);
int bias_size = GATE_NUM * hiddenSize * numLayers;
int stats_size = GATE_NUM * numElements * seqLength;
int c_diff_size = numLayers * numElements;
float* t_output, * bias_output, * states_output,
* y_output,
* c_diff_output;
t_output = (float*)malloc( t_size * sizeof(float));
bias_output = (float*)malloc(bias_size * sizeof(float));
states_output = (float*)malloc(stats_size * sizeof(float)); // must hold the full stateGates_diff copy below
y_output = (float*)malloc(seqLength * sizeof(float));
c_diff_output = (float*)malloc(c_diff_size * sizeof(float));
cudaErrCheck(hipMemcpy(t_output, T_f, t_size * sizeof(float),hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(bias_output,bias,bias_size * sizeof(float),hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(states_output,stateGates_diff,stats_size * sizeof(float),hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(y_output, y_diff, seqLength * sizeof(float), hipMemcpyDeviceToHost));
cudaErrCheck(hipMemcpy(c_diff_output, c_diff, c_diff_size * sizeof(float), hipMemcpyDeviceToHost));
printf("weights:\t");
for (int i = 0; i < t_size;i++) {
printf("%f\t",t_output[i]);
}
printf("\nbias:\t");
for (int i = 0; i < bias_size; i++) {
printf("%f\t",bias_output[i]);
}
printf("\n");
#ifdef PEEPHOLES
float * peeps_output;
int peeps_size = PEEP_NUM * numElements * numLayers * seqLength;
peeps_output = (float*)malloc(peeps_size * sizeof(float));
cudaErrCheck(hipMemcpy(peeps_output,peeps_diff, peeps_size * sizeof(float),hipMemcpyDeviceToHost));
printf("peeps:\t");
for (int i = 0; i < peeps_size; i++) {
printf("%f\t",peeps_output[i]);
}
printf("\n");
#endif
printf("states:\t");
for (int i = 0; i < stats_size; i++) {
printf("%f\t",states_output[i]);
}
printf("\ny:\t");
for (int i = 0; i < seqLength; i++) {
printf("%f\t",y_output[i]);
}
printf("\nc_diff:\t");
for (int i = 0; i < c_diff_size ; i++) {
printf("%f\t",c_diff_output[i]);
}
printf("\n");
curandErrCheck(hiprandDestroyGenerator(rng));
}
void freeMemory() {
// cudaErrCheck(hipFree(h_data));
cudaErrCheck(hipFree(i_data));
cudaErrCheck(hipFree(c_data));
if (T != T_f) cudaErrCheck(hipFree(T));
cudaErrCheck(hipFree(T_f));
cudaErrCheck(hipFree(bias));
cudaErrCheck(hipFree(loss));
cudaErrCheck(hipFree(label));
cudaErrCheck(hipFree(mask));
#ifdef PEEPHOLES
cudaErrCheck(hipFree(peeps));
#endif
cudaErrCheck(hipFree(tmp_h));
cudaErrCheck(hipFree(tmp_i));
if (TRAINING) {
// cudaErrCheck(hipMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(hipFree(stateGates));
cudaErrCheck(hipFree(stateGates_diff));
cudaErrCheck(hipFree(y_diff));
cudaErrCheck(hipFree(c_diff));
#ifdef PEEPHOLES
cudaErrCheck(hipFree(peeps_diff));
#endif
cudaErrCheck(hipFree(diff_helper));
}
for (int i = 0; i < numLayers; i++) {
if (stream_i[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_i[i]));
if (stream_h[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_h[i]));
}
free(stream_i);
free(stream_h);
for (int i = 0; i < numLayers; i++) {
free(events_i[i]);
free(events_h[i]);
}
free(events_i);
free(events_h);
}
};
float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, int inputSize, bool checkF) {
float loss;
float elapsedTime;
hipEvent_t global_start, global_end, run_start, run_end;
cudaErrCheck(hipEventCreate(&global_start));
cudaErrCheck(hipEventCreate(&global_end));
cudaErrCheck(hipEventCreate(&run_start));
cudaErrCheck(hipEventCreate(&run_end));
// cudaErrCheck(hipEventDestroy(run_start));
// cudaErrCheck(hipEventDestroy(run_end));
cudaErrCheck(hipEventRecord(global_start));
LSTM_scheduler scheduler(hiddenSize,miniBatch,seqLength,numLayers,inputSize);
scheduler.init();
printf("Initialize success\n");
cudaErrCheck(hipEventRecord(run_start));
// cudaErrCheck(hipEventSynchronize(run_start));
// scheduler.Forward(&loss);
// printf("Forward loss is %f\n", loss);
// if (checkF) {
// scheduler.printChecksum();
// }
for (int i = 0; i < 10; i++) {
elapsedTime = scheduler.Forward(&loss);
printf("Forward time is %f, loss is %f\n", elapsedTime, loss);
if (TRAINING) {
#ifdef UPDATE
scheduler.clearStates();
#endif
elapsedTime = scheduler.Backward(0.2);
printf("Backward time is %f\n", elapsedTime);
}
// scheduler.printWeight();
}
// scheduler.Forward(&loss);
// printf("Forward loss is %f\n", loss);
cudaErrCheck(hipEventRecord(run_end));
// We're done. Print some checksums
// if (checkF) {
// scheduler.printChecksum();
// }
scheduler.freeMemory();
cudaErrCheck(hipDeviceSynchronize());
cudaErrCheck(hipEventRecord(global_end));
cudaErrCheck(hipEventSynchronize(global_end));
cudaErrCheck(hipEventElapsedTime(&elapsedTime, run_start, run_end));
printf("Running time used %f ms, avg %f\n", elapsedTime, elapsedTime/10);
cudaErrCheck(hipEventElapsedTime(&elapsedTime, global_start, global_end));
printf("Total time used %f ms\n", elapsedTime);
// cudaErrCheck(hipEventDestroy(global_start));
// cudaErrCheck(hipEventDestroy(global_end));
cudaErrCheck(hipEventElapsedTime(&elapsedTime, global_start, run_start));
printf("Initialize time used %f ms\n", elapsedTime);
cudaErrCheck(hipEventElapsedTime(&elapsedTime, run_end, global_end));
printf("Memory free time used %f ms\n", elapsedTime);
return 0;
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int miniBatch;
int inputSize;
printf("\n");
if (argc == 6) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
miniBatch = atoi(argv[4]);
inputSize = atoi(argv[5]);
}
else if (argc == 1) {
printf("Running with default settings\n");
inputSize = 512;
seqLength = 100;
numLayers = 4;
hiddenSize = 512;
miniBatch = 64;
}
else {
printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> <inputSize>\n");
return 1;
}
printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d inputSize %d\n", seqLength, numLayers, hiddenSize, miniBatch, inputSize);
int numRuns = 1;
float totalTime = 0.f;
for (int run = 0; run < numRuns; run++) {
totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, inputSize, true);
}
// printf("Runtime %fms\n", totalTime / numRuns);
return totalTime < 0;
}
3c46550e411a368ab803987851b55a2a1ec2f9bd.cu
/* Copyright (c) 1993-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Compile:
nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu
To enable/disable different performance options add the flag -DPERFOPTS=x
Where x is a bitmask defining the options used (see below).
Run:
./LSTM
or
./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> <inputSize>
Example (run on an NVIDIA M40):
> ./LSTM
Running with default settings
seqLength 100, numLayers 4, hiddenSize 512, miniBatch 64
i checksum (example 0) 5.113463E+04
h checksum (example 0) 2.048000E+03
c checksum (example 0) 2.058137E+05
i checksum 3.272639E+06 c checksum 1.317278E+07 h checksum 1.310720E+05
Runtime 27.807743ms
*/
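// Example invocation (a sketch, assuming the same nvcc toolchain as the header
// above): build with only grouped GEMMs and per-layer streams enabled (bits 0
// and 1 of the PERFOPTS bitmask; note FUSE_PW and PRE_TRANSPOSE are forced on
// below regardless of PERFOPTS):
//   nvcc -arch=sm_52 -O3 -lcublas -lcurand -DPERFOPTS=3 -o LSTM LSTM.cu
//   ./LSTM 100 4 512 64 512   # seqLength numLayers hiddenSize miniBatch inputSize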
#include <stdio.h>
#include <cublas_v2.h>
#include <curand.h>
#define TRAINING (true)
// #define UPDATE
// #define PEEPHOLES
// #define CIFG
#define GATE_NUM (4)
#define PEEP_NUM (3)
#define I_INDEX (0)
#define F_INDEX (1)
#define G_INDEX (2)
#define O_INDEX (3)
#ifndef PERFOPTS
#define PERFOPTS (31)
#endif
#define GROUP_GEMM ((PERFOPTS & 1))
#define USE_STREAMS ((PERFOPTS & 2))
// #define FUSE_PW ((PERFOPTS & 4))
#define FUSE_PW (1)
// #define PRE_TRANSPOSE ((PERFOPTS & 8))
#define PRE_TRANSPOSE (1)
#define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1))
#define RECUR_BATCH_BP_SIZE (((PERFOPTS & 16) ? 5 : 1))
#define HFUNC tanhf
#define DEHFUNC de_tanhf
#define GFUNC tanhf
#define DEGFUNC de_tanhf
#define LOSSFUNC squaree
#define DELOSSFUNC de_squaree
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
#define GetOffsetW(x) (GATE_NUM * inputSize * hiddenSize + GATE_NUM * hiddenSize * hiddenSize + (x - 1) * 2 * GATE_NUM * hiddenSize * hiddenSize)
#define GetOffsetR(x) (GetOffsetW(x) + GATE_NUM * hiddenSize * hiddenSize)
// (layer - 1) * 8 * hiddenSize * hiddenSize + 8 * hiddenSize * hiddenSize + 4 * hiddenSize * inputSize// #define GetOffsetR(x)
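// Weight layout inside the flat T / T_f buffer, per the two macros above:
//   [ W0 (GATE_NUM*hiddenSize x inputSize) | R0 (GATE_NUM*hiddenSize x hiddenSize)
//     | W1 | R1 | ... ] where every Wx/Rx for x >= 1 is GATE_NUM*hiddenSize x hiddenSize.
// Layer 0 reads W at offset 0 and R at GATE_NUM*hiddenSize*inputSize; for
// x >= 1, GetOffsetW(x)/GetOffsetR(x) return the start of that layer's input
// and recurrent weights respectively.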
// Device functions
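// Note: the de_* helpers take the *output* of their activation, not its input
// (e.g. de_tanhf(y) = 1 - y^2 for y = tanhf(x)), which is why the backward
// kernel can work from the cached gate activations alone.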
__forceinline__ __device__ float sigmoidf(float in) {
return 1.f / (1.f + expf(-in));
}
__forceinline__ __device__ float de_sigmoidf(float out) {
return out * (1-out);
}
__forceinline__ __device__ float linearf(float in) {
return in;
}
__forceinline__ __device__ float de_linearf(float out) {
return 1.f;
}
__forceinline__ __device__ float de_tanhf(float out) {
return 1.f - pow(out, 2);
}
__forceinline__ __device__ float squaree(float output, float target) {
return pow(output - target, 2);
}
__forceinline__ __device__ float de_squaree(float output, float target) {
return 2 * (output - target);
}
__forceinline__ __device__ float entropye(float output, float target) {
return -(target * logf(output) + (1.f - target) * logf(1.f - output));
}
__forceinline__ __device__ float de_entropye(float output, float target) {
return -(target / output + (1.f - target) / (output - 1.f));
}
__global__ void pw_de_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = 1 - pow(a[i], 2);
}
__global__ void pw_de_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * (1 - a[i]);
}
// Pointwise functions
__global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += bias[i % nBias];
}
__global__ void pw_peepsAdd(float *y, float *peeps, float *x, int n, int nPeeps) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] += peeps[i % nPeeps] * x[i];
}
__global__ void pw_vecAdd(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] + b[i];
}
__global__ void pw_vecMul(float *y, float *a, float *b, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a[i] * b[i];
}
__global__ void pw_tanh(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = tanh(a[i]);
}
__global__ void pw_sigmoid(float *y, float *a, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = sigmoidf(a[i]);
}
// Unfused LSTM (calling many pointwise kernels).
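// Note: with FUSE_PW fixed to 1 above, this fallback path is not exercised in
// this build; it is kept for reference and bails out (returns 1) when
// `training` is set, since it does not cache the gate activations needed for
// backprop.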
int LSTM_elementwise_unfused( int hiddenSize,
int miniBatch,
float * __restrict__ tmp_h,
float * __restrict__ tmp_i,
float * __restrict__ bias,
float * __restrict__ peeps,
// float * __restrict__ linearGates,
// float * __restrict__ h_data,
float * __restrict__ i_data,
float * __restrict__ c_in,
float * __restrict__ c_out,
bool training,
cudaStream_t stream) {
dim3 blockDim;
dim3 gridDim;
int numElements = hiddenSize * miniBatch;
blockDim.x = 128;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
for (int i = 0; i < GATE_NUM; i++) {
if (tmp_h != NULL) {
pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, tmp_i + i * numElements,
tmp_h + i * numElements, numElements);
cudaErrCheck(cudaGetLastError());
}
pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize);
cudaErrCheck(cudaGetLastError());
if (i == 0) {
pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize);
cudaErrCheck(cudaGetLastError());
}
if (training) {
printf("LSTM_elementWise_unfused does not support training\n");
return 1;
}
}
float *in_gate = tmp_i +
#ifdef I_INDEX
I_INDEX * numElements;//i
#else
NULL;
#endif
float *forget_gate = tmp_i +
#ifdef F_INDEX
F_INDEX * numElements;//f
#else
NULL;
#endif
float *in_gate2 = tmp_i +
#ifdef G_INDEX
G_INDEX * numElements;//z
#else
NULL;
#endif
float *out_gate = tmp_i +
#ifdef O_INDEX
O_INDEX * numElements;//z
#else
NULL;
#endif
if (c_in != NULL) {
//i_t += p_i * c_t-1
#ifdef I_INDEX
pw_peepsAdd <<< gridDim, blockDim, 0, stream >>> (in_gate, peeps + I_INDEX * hiddenSize, c_in, numElements, hiddenSize);
cudaErrCheck(cudaGetLastError());
#endif
//f_t += p_f * c_t-1
#ifdef F_INDEX
pw_peepsAdd <<< gridDim, blockDim, 0, stream >>> (forget_gate, peeps + F_INDEX * hiddenSize, c_in, numElements, hiddenSize);
cudaErrCheck(cudaGetLastError());
#endif
}
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (in_gate, tmp_i + 0 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (forget_gate, tmp_i + 1 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
//z'
pw_tanh <<< gridDim, blockDim, 0, stream >>> (in_gate2, tmp_i + 2 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
if (c_in == NULL) {
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(cudaGetLastError());
} else {
//f_t * c_t-1
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (forget_gate, forget_gate, c_in, numElements);
cudaErrCheck(cudaGetLastError());
//i_t * z
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements);
cudaErrCheck(cudaGetLastError());
//c_t = f_t * c_t-1 + i_t * c_t'
pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, forget_gate, numElements);
cudaErrCheck(cudaGetLastError());
}
//o_t += p_o * c_t
pw_peepsAdd <<< gridDim, blockDim, 0, stream >>> (out_gate, peeps + 2 * hiddenSize, in_gate, numElements, hiddenSize);
cudaErrCheck(cudaGetLastError());
pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (out_gate, tmp_i + 3 * numElements, numElements);
cudaErrCheck(cudaGetLastError());
if (c_out != NULL) {
cudaErrCheck(cudaMemcpyAsync(c_out, in_gate, numElements * sizeof(float), cudaMemcpyDeviceToDevice, stream));
}
//tanh(c_t)
pw_tanh <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, numElements);
cudaErrCheck(cudaGetLastError());
//y = o_t * tanh(c_t)
// pw_vecMul <<< gridDim, blockDim, 0, stream >>> (h_data, out_gate, in_gate, numElements);
// cudaErrCheck(cudaGetLastError());
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (i_data, out_gate, in_gate, numElements);
cudaErrCheck(cudaGetLastError());
return 0;
}
// Fused forward kernel
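// Per element (one hidden unit of one batch sample), with the gate
// pre-activations a_* = W*x_t + R*y_{t-1} + bias (tmp_i + tmp_h + bias), this
// kernel evaluates roughly:
//   i_t = sigmoid(a_i [+ p_i * c_{t-1}])
//   f_t = sigmoid(a_f [+ p_f * c_{t-1}])     (or 1 - i_t when CIFG is defined)
//   z_t = GFUNC(a_z)
//   c_t = f_t * c_{t-1} + i_t * z_t
//   o_t = sigmoid(a_o [+ p_o * c_t])
//   y_t = o_t * HFUNC(c_t)
// On the last layer it also applies LOSSFUNC against `label` under `mask`
// and, when training, seeds y_diff with DELOSSFUNC for the backward pass.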
__global__ void elementWise_fp(int hiddenSize, int miniBatch,
float *tmp_h, //hidden_size * mini_batch * 4: W*xt
float *tmp_i, //hidden_size * mini_batch * 4: R*yt
float *bias, //hidden_size * 4: b*
float *peeps,//hidden_size * 3: p*
// float *linearGates,// hidden_size * mini_batch * 4
float *stateGates,
// float *h_out, //h_data
float *i_out,
float *c_in,
float *c_out,
float *label,
float *loss,
float *mask,
float *y_diff,
bool training) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int batch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + GATE_NUM * batch * hiddenSize;
float g[GATE_NUM];
for (int i = 0; i < GATE_NUM; i++) {
g[i] = tmp_i[i * hiddenSize + gateIndex] + (tmp_h == NULL? 0:tmp_h[i * hiddenSize + gateIndex]);
g[i] += bias[i * hiddenSize + index % hiddenSize];
}
#ifdef PEEPHOLES
#ifdef I_INDEX
g[I_INDEX] += peeps[I_INDEX * hiddenSize + index % hiddenSize] * c_in[index];//pi * c[t-1]
#endif
#ifdef F_INDEX
g[F_INDEX] += peeps[F_INDEX * hiddenSize + index % hiddenSize] * c_in[index];//pf * c[t-1]
#endif
#endif
float in_gate =
#ifdef I_INDEX
sigmoidf(g[I_INDEX]);//i
#else
1.f;
#endif
float forget_gate =
#ifdef F_INDEX
sigmoidf(g[F_INDEX]);//f
#else
#ifdef CIFG
(1 - in_gate);
#else
1.f;
#endif
#endif
float in_gate2 =
#ifdef G_INDEX
GFUNC(g[G_INDEX]);//z
#else
1.f;
#endif
float val = (forget_gate * c_in[index]) + (in_gate * in_gate2);//c[t] = z*i+c[t-1]*f
c_out[index] = val;
#ifdef PEEPHOLES
#ifdef O_INDEX
g[O_INDEX] += peeps[hiddenSize * (O_INDEX - 1) + index % hiddenSize] * c_out[index];//po * c[t]
#endif
#endif
float out_gate =
#ifdef O_INDEX
sigmoidf(g[O_INDEX]);//o
#else
1.f;
#endif
// #ifdef TRAINING
if(training) {
#ifdef I_INDEX
stateGates[I_INDEX * hiddenSize + gateIndex] = in_gate;
#endif
#ifdef F_INDEX
stateGates[F_INDEX * hiddenSize + gateIndex] = forget_gate;
#endif
#ifdef G_INDEX
stateGates[G_INDEX*hiddenSize + gateIndex] = in_gate2;
#endif
#ifdef O_INDEX
stateGates[O_INDEX*hiddenSize + gateIndex] = out_gate;
#endif
}
// #endif
val = out_gate * HFUNC(val); //h
// h_out[index] = val;
i_out[index] = val;
if (label != NULL) {
if (mask[index % hiddenSize] == 1) {
loss[index] = LOSSFUNC(val, label[index]);
if (training)
y_diff[index] = DELOSSFUNC(val, label[index]);
} else {
if (training)
y_diff[index] = 0;
}
}
}
// Fused backward kernel
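// Per element, given dy_t (y_diff), the cached activations in stateGates and
// the carry in c_diff (f_{t+1} * dc_{t+1}, skipped at the last timestep), this
// kernel computes roughly:
//   do_t = dy_t * HFUNC(c_t) * o_t * (1 - o_t)
//   dc_t = dy_t * o_t * DEHFUNC(HFUNC(c_t)) + carry [+ peephole terms]
//   di_t = dc_t * z_t * i_t * (1 - i_t)       (CIFG uses (z_t - c_{t-1}))
//   df_t = dc_t * c_{t-1} * f_t * (1 - f_t)
//   dz_t = dc_t * i_t * DEGFUNC(z_t)
// and stores dc_t * f_t back into c_diff for the next (earlier) timestep.
// Peephole gradients are accumulated into peeps_diff when PEEPHOLES is defined.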
__global__ void elementWise_bp(int hiddenSize, int miniBatch,
float *y_diff,
float *stateGates_diff_in,// hidden_size * mini_batch * 4
float *stateGates_diff_out,// hidden_size * mini_batch * 4
float *stateGates,
float *peeps,
float *peeps_diff,
float *c_in,
float *c_out,
float *c_diff,
bool peeps_update) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int numElements = miniBatch * hiddenSize;
if (index >= numElements) return;
int batch = index / hiddenSize;
int gateIndex = (index % hiddenSize) + GATE_NUM * batch * hiddenSize;
float in_gate =
#ifdef I_INDEX
stateGates[I_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
float forget_gate =
#ifdef F_INDEX
stateGates[F_INDEX * hiddenSize + gateIndex];
#else
#ifdef CIFG
1 - in_gate;
#else
1.f;
#endif
#endif
float in_gate2 =
#ifdef G_INDEX
stateGates[G_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
float out_gate =
#ifdef O_INDEX
stateGates[O_INDEX * hiddenSize + gateIndex];
#else
1.f;
#endif
#ifdef O_INDEX
float out_diff = y_diff[index]*HFUNC(c_out[index])*de_sigmoidf(out_gate); //do
#endif
float peep_diff = 0;
if (stateGates_diff_in != NULL) peep_diff += c_diff[index];
#ifdef PEEPHOLES
#ifdef O_INDEX
peep_diff += peeps[(O_INDEX - 1) * hiddenSize + index % hiddenSize] * out_diff;//po * do
#endif
if (stateGates_diff_in != NULL) {
#ifdef I_INDEX
peep_diff += peeps[I_INDEX * hiddenSize + index % hiddenSize] * stateGates_diff_in[I_INDEX * hiddenSize + gateIndex]; //pi * di[t+1]
#endif
#ifdef F_INDEX
peep_diff += peeps[F_INDEX * hiddenSize + index % hiddenSize] * stateGates_diff_in[F_INDEX * hiddenSize + gateIndex];//pf * df[t+1]
#endif
}
#endif
float local_c_diff = y_diff[index]*out_gate*DEHFUNC(HFUNC(c_out[index])) + peep_diff;
float in_diff2 = local_c_diff * in_gate * DEGFUNC(in_gate2);
#ifdef I_INDEX
float in_diff = local_c_diff *
#ifdef CIFG
(in_gate2 - c_in[index])
#else
in_gate2
#endif
* de_sigmoidf(in_gate);
stateGates_diff_out[I_INDEX * hiddenSize + gateIndex] = in_diff;
#endif
#ifdef F_INDEX
float forget_diff = local_c_diff * c_in[index] * de_sigmoidf(forget_gate);
stateGates_diff_out[F_INDEX * hiddenSize + gateIndex] = forget_diff;
#endif
#ifdef G_INDEX
stateGates_diff_out[G_INDEX * hiddenSize + gateIndex] = in_diff2;
#endif
#ifdef O_INDEX
stateGates_diff_out[O_INDEX * hiddenSize + gateIndex] = out_diff;
#endif
#ifdef PEEPHOLES
int peepIndex = (index % hiddenSize) + PEEP_NUM * batch * hiddenSize;
if (peeps_update) {
#ifdef I_INDEX
peeps_diff[I_INDEX * hiddenSize + peepIndex] = in_diff * c_in[index];//p_i
#endif
#ifdef F_INDEX
peeps_diff[F_INDEX * hiddenSize + peepIndex] = forget_diff * c_in[index]; //p_f
#endif
}
#ifdef O_INDEX
peeps_diff[(O_INDEX - 1) * hiddenSize + peepIndex] = out_diff * c_out[index]; //p_o
#endif
#endif
c_diff[index] = local_c_diff * forget_gate;
}
struct LSTM_scheduler
{
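// Main device buffers (sizes set in init()):
//   i_data : the input sequence (inputSize x miniBatch x seqLength) followed by
//            each layer's outputs y_t (hiddenSize x miniBatch x seqLength per layer).
//   c_data : seqLength + 1 cell states per layer (slot 0 is the initial state).
//   tmp_i  : gate pre-activations W*x_t for up to seqLength steps of one layer.
//   tmp_h  : gate pre-activations R*y_{t-1} for the current step of each layer.
//   T/T_f  : all W and R matrices packed as described at GetOffsetW/GetOffsetR.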
// float *h_data;//y
float *i_data;//x
float *c_data;//c
float *T;
float *T_f;
float *bias;
float *tmp_h;
float *tmp_i;
// float *linearGates;
float *stateGates;
//diff
float *stateGates_diff; //di,df,dz,do
float *y_diff;//dy
// float *T_diff;//dW, dR
float *diff_helper;
#ifdef PEEPHOLES
float *peeps_diff;
float *peeps;
#endif
float *c_diff;//dc*ft
float *label;
float *loss;
float *mask;
cudaStream_t *stream_i;
cudaStream_t *stream_h;
cudaEvent_t **events_i;
cudaEvent_t **events_h;
cublasHandle_t handle;
int hiddenSize;
int miniBatch;
int seqLength;
int numLayers;
int numElements;
int inputSize;
int inputNumElements;
int inputLayerSize;
int weightSize;
cublasOperation_t transa;
cublasOperation_t transb;
curandGenerator_t rng;
void init_helper(float * device_ptr, float data, int size) {
if (size == 0)
return;
float* host_ptr;
host_ptr = (float *)malloc(size * sizeof(float));
for (int i = 0; i < size; ++i) {
host_ptr[i] = data;
}
cudaErrCheck(cudaMemcpy(device_ptr, host_ptr, size * sizeof(float), cudaMemcpyHostToDevice));
}
void set_input(float * input) {
cudaErrCheck(cudaMemcpy(i_data, input, inputLayerSize * sizeof(float), cudaMemcpyHostToDevice));
}
void set_label(float * label_) {
cudaErrCheck(cudaMemcpy(label, label_, numElements * seqLength * sizeof(float), cudaMemcpyHostToDevice));
}
void set_mask(float * mask_) {
if (mask_ == NULL) {
init_helper(mask, 1, 1);
init_helper(mask + 1, 0, hiddenSize-1);
} else {
cudaErrCheck(cudaMemcpy(mask, mask_, numElements * seqLength * sizeof(float), cudaMemcpyHostToDevice));
}
}
void set_weight(float T_f_=0.1, float bias_=0.1, float peeps_=0.1, int random=0) {
if (!(random & 1)) {
init_helper(T_f, T_f_, weightSize);
}
else {
curandErrCheck(curandGenerateUniform(rng, T_f, weightSize));
}
if (!(random & 2)) {
init_helper(bias, bias_, hiddenSize * GATE_NUM * numLayers);
}
else {
curandErrCheck(curandGenerateUniform(rng, bias, hiddenSize * GATE_NUM * numLayers));
}
#ifdef PEEPHOLES
if (!(random & 4)) {
init_helper(peeps, peeps_, hiddenSize * 3 * numLayers);
}
else {
curandErrCheck(curandGenerateUniform(rng, bias, hiddenSize * PEEP_NUM * numLayers));
}
#endif
}
LSTM_scheduler(int hiddenSize_, int miniBatch_, int seqLength_, int numLayers_, int inputSize_)
{
transa = (PRE_TRANSPOSE && (seqLength > 1)) ? CUBLAS_OP_N : CUBLAS_OP_T;
transb = CUBLAS_OP_N;
hiddenSize = hiddenSize_;
miniBatch = miniBatch_;
seqLength = seqLength_;
numLayers = numLayers_;
inputSize = inputSize_;
numElements = hiddenSize * miniBatch;
inputNumElements = inputSize * miniBatch;
inputLayerSize = inputNumElements * seqLength;
weightSize = GetOffsetW(numLayers);
cublasErrCheck(cublasCreate(&handle));
curandErrCheck(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
curandErrCheck(curandSetPseudoRandomGeneratorSeed(rng, 1337ull));
stream_i = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t));
stream_h = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t));
// If we don't want to use streams we can launch everything in to the NULL stream
for (int i = 0; i < numLayers; i++) {
if (USE_STREAMS) {
cudaErrCheck(cudaStreamCreate(&stream_i[i]));
// Priority is empirical.
cudaErrCheck(cudaStreamCreateWithPriority(&stream_h[i], 0, -1));
}
else {
stream_i[i] = NULL;
stream_h[i] = NULL;
}
}
events_i = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*));
events_h = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*));
for (int i = 0; i < numLayers; i++) {
events_i[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t));
events_h[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t));
}
}
void init(float* mask_ = NULL) {
// cudaErrCheck(cudaMalloc((void**)&h_data, (seqLength + 1) * (inputNumElements + (numLayers - 1) * numElements) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&i_data, (seqLength * inputNumElements + seqLength * numLayers * numElements) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&T, (inputSize * hiddenSize * GATE_NUM + hiddenSize * hiddenSize * GATE_NUM + (numLayers - 1) * hiddenSize * hiddenSize * 2 * GATE_NUM) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&T_f, (inputSize * hiddenSize * GATE_NUM + hiddenSize * hiddenSize * GATE_NUM + (numLayers - 1) * hiddenSize * hiddenSize * 2 * GATE_NUM) * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&bias, numLayers * hiddenSize * GATE_NUM * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&label, numElements * seqLength * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&loss, numElements * seqLength * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&mask, hiddenSize * sizeof(float)));
set_mask(NULL);
init_helper(loss, 0, numElements * seqLength);
init_helper(label, 1, numElements * seqLength);
#ifdef PEEPHOLES
cudaErrCheck(cudaMalloc((void**)&peeps, numLayers * hiddenSize * PEEP_NUM * sizeof(float)));
init_helper(peeps, 0.1, numLayers * hiddenSize * PEEP_NUM);
// curandErrCheck(curandGenerateUniform(rng, peeps, numLayers * hiddenSize * 3));
#endif
// Workspace
cudaErrCheck(cudaMalloc((void**)&tmp_h, GATE_NUM * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&tmp_i, GATE_NUM * seqLength * numElements * sizeof(float)));
// // Activations
if (TRAINING) {
// cudaErrCheck(cudaMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&stateGates, GATE_NUM * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&stateGates_diff, GATE_NUM * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&y_diff, seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_diff, numLayers * numElements * seqLength * sizeof(float)));
#ifdef PEEPHOLES
cudaErrCheck(cudaMalloc((void**)&peeps_diff, numLayers * numElements * seqLength * PEEP_NUM * sizeof(float)));
init_helper(peeps_diff, 0, numLayers * numElements * seqLength * PEEP_NUM);
#endif
cudaErrCheck(cudaMalloc((void**)&diff_helper, miniBatch * seqLength * sizeof(float)));
init_helper(diff_helper, 1.0, miniBatch * seqLength);
init_helper(c_diff, 0, numLayers * numElements);
init_helper(stateGates_diff, 0, GATE_NUM * seqLength * numLayers * numElements);
}
// Initialise with random values.
// curandErrCheck(curandGenerateUniform(rng, h_data, (seqLength + 1) * (numLayers) * numElements));
// curandErrCheck(curandGenerateUniform(rng, c_data, (seqLength + 1) * (numLayers) * numElements));
// cudaErrCheck(cudaMemset(c_data, 0, (seqLength + 1) * (numLayers) * numElements * sizeof(float)));
init_helper(c_data, 0, (seqLength + 1) * (numLayers) * numElements);
// curandErrCheck(curandGenerateUniform(rng, i_data, seqLength * (inputNumElements + numLayers * numElements)));
// cudaErrCheck(cudaMemset(i_data, 0, seqLength * (inputNumElements + numLayers * numElements) * sizeof(float)));
init_helper(i_data, 0.2, seqLength * (inputNumElements + numLayers * numElements));
// curandErrCheck(curandGenerateUniform(rng, T_f, inputSize * hiddenSize * 4 + hiddenSize * hiddenSize * 4+ (numLayers - 1) * hiddenSize * hiddenSize * 8));
// cudaErrCheck(cudaMemset(T_f, 0.1, inputSize * hiddenSize * 4 + hiddenSize * hiddenSize * 4+ (numLayers - 1) * hiddenSize * hiddenSize * 8 * sizeof(float)));
// curandErrCheck(curandGenerateUniform(rng, bias, numLayers * hiddenSize * 4));
set_weight(0.1,0.1,0.1,0);
// if (TRAINING) {
// init_helper(y_diff, 0, seqLength*numLayers*numElements);
// init_helper(y_diff+seqLength*(numLayers-1)*numElements, 1, seqLength * numElements);
// // curandErrCheck(curandGenerateUniform(rng, y_diff+seqLength*(numLayers-1)*numElements, seqLength * numElements));
// }
// curandErrCheck(curandDestroyGenerator(rng));
// Make sure everything is done before we start the timers
cudaErrCheck(cudaDeviceSynchronize());
// prepare T
// float alpha = 1.f;
// float beta = 0.f;
// for (int layer = 0; layer < numLayers; layer++) {
// float *T_i_in = T + layer * hiddenSize * hiddenSize * 8;
// float *T_i_out = T_f + layer * hiddenSize * hiddenSize * 8;
// float *T_h_in = T + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
// float *T_h_out = T_f + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4;
// cublasErrCheck(cublasSetStream(handle, stream_i[layer]));
// cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize));
// cublasErrCheck(cublasSetStream(handle, stream_h[layer]));
// cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_h_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_h_out, 4 * hiddenSize));
// }
}
void clearStates(float * input=NULL, float * label_=NULL) {
if(TRAINING) {
if(input) {set_input(input);}
else {init_helper(i_data, 0.2, seqLength * (inputNumElements + numLayers * numElements));}
if(label) set_label(label);
else {init_helper(label, 1, numElements * seqLength);}
}
// init_helper(y_diff, 0, seqLength*(numLayers-1)*numElements);
// init_helper(y_diff+seqLength*(numLayers-1)*numElements, 1, seqLength * numElements);
// init_helper(peeps_diff, 0, 3 * numElements * numLayers * seqLength);
// curandErrCheck(curandGenerateUniform(rng, y_diff+seqLength*(numLayers-1)*numElements, seqLength * numElements));
// init_helper(c_diff, 0, numLayers * numElements );
// init_helper(loss, 0, numElements * seqLength);
// cudaErrCheck(cudaDeviceSynchronize());
}
float Forward(float* sumLoss) {
float alpha = 1.f;
float beta = 0.f;
float elapsedTime;
cudaEvent_t start, stop;
cudaErrCheck(cudaEventCreate(&start));
cudaErrCheck(cudaEventCreate(&stop));
cudaErrCheck(cudaEventRecord(start));
int lStart = 0;
int lEnd = 0;
int rStart = 0;
int rEnd = 0;
int recurBatchSize = RECUR_BATCH_SIZE;
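// Scheduling sketch: the loop below walks a diagonal wavefront over
// (layer, timestep) so a layer can start timestep t as soon as the layer below
// has produced y_t. Input GEMMs (W*x) are batched over RECUR_BATCH_SIZE
// timesteps on stream_i[layer]; the recurrent GEMM (R*y) and the fused
// pointwise kernel then run one timestep at a time on stream_h[layer], with
// events_i/events_h expressing the cross-stream dependencies.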
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
rStart = 0;
}
else {
// Move "up" and "left"
lStart++;
lEnd++;
rStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || rStart < 0) {
rStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (rStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
rStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || rStart < 0) {
break;
}
}
rEnd = rStart + recurBatchSize;
// printf("lStart %d lEnd %d rStart %d rEnd %d\n", lStart, lEnd,
// rStart, rEnd);
if (rEnd > seqLength) rEnd = seqLength;
for (int layer = lStart; layer < lEnd; layer++) {
cublasErrCheck(cublasSetStream(handle, stream_i[layer]));
//wait for xt to be calculated
for (int i = rStart; i < rEnd; i++) {
if (layer > 0) {
cudaErrCheck(cudaStreamWaitEvent(stream_i[layer], events_h[layer - 1][i], 0));
cudaErrCheck(cudaEventDestroy(events_h[layer - 1][i]));
}
}
// Optimization 1
if (GROUP_GEMM) {
//[4N * N] x [N * 2m] = [4N * 2m]
cublasErrCheck(cublasSgemm(handle,
transa, transb,
GATE_NUM * hiddenSize, miniBatch * (rEnd - rStart), layer > 0 ? hiddenSize: inputSize,
&alpha,
layer > 0 ? &T_f[GetOffsetW(layer)]:T_f,
transa == CUBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
layer > 0 ? i_data + rStart * numElements + (layer - 1) * seqLength * numElements + seqLength * inputNumElements: i_data + rStart * inputNumElements,
layer > 0 ? hiddenSize:inputSize,
&beta,
tmp_i + GATE_NUM * rStart * numElements,
GATE_NUM * hiddenSize));
}
else {
for (int igemm =0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(cublasSgemm(handle,
transa, transb,
hiddenSize, miniBatch * (rEnd - rStart), layer > 0 ? hiddenSize:inputSize,
&alpha,
layer > 0 ? &T_f[GetOffsetW(layer) + igemm * hiddenSize]:&T_f[igemm * hiddenSize],
transa == CUBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
layer > 0 ? i_data + rStart * numElements + (layer - 1) * seqLength * numElements + seqLength * inputNumElements: i_data + rStart * inputNumElements,
layer > 0 ? hiddenSize:inputSize,
&beta,
tmp_i + GATE_NUM * rStart * numElements + igemm * hiddenSize,
GATE_NUM * hiddenSize));
}
}
for (int i = rStart; i < rEnd; i++) {
cudaErrCheck(cudaEventCreate(&events_i[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_i[layer][i], stream_i[layer]));
}
for (int i = rStart; i < rEnd; i++) {
cublasErrCheck(cublasSetStream(handle, stream_h[layer]));
// Optimization 1
if (GROUP_GEMM) {
//[4N * N] x [N * m] = [4N * m]
if (i > 0) {
cublasErrCheck(cublasSgemm(handle,
transa, transb,
GATE_NUM * hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * inputSize * hiddenSize],
transa == CUBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
// h_data + i * numElements + layer * (seqLength + 1) * numElements,
i_data + layer * seqLength * numElements + seqLength * inputNumElements + (i - 1) * numElements,
hiddenSize,
&beta,
tmp_h + GATE_NUM * layer * numElements,
GATE_NUM * hiddenSize));
}
}
else {
if (i > 0) {
for (int igemm =0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(cublasSgemm(handle,
transa, transb,
hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0? &T_f[GetOffsetR(layer) + igemm * hiddenSize]:&T_f[GATE_NUM * inputSize * hiddenSize + igemm * hiddenSize],
transa == CUBLAS_OP_N ? GATE_NUM * hiddenSize : hiddenSize,
i_data + layer * seqLength * numElements + seqLength * inputNumElements + (i - 1) * numElements,
hiddenSize,
&beta,
tmp_h + GATE_NUM * layer * numElements + igemm * hiddenSize,
GATE_NUM * hiddenSize));
}
}
}
cudaErrCheck(cudaStreamWaitEvent(stream_h[layer], events_i[layer][i], 0));
cudaErrCheck(cudaEventDestroy(events_i[layer][i]));
// Optimization 3
if (FUSE_PW) {
dim3 blockDim;
dim3 gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
elementWise_fp <<< gridDim, blockDim , 0, stream_h[layer] >>>
(hiddenSize, miniBatch,
i > 0 ? tmp_h + GATE_NUM * layer * numElements:NULL,
tmp_i + GATE_NUM * i * numElements,
bias + GATE_NUM * layer * hiddenSize,
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
#else
NULL,
#endif
// TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
TRAINING ? stateGates + GATE_NUM * (i * numElements + layer * seqLength * numElements) : NULL,
// h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
i_data + i * numElements + layer * seqLength * numElements + seqLength * inputNumElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
layer == numLayers - 1 ? label + i * numElements : NULL,
layer == numLayers - 1 ? loss + i * numElements: NULL,
mask,
y_diff + i * numElements + layer * seqLength * numElements,
TRAINING);
cudaErrCheck(cudaGetLastError());
}
else {
LSTM_elementwise_unfused(hiddenSize, miniBatch,
i > 0 ? tmp_h + GATE_NUM * layer * numElements:NULL,
tmp_i + GATE_NUM * i * numElements,
bias + GATE_NUM * layer * hiddenSize,
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
#else
NULL,
#endif
// TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL,
// h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
i_data + i * numElements + layer * seqLength * numElements + seqLength * inputNumElements,
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
TRAINING,
stream_h[layer]);
}
if (layer != numLayers - 1) {
cudaErrCheck(cudaEventCreate(&events_h[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_h[layer][i], stream_h[layer]));
}
}
}
}
cublasErrCheck(cublasSetStream(handle, stream_h[numLayers - 1]));
cublasErrCheck(cublasSasum(handle, numElements * seqLength,
loss, 1, sumLoss));
cudaErrCheck(cudaEventRecord(stop));
cudaErrCheck(cudaEventSynchronize(stop));
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start, stop));
cudaErrCheck(cudaDeviceSynchronize());
cudaErrCheck(cudaEventDestroy(start));
cudaErrCheck(cudaEventDestroy(stop));
return elapsedTime;
}
float Backward(float learningRate) {
float elapsedTime;
cudaEvent_t start_bp, stop_bp;
cudaErrCheck(cudaEventCreate(&start_bp));
cudaErrCheck(cudaEventCreate(&stop_bp));
cudaErrCheck(cudaEventRecord(start_bp));
int lStart = 0;
int lEnd = 0;
int rStart = 0;
int rEnd = 0;
int rev_lStart = 0;
int rev_lEnd = 0;
int rev_rStart = 0;
int rev_rEnd = 0;
int recurBatchSize = RECUR_BATCH_BP_SIZE;
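// Same wavefront walk as in Forward(), but the (layer, timestep) indices are
// mirrored into the rev_* variables below so backprop runs the top layer first
// and the last timestep first; the W^T GEMMs are batched over
// RECUR_BATCH_BP_SIZE timesteps.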
while (true) {
// Many layer "scheduling".
if (lEnd == 0) {
lStart = 0;
lEnd = 1;
rStart = 0;
} else {
// Move "up" and "left"
lStart++;
lEnd++;
rStart -= recurBatchSize;
// Over the top or off the left, reset to layer 0
if (lEnd > numLayers || rStart < 0) {
rStart += (lStart + 1) * recurBatchSize;
lStart = 0;
lEnd = 1;
}
// Off the right, step up
while (rStart >= seqLength && lEnd <= numLayers) {
lStart++;
lEnd++;
rStart -= recurBatchSize;
}
// Over the top or off the left, done!
if (lEnd > numLayers || rStart < 0) {
break;
}
}
rEnd = rStart + recurBatchSize;
if (rEnd > seqLength) rEnd = seqLength;
rev_lStart = numLayers - lEnd;
rev_lEnd = numLayers - lStart;
rev_rStart = seqLength - rStart - 1;
rev_rEnd = seqLength - rEnd - 1;
// printf("rev_lStart %d rev_lEnd %d rev_rStart %d rev_rEnd %d\n", rev_lStart, rev_lEnd, rev_rStart, rev_rEnd);
for (int layer = rev_lStart; layer < rev_lEnd; layer++) {
for (int i = rev_rStart; i > rev_rEnd; i--) {
// printf("level %d row %d\n", layer, i);
cublasErrCheck(cublasSetStream(handle, stream_h[layer]));
//wait for the upper layer
// if (layer < numLayers-1) {
// cudaErrCheck(cudaStreamWaitEvent(stream_h[layer], events_h[layer + 1][i], 0));
// cudaErrCheck(cudaEventDestroy(events_h[layer + 1][i]));
// }
//pointwise operations get diff
// cudaErrCheck(cudaDeviceSynchronize());
dim3 blockDim;
dim3 gridDim;
blockDim.x = 256;
gridDim.x = (numElements + blockDim.x - 1) / blockDim.x;
elementWise_bp <<< gridDim, blockDim , 0, stream_h[layer] >>>
(hiddenSize, miniBatch,
y_diff + i * numElements + layer * numElements * seqLength,
(i == seqLength - 1) ? NULL : stateGates_diff + GATE_NUM * ((i + 1) * numElements + layer * seqLength * numElements),
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements),
stateGates + GATE_NUM * (i * numElements + layer * seqLength * numElements),
#ifdef PEEPHOLES
peeps + PEEP_NUM * layer * hiddenSize,
peeps_diff + PEEP_NUM * (i * numElements + layer * seqLength * numElements),
#else
NULL,
NULL,
#endif
c_data + i * numElements + layer * (seqLength + 1) * numElements,
c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements,
c_diff + layer * numElements,
i != 0);
// printWeight();
cudaErrCheck(cudaGetLastError());
if (i == 1) {
cudaErrCheck(cudaEventCreate(&events_i[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_i[layer][i], stream_h[layer]));
}
if(layer > 0) {
cudaErrCheck(cudaEventCreate(&events_h[layer][i], cudaEventDisableTiming));
cudaErrCheck(cudaEventRecord(events_h[layer][i], stream_h[layer]));
}
if (i > 0) {
//RT * diff = dy
float alpha = 1.f;
float beta = 1.f;
if (GROUP_GEMM) {
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, transb,
hiddenSize, miniBatch, GATE_NUM * hiddenSize,
&alpha,
layer > 0 ? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * hiddenSize * inputSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements),
GATE_NUM * hiddenSize,
&beta,
y_diff + layer * numElements * seqLength + (i - 1) * numElements,
hiddenSize));
}
else {
for (int igemm = 0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, transb,
hiddenSize, miniBatch, hiddenSize,
&alpha,
layer > 0 ? &T_f[GetOffsetR(layer) + igemm * hiddenSize]:&T_f[GATE_NUM * hiddenSize * inputSize + igemm * hiddenSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (i * numElements + layer * seqLength * numElements) + igemm * hiddenSize,
GATE_NUM * hiddenSize,
&beta,
y_diff + layer * numElements * seqLength + (i - 1) * numElements,
hiddenSize));
}
}
}
}
// transa = (PRE_TRANSPOSE && (seqLength > 1)) ? CUBLAS_OP_N : CUBLAS_OP_T;
//W*diff = dx
int row = rev_rEnd+1;
if (layer > 0) {
cublasErrCheck(cublasSetStream(handle, stream_h[layer - 1]));
for (int i = rev_rStart; i > rev_rEnd; i--) {
cudaErrCheck(cudaStreamWaitEvent(stream_h[layer-1], events_h[layer][i], 0));
cudaErrCheck(cudaEventDestroy(events_h[layer][i]));
}
float alpha = 1.f;
float beta = 0.f;
if (GROUP_GEMM) {
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, transb,
hiddenSize, miniBatch*(rev_rStart - rev_rEnd), GATE_NUM * hiddenSize,
&alpha,
&T_f[GetOffsetW(layer)],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (row * numElements + layer * seqLength * numElements),
GATE_NUM * hiddenSize,
&beta,
y_diff + (layer - 1) * numElements * seqLength + row * numElements,
hiddenSize));
}
else {
for (int igemm = 0; igemm < GATE_NUM; igemm++) {
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_T, transb,
hiddenSize, miniBatch*(rev_rStart - rev_rEnd), hiddenSize,
&alpha,
&T_f[GetOffsetW(layer) + igemm * hiddenSize],
GATE_NUM * hiddenSize,
stateGates_diff + GATE_NUM * (row * numElements + layer * seqLength * numElements) + igemm * hiddenSize,
GATE_NUM * hiddenSize,
&beta,
y_diff + (layer - 1) * numElements * seqLength + row * numElements,
hiddenSize));
beta = 1.f;
}
}
}
if (row == 0) {
cublasErrCheck(cublasSetStream(handle, stream_h[layer]));
float lr = -learningRate;
float beta = 1.f;
//update W
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
GATE_NUM * hiddenSize, layer > 0 ? hiddenSize : inputSize, miniBatch * seqLength,
&lr,
stateGates_diff + GATE_NUM * layer * seqLength * numElements,
GATE_NUM * hiddenSize,
layer > 0 ? i_data + (layer - 1) * seqLength * numElements + seqLength * inputNumElements : i_data,
layer > 0 ? hiddenSize : inputSize,
&beta,
layer > 0 ? &T_f[GetOffsetW(layer)]:&T_f[0],
GATE_NUM * hiddenSize));
cudaErrCheck(cudaStreamWaitEvent(stream_i[layer], events_i[layer][1], 0));
cudaErrCheck(cudaEventDestroy(events_i[layer][1]));
cublasErrCheck(cublasSetStream(handle, stream_i[layer]));
//update R
cublasErrCheck(cublasSgemm(handle,
CUBLAS_OP_N, CUBLAS_OP_T,
GATE_NUM * hiddenSize, hiddenSize, miniBatch * (seqLength - 1),
&lr,
stateGates_diff + GATE_NUM * (layer * seqLength * numElements + numElements),
GATE_NUM * hiddenSize,
i_data + layer * seqLength * numElements + seqLength * inputNumElements,
hiddenSize,
&beta,
layer > 0 ? &T_f[GetOffsetR(layer)]:&T_f[GATE_NUM * hiddenSize * inputSize],
GATE_NUM * hiddenSize));
cublasErrCheck(cublasSetStream(handle, stream_h[layer]));
//update bias
cublasErrCheck(cublasSgemv(handle,
CUBLAS_OP_N,
GATE_NUM * hiddenSize, miniBatch * seqLength,
&lr,
stateGates_diff + GATE_NUM * (layer * seqLength * numElements),
GATE_NUM * hiddenSize,
diff_helper,
1,
&beta,
&bias[layer * hiddenSize * GATE_NUM],
1));
#ifdef PEEPHOLES
//update peeps
cublasErrCheck(cublasSgemv(handle,
CUBLAS_OP_N,
PEEP_NUM * hiddenSize, miniBatch * seqLength,
&lr,
peeps_diff + PEEP_NUM * (layer * numElements * seqLength),
PEEP_NUM * hiddenSize,
diff_helper,
1,
&beta,
&peeps[layer * hiddenSize * PEEP_NUM],
1));
#endif
}
}
}
cudaErrCheck(cudaEventRecord(stop_bp));
cudaErrCheck(cudaEventSynchronize(stop_bp));
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start_bp, stop_bp));
cudaErrCheck(cudaDeviceSynchronize());
cudaErrCheck(cudaEventDestroy(start_bp));
cudaErrCheck(cudaEventDestroy(stop_bp));
return elapsedTime;
}
void printChecksum() {
float* testOutputi;
// float* testOutputh;
float* testOutputc;
int numElements = hiddenSize * miniBatch;
testOutputi = (float*)malloc(numElements * seqLength * sizeof(float));
// testOutputh = (float*)malloc(numElements * numLayers * sizeof(float));
testOutputc = (float*)malloc(numElements * numLayers * sizeof(float));
cudaErrCheck(cudaMemcpy(testOutputi, i_data + seqLength * inputNumElements + (numLayers-1) * seqLength * numElements, seqLength * numElements * sizeof(float), cudaMemcpyDeviceToHost));
for (int layer = 0; layer < numLayers; layer++) {
// cudaErrCheck(cudaMemcpy(testOutputh + layer * numElements, h_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(testOutputc + layer * numElements, c_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost));
}
double checksumi = 0.;
// double checksumh = 0.;
double checksumc = 0.;
for (int m = 0; m < miniBatch; m++) {
for (int j = 0; j < seqLength; j++) {
for (int i = 0; i < hiddenSize; i++) {
checksumi += testOutputi[j * numElements + m * hiddenSize + i];
printf("i: (%d,%d): %f\n", j, i, testOutputi[j * numElements + m * hiddenSize + i]);
}
}
for (int j = 0; j < numLayers; j++) {
for (int i = 0; i < hiddenSize; i++) {
// checksumh += testOutputh[j * numElements + m * hiddenSize + i];
checksumc += testOutputc[j * numElements + m * hiddenSize + i];
}
}
if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi);
// if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh);
if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc);
}
printf("i checksum %f ", checksumi);
printf("c checksum %E \n", checksumc);
// printf("h checksum %E\n", checksumh);
free(testOutputi);
free(testOutputc);
// free(testOutputh);
cudaErrCheck(cudaDeviceSynchronize());
}
void printWeight() {
int t_size = (GATE_NUM * inputSize * hiddenSize + GATE_NUM * hiddenSize * hiddenSize);
int bias_size = GATE_NUM * hiddenSize * numLayers;
int stats_size = GATE_NUM * numElements * seqLength;
int c_diff_size = numLayers * numElements;
float* t_output, * bias_output, * states_output,
* y_output,
* c_diff_output;
t_output = (float*)malloc( t_size * sizeof(float));
bias_output = (float*)malloc(bias_size * sizeof(float));
states_output = (float*)malloc(stats_size * sizeof(float)); // must hold the full stateGates_diff copy below
y_output = (float*)malloc(seqLength * sizeof(float));
c_diff_output = (float*)malloc(c_diff_size * sizeof(float));
cudaErrCheck(cudaMemcpy(t_output, T_f, t_size * sizeof(float),cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(bias_output,bias,bias_size * sizeof(float),cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(states_output,stateGates_diff,stats_size * sizeof(float),cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(y_output, y_diff, seqLength * sizeof(float), cudaMemcpyDeviceToHost));
cudaErrCheck(cudaMemcpy(c_diff_output, c_diff, c_diff_size * sizeof(float), cudaMemcpyDeviceToHost));
printf("weights:\t");
for (int i = 0; i < t_size;i++) {
printf("%f\t",t_output[i]);
}
printf("\nbias:\t");
for (int i = 0; i < bias_size; i++) {
printf("%f\t",bias_output[i]);
}
printf("\n");
#ifdef PEEPHOLES
float * peeps_output;
int peeps_size = PEEP_NUM * numElements * numLayers * seqLength;
peeps_output = (float*)malloc(peeps_size * sizeof(float));
cudaErrCheck(cudaMemcpy(peeps_output,peeps_diff, peeps_size * sizeof(float),cudaMemcpyDeviceToHost));
printf("peeps:\t");
for (int i = 0; i < peeps_size; i++) {
printf("%f\t",peeps_output[i]);
}
printf("\n");
#endif
printf("states:\t");
for (int i = 0; i < stats_size; i++) {
printf("%f\t",states_output[i]);
}
printf("\ny:\t");
for (int i = 0; i < seqLength; i++) {
printf("%f\t",y_output[i]);
}
printf("\nc_diff:\t");
for (int i = 0; i < c_diff_size ; i++) {
printf("%f\t",c_diff_output[i]);
}
printf("\n");
curandErrCheck(curandDestroyGenerator(rng));
}
void freeMemory() {
// cudaErrCheck(cudaFree(h_data));
cudaErrCheck(cudaFree(i_data));
cudaErrCheck(cudaFree(c_data));
if (T != T_f) cudaErrCheck(cudaFree(T));
cudaErrCheck(cudaFree(T_f));
cudaErrCheck(cudaFree(bias));
cudaErrCheck(cudaFree(loss));
cudaErrCheck(cudaFree(label));
cudaErrCheck(cudaFree(mask));
#ifdef PEEPHOLES
cudaErrCheck(cudaFree(peeps));
#endif
cudaErrCheck(cudaFree(tmp_h));
cudaErrCheck(cudaFree(tmp_i));
if (TRAINING) {
// cudaErrCheck(cudaMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float)));
cudaErrCheck(cudaFree(stateGates));
cudaErrCheck(cudaFree(stateGates_diff));
cudaErrCheck(cudaFree(y_diff));
cudaErrCheck(cudaFree(c_diff));
#ifdef PEEPHOLES
cudaErrCheck(cudaFree(peeps_diff));
#endif
cudaErrCheck(cudaFree(diff_helper));
}
for (int i = 0; i < numLayers; i++) {
if (stream_i[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_i[i]));
if (stream_h[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_h[i]));
}
free(stream_i);
free(stream_h);
for (int i = 0; i < numLayers; i++) {
free(events_i[i]);
free(events_h[i]);
}
free(events_i);
free(events_h);
}
};
float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, int inputSize, bool checkF) {
float loss;
float elapsedTime;
cudaEvent_t global_start, global_end, run_start, run_end;
cudaErrCheck(cudaEventCreate(&global_start));
cudaErrCheck(cudaEventCreate(&global_end));
cudaErrCheck(cudaEventCreate(&run_start));
cudaErrCheck(cudaEventCreate(&run_end));
// cudaErrCheck(cudaEventDestroy(run_start));
// cudaErrCheck(cudaEventDestroy(run_end));
cudaErrCheck(cudaEventRecord(global_start));
LSTM_scheduler scheduler(hiddenSize,miniBatch,seqLength,numLayers,inputSize);
scheduler.init();
printf("Initialize success\n");
cudaErrCheck(cudaEventRecord(run_start));
// cudaErrCheck(cudaEventSynchronize(run_start));
// scheduler.Forward(&loss);
// printf("Forward loss is %f\n", loss);
// if (checkF) {
// scheduler.printChecksum();
// }
for (int i = 0; i < 10; i++) {
elapsedTime = scheduler.Forward(&loss);
printf("Forward time is %f, loss is %f\n", elapsedTime, loss);
if (TRAINING) {
#ifdef UPDATE
scheduler.clearStates();
#endif
elapsedTime = scheduler.Backward(0.2);
printf("Backward time is %f\n", elapsedTime);
}
// scheduler.printWeight();
}
// scheduler.Forward(&loss);
// printf("Forward loss is %f\n", loss);
cudaErrCheck(cudaEventRecord(run_end));
// We're done. Print some checksums
// if (checkF) {
// scheduler.printChecksum();
// }
scheduler.freeMemory();
cudaErrCheck(cudaDeviceSynchronize());
cudaErrCheck(cudaEventRecord(global_end));
cudaErrCheck(cudaEventSynchronize(global_end));
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, run_start, run_end));
printf("Running time used %f ms, avg %f\n", elapsedTime, elapsedTime/10);
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, global_start, global_end));
printf("Total time used %f ms\n", elapsedTime);
// cudaErrCheck(cudaEventDestroy(global_start));
// cudaErrCheck(cudaEventDestroy(global_end));
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, global_start, run_start));
printf("Initialize time used %f ms\n", elapsedTime);
cudaErrCheck(cudaEventElapsedTime(&elapsedTime, run_end, global_end));
printf("Memory free time used %f ms\n", elapsedTime);
return 0;
}
int main(int argc, char* argv[]) {
int seqLength;
int numLayers;
int hiddenSize;
int miniBatch;
int inputSize;
printf("\n");
if (argc == 6) {
seqLength = atoi(argv[1]);
numLayers = atoi(argv[2]);
hiddenSize = atoi(argv[3]);
miniBatch = atoi(argv[4]);
inputSize = atoi(argv[5]);
}
else if (argc == 1) {
printf("Running with default settings\n");
inputSize = 512;
seqLength = 100;
numLayers = 4;
hiddenSize = 512;
miniBatch = 64;
}
else {
printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> <inputSize>\n");
return 1;
}
printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d inputSize %d\n", seqLength, numLayers, hiddenSize, miniBatch, inputSize);
int numRuns = 1;
float totalTime = 0.f;
for (int run = 0; run < numRuns; run++) {
totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, inputSize, true);
}
// printf("Runtime %fms\n", totalTime / numRuns);
return totalTime < 0;
} |